max_stars_repo_path: string (length 4–245)
max_stars_repo_name: string (length 7–115)
max_stars_count: int64 (range 101–368k)
id: string (length 2–8)
content: string (length 6–1.03M)
pulsar/apps/http/decompress.py
PyCN/pulsar
1,410
11127391
<reponame>PyCN/pulsar<gh_stars>1000+
import zlib


class GzipDecompress:

    def __call__(self, data):
        deco = zlib.decompressobj(16 + zlib.MAX_WBITS)
        return deco.decompress(data)


class DeflateDecompress:

    def __call__(self, data):
        try:
            return zlib.decompressobj().decompress(data)
        except zlib.error:
            return zlib.decompressobj(-zlib.MAX_WBITS).decompress(data)
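A minimal usage sketch for the two decompressors above; the payloads are generated locally for illustration and are not part of the original file:

    import gzip, zlib

    payload = gzip.compress(b"hello world")
    print(GzipDecompress()(payload))      # b'hello world'

    raw = zlib.compress(b"hello world")
    print(DeflateDecompress()(raw))       # b'hello world'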
bayespy/inference/__init__.py
dungvtdev/upsbayescpm
622
11127472
################################################################################
# Copyright (C) 2013 <NAME>
#
# This file is licensed under the MIT License.
################################################################################
"""
Package for Bayesian inference engines

Inference engines
-----------------

.. autosummary::
   :toctree: generated/

   VB

Parameter expansions
--------------------

.. autosummary::
   :toctree: generated/

   vmp.transformations.RotationOptimizer
   vmp.transformations.RotateGaussian
   vmp.transformations.RotateGaussianARD
   vmp.transformations.RotateGaussianMarkovChain
   vmp.transformations.RotateSwitchingMarkovChain
   vmp.transformations.RotateVaryingMarkovChain
   vmp.transformations.RotateMultiple
"""

from .vmp.vmp import VB
RecoJets/JetProducers/python/hltMVAJetPuId_cff.py
Purva-Chaudhari/cmssw
852
11127476
from RecoJets.JetProducers.hltPUIdAlgo_cff import *
import RecoJets.JetProducers.MVAJetPuIdProducer_cfi as _mod

hltMVAJetPuIdCalculator = _mod.MVAJetPuIdProducer.clone(
    produceJetIds = False,
    algos = cms.VPSet(cms.VPSet(full_74x)),
    jec = "AK4PFchs"
)

hltMVAJetPuIdEvaluator = hltMVAJetPuIdCalculator.clone(
    jetids = "pileupJetIdCalculator"
)
tests/test_suite.py
chopeen/dataverse
681
11127478
# This contains a list of individual tests and will be run from Jenkins.
import unittest

import test_access
import test_create_test_account
import test_root_dataverse
import test_account
import test_dataverse
import test_dataset
import test_dataset_fileupload


# This is a list of testFileName.testClass
def suite():
    return unittest.TestSuite((
        unittest.makeSuite(test_access.test_access),
        unittest.makeSuite(test_create_test_account.test_create_test_account),
        unittest.makeSuite(test_root_dataverse.test_root_dataverse),
        unittest.makeSuite(test_account.test_account),
        unittest.makeSuite(test_dataverse.test_dataverse),
        unittest.makeSuite(test_dataset.test_dataset),
        unittest.makeSuite(test_dataset_fileupload.test_dataset_fileupload),
    ))


if __name__ == '__main__':
    result = unittest.TextTestRunner(verbosity=2).run(suite())
examples/tutorials/tutorial3.py
uaca/deepy
260
11127494
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import logging, os
logging.basicConfig(level=logging.INFO)

import numpy as np
import theano
import theano.tensor as T

from deepy.dataset import MnistDataset, MiniBatches
from deepy.networks import ComputationalGraph
from deepy.trainers import optimize_updates

from tutorial2 import MyJointTrainingModel

model_path = os.path.join(os.path.dirname(__file__), "models", "tutorial3.gz")

if __name__ == '__main__':
    model = graph.compile(input_dim=28 * 28)

    parameters = model.parameters
    gradients = T.grad(model.output, parameters)
    gradient_updates, _ = optimize_updates(parameters, gradients,
                                           config={"method": "MOMENTUM", "learning_rate": 0.03})

    train_monitors = dict(model.training_monitors)
    test_monitors = dict(model.testing_monitors)
    train_monitors["cost"] = model.output
    test_monitors["cost"] = model.test_output

    train_iteration = theano.function(inputs=model.input_variables,
                                      outputs=train_monitors.values(),
                                      updates=gradient_updates,
                                      allow_input_downcast=True)
    valid_iteration = theano.function(inputs=model.input_variables,
                                      outputs=test_monitors.values(),
                                      allow_input_downcast=True)

    max_epochs = 10
    mnist = MiniBatches(MnistDataset(), batch_size=20)

    for i in range(max_epochs):
        # Training
        cost_matrix = []
        for inputs in mnist.train_set():
            costs = train_iteration(*inputs)
            cost_matrix.append(costs)
        train_costs = list(zip(train_monitors.keys(), np.mean(cost_matrix, axis=0)))
        print "train", i, train_costs

        # Test with valid data
        cost_matrix = []
        for inputs in mnist.valid_set():
            costs = valid_iteration(*inputs)
            cost_matrix.append(costs)
        valid_costs = list(zip(test_monitors.keys(), np.mean(cost_matrix, axis=0)))
        print "valid", i, valid_costs

    model.save_params(model_path)
src/api/migrations/0012_delete_ffadminuser.py
nixplay/bullet-train-api
1,259
11127503
<filename>src/api/migrations/0012_delete_ffadminuser.py<gh_stars>1000+
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-05-18 10:42
from __future__ import unicode_literals

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('api', '0011_auto_20180517_1646'),
    ]

    operations = [
        migrations.DeleteModel(
            name='FFAdminUser',
        ),
    ]
imap_tools/folder.py
unqx/imap_tools
344
11127537
import re from typing import AnyStr, Optional, Iterable, List, Dict, Tuple from . import imap_utf7 from .consts import MailBoxFolderStatusOptions from .utils import check_command_status, pairs_to_dict, encode_folder from .errors import MailboxFolderStatusValueError, MailboxFolderSelectError, MailboxFolderCreateError, \ MailboxFolderRenameError, MailboxFolderDeleteError, MailboxFolderStatusError, MailboxFolderSubscribeError class FolderInfo: """ Mailbox folder info name: str - folder name delim: str - delimiter, a character used to delimit levels of hierarchy in a mailbox name flags: (str,) - folder flags A 'NIL' delimiter means that no hierarchy exists, the name is a "flat" name. """ __slots__ = 'name', 'delim', 'flags' def __init__(self, name: str, delim: str, flags: Tuple[str, ...]): self.name = name self.delim = delim self.flags = flags def __repr__(self): return "{}(name={}, delim={}, flags={})".format( self.__class__.__name__, repr(self.name), repr(self.delim), repr(self.flags)) def __eq__(self, other): return all(getattr(self, i) == getattr(other, i) for i in self.__slots__) class MailBoxFolderManager: """Operations with mail box folders""" def __init__(self, mailbox): self.mailbox = mailbox self._current_folder = None def set(self, folder: AnyStr, readonly: bool = False) -> tuple: """Select current folder""" result = self.mailbox.box.select(encode_folder(folder), readonly) check_command_status(result, MailboxFolderSelectError) self._current_folder = folder return result def exists(self, folder: str) -> bool: """Checks whether a folder exists on the server.""" return len(self.list('', folder)) > 0 def create(self, folder: AnyStr) -> tuple: """ Create folder on the server. Use email box delimiter to separate folders. Example for "|" delimiter: "folder|sub folder" """ result = self.mailbox.box._simple_command('CREATE', encode_folder(folder)) check_command_status(result, MailboxFolderCreateError) return result def get(self) -> Optional[str]: """ Get current folder :return: None - if folder is not selected str - if folder is selected """ return self._current_folder def rename(self, old_name: AnyStr, new_name: AnyStr) -> tuple: """Rename folder from old_name to new_name""" result = self.mailbox.box._simple_command( 'RENAME', encode_folder(old_name), encode_folder(new_name)) check_command_status(result, MailboxFolderRenameError) return result def delete(self, folder: AnyStr) -> tuple: """Delete folder""" result = self.mailbox.box._simple_command('DELETE', encode_folder(folder)) check_command_status(result, MailboxFolderDeleteError) return result def status(self, folder: Optional[AnyStr] = None, options: Optional[Iterable[str]] = None) -> Dict[str, int]: """ Get the status of a folder :param folder: mailbox folder, current folder if None :param options: [str] with values from MailBoxFolderStatusOptions.all | None - for get all options :return: dict with available options keys example: {'MESSAGES': 41, 'RECENT': 0, 'UIDNEXT': 11996, 'UIDVALIDITY': 1, 'UNSEEN': 5} """ command = 'STATUS' if folder is None: folder = self.get() if not options: options = tuple(MailBoxFolderStatusOptions.all) for opt in options: if opt not in MailBoxFolderStatusOptions.all: raise MailboxFolderStatusValueError(str(opt)) status_result = self.mailbox.box._simple_command( command, encode_folder(folder), '({})'.format(' '.join(options))) check_command_status(status_result, MailboxFolderStatusError) result = self.mailbox.box._untagged_response(status_result[0], status_result[1], command) check_command_status(result, 
MailboxFolderStatusError) status_data = [i for i in result[1] if type(i) is bytes][0] # may contain tuples with encoded names values = status_data.decode().split('(')[1].split(')')[0].split(' ') return {k: int(v) for k, v in pairs_to_dict(values).items() if str(v).isdigit()} def list(self, folder: AnyStr = '', search_args: str = '*', subscribed_only: bool = False) -> List[FolderInfo]: """ Get a listing of folders on the server :param folder: mailbox folder, if empty - get from root :param search_args: search arguments, is case-sensitive mailbox name with possible wildcards * is a wildcard, and matches zero or more characters at this position % is similar to * but it does not match a hierarchy delimiter :param subscribed_only: bool - get only subscribed folders :return: [FolderInfo] """ folder_item_re = re.compile(r'\((?P<flags>[\S ]*)\) (?P<delim>[\S]+) (?P<name>.+)') command = 'LSUB' if subscribed_only else 'LIST' typ, data = self.mailbox.box._simple_command( command, encode_folder(folder), encode_folder(search_args)) typ, data = self.mailbox.box._untagged_response(typ, data, command) result = [] for folder_item in data: if not folder_item: continue if type(folder_item) is bytes: folder_match = re.search(folder_item_re, imap_utf7.decode(folder_item)) if not folder_match: continue folder_dict = folder_match.groupdict() name = folder_dict['name'] if name.startswith('"') and name.endswith('"'): name = name[1:-1] elif type(folder_item) is tuple: # when name has " or \ chars folder_match = re.search(folder_item_re, imap_utf7.decode(folder_item[0])) if not folder_match: continue folder_dict = folder_match.groupdict() name = imap_utf7.decode(folder_item[1]) else: continue result.append(FolderInfo( name=name, delim=folder_dict['delim'].replace('"', ''), flags=tuple(folder_dict['flags'].split()) # noqa, )) return result def subscribe(self, folder: AnyStr, value: bool) -> tuple: """subscribe/unsubscribe to folder""" method = self.mailbox.box.subscribe if value else self.mailbox.box.unsubscribe result = method(encode_folder(folder)) check_command_status(result, MailboxFolderSubscribeError) return result
textworld/generator/tests/test_logger.py
JohnnySun8/TextWorld
307
11127539
<gh_stars>100-1000
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.

import numpy as np
from os.path import join as pjoin

import textworld
from textworld.utils import make_temp_directory
from textworld.generator.logger import GameLogger


def test_logger():
    rng = np.random.RandomState(1234)
    game_logger = GameLogger()
    for _ in range(10):
        options = textworld.GameOptions()
        options.nb_rooms = 5
        options.nb_objects = 10
        options.chaining.max_depth = 3
        options.chaining.max_breadth = 3
        options.seeds = rng.randint(65635)
        game = textworld.generator.make_game(options)
        game_logger.collect(game)

    with make_temp_directory(prefix="textworld_tests") as tests_folder:
        filename = pjoin(tests_folder, "game_logger.pkl")
        game_logger.save(filename)
        game_logger2 = GameLogger.load(filename)
        assert game_logger is not game_logger2
        assert game_logger.stats() == game_logger2.stats()
realnvp/realnvp_helpers.py
bjlkeng/sandbox
158
11127548
<reponame>bjlkeng/sandbox import numpy as np from tensorflow.keras import backend as K from tensorflow.python.keras.layers import (InputSpec, Layer) from tensorflow.python.keras.layers.normalization import BatchNormalizationBase from tensorflow.python.keras.utils import tf_utils from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn class Mask(Layer): def __init__(self, mask_type, *args, **kwargs): mask_type in ['check_even', 'check_odd', 'channel_even', 'channel_odd'] self.mask_type = mask_type super().__init__(*args, **kwargs) def build_mask(self, input_shape): def spatial_mask_value(row, col): if row % 2 == 0: ret = 1 if col % 2 == 0 else 0 else: ret = 0 if col % 2 == 0 else 1 return ret if self.mask_type == 'check_even' else 1 - ret def channel_mask(chn): return 1 - chn % 2 if self.mask_type == 'channel_even' else chn % 2 data = np.ones(input_shape) for row in range(input_shape[0]): for col in range(input_shape[1]): for chn in range(input_shape[2]): if self.mask_type in ['check_even', 'check_odd']: data[row, col, chn] = spatial_mask_value(row, col) else: assert self.mask_type in ['channel_even', 'channel_odd'] # channel mask data[row, col, chn] = channel_mask(chn) return K.constant(np.ravel(data), dtype='float32', shape=input_shape) def build(self, input_shape): assert len(input_shape) == 4, \ 'Layer assumes a (batch, row, col, chn) dimensions got {}' \ .format(input_shape) # Assume channel_last (tensorflow) channel_axis = -1 if input_shape[channel_axis] is None: raise ValueError('The channel dimension of the inputs ' 'should be defined. Found `None`.') input_dim = input_shape[channel_axis] self.mask = self.build_mask(input_shape[1:]) # Set input spec. 
self.input_spec = InputSpec(ndim=len(input_shape), axes={channel_axis: input_dim}) self.built = True def call(self, inputs): return self.mask * inputs class FlowBatchNorm(BatchNormalizationBase): """ Modified BatchNorm implementation so that I can add determiniant loss for flow-based networks Layer """ def __init__(self, axis=-1, momentum=0.99, epsilon=1e-3, center=True, scale=True, beta_initializer='zeros', gamma_initializer='ones', moving_mean_initializer='zeros', moving_variance_initializer='ones', beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, gamma_constraint=None, renorm=False, renorm_clipping=None, renorm_momentum=0.99, trainable=True, name=None, **kwargs): super(FlowBatchNorm, self).__init__(name=name, fused=False, virtual_batch_size=None, adjustment=None, renorm=False, **kwargs) def call(self, inputs, training=None): training = self._get_training_value(training) assert self.virtual_batch_size is None, "Disabled" assert self.fused is False, "Disabled" assert self.adjustment is None, "Disabled" assert self.renorm is False, "Disabled" # Compute the axes along which to reduce the mean / variance input_shape = inputs.shape ndims = len(input_shape) reduction_axes = [i for i in range(ndims) if i not in self.axis] # Broadcasting only necessary for single-axis batch norm where the axis is # not the last dimension broadcast_shape = [1] * ndims broadcast_shape[self.axis[0]] = input_shape.dims[self.axis[0]].value def _broadcast(v): cond = (v is not None and len(v.shape) != ndims and reduction_axes != list(range(ndims - 1))) if cond: return array_ops.reshape(v, broadcast_shape) return v scale, offset = _broadcast(self.gamma), _broadcast(self.beta) def _compose_transforms(scale, offset, then_scale, then_offset): if then_scale is not None: scale *= then_scale offset *= then_scale if then_offset is not None: offset += then_offset return (scale, offset) # Determine a boolean value for `training`: could be True, False, or None. training_value = tf_utils.constant_value(training) if training_value is False: mean, variance = self.moving_mean, self.moving_variance else: # Some of the computations here are not necessary when training==False # but not a constant. However, this makes the code simpler. keep_dims = self.virtual_batch_size is not None or len(self.axis) > 1 mean, variance = self._moments( math_ops.cast(inputs, self._param_dtype), reduction_axes, keep_dims=keep_dims) moving_mean = self.moving_mean moving_variance = self.moving_variance mean = tf_utils.smart_cond(training, lambda: mean, lambda: ops.convert_to_tensor_v2(moving_mean)) variance = tf_utils.smart_cond( training, lambda: variance, lambda: ops.convert_to_tensor_v2(moving_variance)) new_mean, new_variance = mean, variance if self._support_zero_size_input(): # Keras assumes that batch dimension is the first dimension for Batch # Normalization. input_batch_size = array_ops.shape(inputs)[0] else: input_batch_size = None def _do_update(var, value): """Compute the updates for mean and variance.""" return self._assign_moving_average(var, value, self.momentum, input_batch_size) def mean_update(): true_branch = lambda: _do_update(self.moving_mean, new_mean) false_branch = lambda: self.moving_mean return tf_utils.smart_cond(training, true_branch, false_branch) def variance_update(): """Update the moving variance.""" def true_branch_renorm(): # We apply epsilon as part of the moving_stddev to mirror the training # code path. 
moving_stddev = _do_update(self.moving_stddev, math_ops.sqrt(new_variance + self.epsilon)) return self._assign_new_value( self.moving_variance, # Apply relu in case floating point rounding causes it to go # negative. K.relu(moving_stddev * moving_stddev - self.epsilon)) if self.renorm: true_branch = true_branch_renorm else: true_branch = lambda: _do_update(self.moving_variance, new_variance) false_branch = lambda: self.moving_variance return tf_utils.smart_cond(training, true_branch, false_branch) self.add_update(mean_update) self.add_update(variance_update) mean = math_ops.cast(mean, inputs.dtype) variance = math_ops.cast(variance, inputs.dtype) if offset is not None: offset = math_ops.cast(offset, inputs.dtype) if scale is not None: scale = math_ops.cast(scale, inputs.dtype) # TODO(reedwm): Maybe do math in float32 if given float16 inputs, if doing # math in float16 hurts validation accuracy of popular models like resnet. outputs = nn.batch_normalization(inputs, _broadcast(mean), _broadcast(variance), offset, scale, self.epsilon) # If some components of the shape got lost due to adjustments, fix that. outputs.set_shape(input_shape) # bkeng: Flow loss/metric self.add_flow_loss(variance, scale) return outputs def add_flow_loss(self, variance, scale): pass # def call(self, inputs, training=None): # input_shape = K.int_shape(inputs) # # assert input_shape[0] is not None, "Must explicitly specify batch size" # # Prepare broadcasting shape. # ndim = len(input_shape) # reduction_axes = list(range(len(input_shape))) # del reduction_axes[self.axis] # broadcast_shape = [1] * len(input_shape) # broadcast_shape[self.axis] = input_shape[self.axis] # # # Determines whether broadcasting is needed. # needs_broadcasting = (sorted(reduction_axes) != list(range(ndim))[:-1]) # # def normalize_inference(): # if needs_broadcasting: # # In this case we must explicitly broadcast all parameters. 
# broadcast_moving_mean = K.reshape(self.moving_mean, # broadcast_shape) # broadcast_moving_variance = K.reshape(self.moving_variance, # broadcast_shape) # if self.center: # broadcast_beta = K.reshape(self.beta, broadcast_shape) # else: # broadcast_beta = None # if self.scale: # broadcast_gamma = K.reshape(self.gamma, # broadcast_shape) # else: # broadcast_gamma = None # return K.batch_normalization( # inputs, # broadcast_moving_mean, # broadcast_moving_variance, # broadcast_beta, # broadcast_gamma, # axis=self.axis, # epsilon=self.epsilon) # else: # return K.batch_normalization( # inputs, # self.moving_mean, # self.moving_variance, # self.beta, # self.gamma, # axis=self.axis, # epsilon=self.epsilon) # # # If the learning phase is *static* and set to inference: # if training in {0, False}: # return normalize_inference(), self.moving_mean, self.moving_variance # # # If the learning is either dynamic, or set to training: # normed_training, mean, variance = K.normalize_batch_in_training( # inputs, self.gamma, self.beta, reduction_axes, # epsilon=self.epsilon) # # # bkeng: Explicitly add determinant loss here as: `-log(gamma / sqrt(var + eps))` # def expand_batch(tensor): # return inputs * 0 + tensor # loss = expand_batch(-K.log(self.gamma) + 0.5 * K.log(variance + self.epsilon)) # self.add_loss(loss, inputs=True) # self.add_metric(loss, aggregation='mean', name='BatchNormLoss') # # if K.backend() != 'cntk': # sample_size = K.prod([K.shape(inputs)[axis] # for axis in reduction_axes]) # sample_size = K.cast(sample_size, dtype=K.dtype(inputs)) # if K.backend() == 'tensorflow' and sample_size.dtype != 'float32': # sample_size = K.cast(sample_size, dtype='float32') # # # sample variance - unbiased estimator of population variance # variance *= sample_size / (sample_size - (1.0 + self.epsilon)) # # self.add_update([K.moving_average_update(self.moving_mean, # mean, # self.momentum), # K.moving_average_update(self.moving_variance, # variance, # self.momentum)], # inputs) # # # # Pick the normalized form corresponding to the training phase. # return K.in_train_phase(normed_training, normalize_inference, training=training)
migrations/versions/62c10b7111e4_.py
brayest/testcode
652
11127552
"""empty message Revision ID: <KEY> Revises: <PASSWORD> Create Date: 2019-04-01 19:52:11.577324 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '<KEY>' down_revision = 'c4734f6489df' branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.alter_column('students', 'creator_id', existing_type=sa.INTEGER(), nullable=False) op.add_column('users', sa.Column('image', sa.String(length=240), nullable=True)) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.drop_column('users', 'image') op.alter_column('students', 'creator_id', existing_type=sa.INTEGER(), nullable=True) # ### end Alembic commands ###
plugins/sublime/tandem.py
geoffxy/tandem
732
11127595
import os import sys import random from subprocess import Popen, PIPE from threading import Thread, Event import sublime import sublime_plugin from tandem.diff_match_patch import diff_match_patch from tandem.edit import Edit # sys hack to add enum, required by the messages module file sys.path.append(os.path.join(os.path.dirname(__file__), "enum-dist")) import tandem.agent.tandem.agent.protocol.messages.editor as m # noqa DEBUG = False is_active = False is_processing = False patch = diff_match_patch() def spawn_agent(extra_args=None): if extra_args is None: extra_args = [] dirname = os.path.dirname(__file__) filename = str(os.path.join(dirname, "agent/main.py")) return Popen( ["python3", filename] + extra_args, stdin=PIPE, stdout=PIPE, ) def get_string_port(): starting_port = random.randint(60600, 62600) return str(starting_port) def index_to_point(buffer_line_lengths, index): index_left = index for i in range(len(buffer_line_lengths)): if index_left >= buffer_line_lengths[i] + 1: index_left -= buffer_line_lengths[i] + 1 else: return (i, index_left) def error(): print("An error occurred.") def show_message(msg, show_gui): if show_gui: sublime.message_dialog(msg) else: print(msg) class TandemCommand(sublime_plugin.TextCommand): def run(self, edit, host_ip=None, host_port=None, show_gui=False): global tandem_agent tandem_agent.start(self.view, show_gui=show_gui) def is_enabled(self): global is_active return not is_active class TandemConnectCommand(sublime_plugin.TextCommand): def _start(self, args): global tandem_agent tandem_agent.start(self.view, session_id=args, show_gui=True) def run(self, edit): global is_active if is_active: msg = "Cannot start. An instance is already running on :{}".format( tandem_agent.agent_port, ) show_message(msg, True) return sublime.active_window().show_input_panel( caption="Enter Session ID", initial_text="", on_done=self._start, on_change=None, on_cancel=None, ) def is_enabled(self): global is_active return not is_active class TandemStopCommand(sublime_plugin.TextCommand): def run(self, edit, show_gui=False): global tandem_agent tandem_agent.stop(show_gui) def is_enabled(self): global is_active return is_active class TandemSessionCommand(sublime_plugin.TextCommand): def run(self, edit, show_gui=False): global tandem_agent tandem_agent.show_session_id(show_gui) def is_enabled(self): global is_active return is_active class TandemPlugin: @property def agent_port(self): return self._agent_port @property def _current_buffer(self): return self._view.substr(sublime.Region(0, self._view.size())) def _initialize(self, view): self._view = view self._buffer = "" self._output_checker = Thread(target=self._check_message) self._text_applied = Event() self._session_id = None def _start_agent(self): self._agent_port = get_string_port() self._agent = spawn_agent([ "--port", self._agent_port, "--log-file", "/tmp/tandem-agent-{}.log".format(self._agent_port), ]) if self._connect_to is not None: message = m.JoinSession(self._connect_to) else: message = m.HostSession() self._agent.stdin.write(m.serialize(message).encode("utf-8")) self._agent.stdin.write("\n".encode("utf-8")) self._agent.stdin.flush() self._agent_stdout_iter = iter(self._agent.stdout.readline, b"") self._output_checker.start() def _shut_down_agent(self): self._agent.stdin.close() self._agent.terminate() self._agent.wait() def check_buffer(self, buffer_id): if self._view.buffer_id() != buffer_id: return current_buffer = self._current_buffer if len(current_buffer) != len(self._buffer): 
self._send_patches(current_buffer) else: for i in range(len(current_buffer)): if current_buffer[i] != self._buffer[i]: self._send_patches(current_buffer) break self._buffer = current_buffer def _create_patch(self, start, end, text): if start is None or end is None or text is None: # Raise an error if in debug mode, otherwise return None if DEBUG: raise ValueError else: return None return [ { "start": { "row": start[0], "column": start[1], }, "end": { "row": end[0], "column": end[1], }, "text": "", }, { "start": { "row": start[0], "column": start[1], }, "end": { "row": 0, "column": 0, }, "text": text, } ] def _send_patches(self, current_buffer): try: prev_contents = self._buffer curr_contents = current_buffer diff_patches = patch.patch_make(prev_contents, curr_contents) patches = [] length_buffer = [len(x) for x in prev_contents.split(os.linesep)] for p in diff_patches: start_index = p.start1 end_index = p.start1 + p.length1 start_index_offset = 0 end_index_offset = 0 while(len(p.diffs)): (op, data) = p.diffs[0] if (op != diff_match_patch.DIFF_EQUAL): break start_index_offset = start_index_offset + len(data) p.diffs.pop(0) while(len(p.diffs)): (op, data) = p.diffs[-1] if (op != diff_match_patch.DIFF_EQUAL): break end_index_offset = end_index_offset + len(data) p.diffs.pop() start_rc = index_to_point( length_buffer, start_index + start_index_offset, ) end_rc = index_to_point( length_buffer, end_index - end_index_offset, ) text = [] for (op, data) in p.diffs: if op == diff_match_patch.DIFF_INSERT or \ op == diff_match_patch.DIFF_EQUAL: text.append(data) text = "".join(text) text_lengths = [len(word) for word in text.split(os.linesep)] if start_rc[0] == end_rc[0]: length_buffer[start_rc[0]] += text_lengths[0] length_buffer[start_rc[0]] -= end_rc[1] - start_rc[1] length_buffer[start_rc[0] + 1: start_rc[0] + 1] = \ text_lengths[1:] else: if len(text_lengths) > 1: length_buffer[start_rc[0]] = \ start_rc[1] + text_lengths[0] length_buffer[end_rc[0]] = length_buffer[end_rc[0]] \ - end_rc[1] + text_lengths[-1] length_buffer[start_rc[0] + 1: end_rc[0]] = \ text_lengths[1:-1] else: length_buffer[start_rc[0]] = \ start_rc[1] + text_lengths[0] \ + length_buffer[end_rc[0]] - end_rc[1] length_buffer[start_rc[0] + 1: end_rc[0] + 1] = [] patches.extend( self._create_patch(start_rc, end_rc, text) ) patches = [p for p in patches if p is not None] if len(patches) > 0: message = m.NewPatches(patches) self._agent.stdin.write(m.serialize(message).encode("utf-8")) self._agent.stdin.write("\n".encode("utf-8")) self._agent.stdin.flush() except: error() if DEBUG: raise def _read_message(self): try: binary_line = next(self._agent_stdout_iter) line = binary_line.decode("utf-8") return m.deserialize(line) except StopIteration: return None def _check_message(self): while True: self._text_applied.clear() message = self._read_message() if message is None: break def callback(): self._handle_message(message) sublime.set_timeout(callback, 0) self._text_applied.wait() def _handle_write_request(self, message): # Flush out any non-diff'd changes first self.check_buffer(self._view.buffer_id()) # Allow agent to apply remote operations ack = m.WriteRequestAck(message.seq) self._agent.stdin.write(m.serialize(ack).encode("utf-8")) self._agent.stdin.write("\n".encode("utf-8")) self._agent.stdin.flush() try: # Read, expect, and process an ApplyPatches message message = self._read_message() if not isinstance(message, m.ApplyPatches): raise ValueError("Invalid message. 
Expected ApplyPatches.") self._handle_apply_patches(message) except ValueError as v: raise v def _handle_apply_patches(self, message): for patch in message.patch_list: start = patch["oldStart"] end = patch["oldEnd"] text = patch["newText"] start_point = self._view.text_point( start["row"], start["column"], ) end_point = self._view.text_point( end["row"], end["column"], ) """ Edit cannot be passed around https://forum.sublimetext.com/t/multithreaded-plugin/14439 Use view abstraction instead. """ with Edit(self._view) as edit: edit.replace( sublime.Region(start_point, end_point), text, ) self._buffer = self._current_buffer def _handle_message(self, message): global is_processing is_processing = True try: if isinstance(message, m.WriteRequest): self._handle_write_request(message) elif isinstance(message, m.ApplyPatches): raise ValueError("Invalid message. ApplyPatches must be " "preceeded by a WriteRequest.") elif isinstance(message, m.SessionInfo): self._session_id = message.session_id show_message("Session ID: {}".format(message.session_id), True) else: raise ValueError("Unsupported message.") except ValueError as v: raise v finally: is_processing = False self._text_applied.set() def start(self, view, session_id=None, show_gui=False): global is_active if is_active: msg = "Cannot start. An instance is already running on :{}".format( self._agent_port, ) show_message(msg, show_gui) return self._connect_to = session_id if self._connect_to is not None: view = sublime.active_window().new_file() self._initialize(view) self._start_agent() is_active = True if self._connect_to is None: self.check_buffer(view.buffer_id()) def stop(self, show_gui): global is_active if not is_active: msg = "No Tandem instance running." show_message(msg, show_gui) return is_active = False self._shut_down_agent() if self._output_checker.isAlive(): self._output_checker.join() msg = "Tandem instance shut down." show_message(msg, show_gui) def show_session_id(self, show_gui): global is_active if not is_active: msg = "No Tandem instance running." show_message(msg, show_gui) return if self._session_id is not None: message = "Session ID: {}".format(self._session_id) else: message = "Error: No Session ID assigned." show_message(message, show_gui) class TandemTextChangedListener(sublime_plugin.EventListener): def on_modified(self, view): global is_active global is_processing if not is_active or is_processing: return global tandem_agent tandem_agent.check_buffer(view.buffer_id()) tandem_agent = TandemPlugin()
common/setup.py
krisshol/bach-kmno
248
11127610
from distutils.core import setup
from src import __version__

setup(
    name="irma.common",
    version=__version__,
    author="Quarkslab",
    author_email="<EMAIL>",
    description="The common component of the IRMA software",
    packages=["irma.common", "irma.common.base", "irma.common.utils",
              "irma.common.configuration", "irma.common.ftp",
              "irma.common.plugins"],
    package_dir={"irma.common": "src",
                 "irma.common.utils": "src/utils",
                 "irma.common.base": "src/base",
                 "irma.common.plugins": "src/plugins"},
    namespace_packages=["irma"]
)
external-import/crowdstrike/src/crowdstrike/report/__init__.py
tiiibs/connectors
132
11127617
<filename>external-import/crowdstrike/src/crowdstrike/report/__init__.py
# -*- coding: utf-8 -*-
"""OpenCTI CrowdStrike report module."""
spec/fixtures/repeated_packed_write.py
Hyhyx/lua-pb
225
11127661
<filename>spec/fixtures/repeated_packed_write.py<gh_stars>100-1000
import repeated_packed_pb2

thing = repeated_packed_pb2.Thing()
thing.parts.append(77)
thing.parts.append(999)

f = open('repeated_packed.bin', 'wb')
f.write(thing.SerializeToString())
f.close()
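A hedged sketch of reading the fixture back, assuming the generated repeated_packed_pb2 module defines a Thing message with a packed repeated parts field, as the writer above implies:

    import repeated_packed_pb2

    thing = repeated_packed_pb2.Thing()
    with open('repeated_packed.bin', 'rb') as f:
        thing.ParseFromString(f.read())
    print(list(thing.parts))   # expected: [77, 999]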
recipes/Python/303338_Helper_subclass/recipe-303338.py
tdiprima/code
2,023
11127679
"""Helper code for win32pdhquery. For clarification of discussion, a "tick" is a measurement taken after a pre- defined amount of elapsed time. Data is stored in the following format:: {kind name : {instance name : {counter name : tick measurements}}} If a counter does not have an applicable instance (such as memory counters), the instance name is set to 'N/A'. Typical usage is:: >>> query = QueryHelper(.1) # How often to measure, in seconds >>> query.addcounter("Processor", "_Total", "% Processor Time") >>> query.addcounter("Memory", None, "Page Faults/sec") >>> query.start() >>> query.stop() Print the instance for a human-readable format. Use csvoutput or csvsave for CSV output. Access to the results dict can be reached using 'results'. Use picklesave to pickle the results dict. The repr of the query can be used to see what counters were used. """ from win32pdhquery import Query, QueryError from itertools import izip, chain from cStringIO import StringIO from time import sleep from datetime import datetime import csv import pickle def interfacename(kind, inst): """Return first half of the name of a counter properly formatted""" if inst: return "%s(%s)" % (kind, inst) else: return kind class QueryHelper(Query): """Subclass of win32pdhquery.Query with a cleaner interface and some helper methods""" def __init__(self, tick_freq): """Initialize instance while storing frequency of measurement ticks""" self.tick_freq = tick_freq self._counters = {} Query.__init__(self) def addcounter(self, kind, inst, counter): """Add a counter for 'inst' of 'type' obj (using PDH terminology). Arguments: - kind type of object for the counter to work on (Process, Network Interface, etc.) - inst instance of 'type' (IEXPLORE, etc.; set to None if not applicable) - counter counter name """ interface_name = interfacename(kind, inst) self.rawaddcounter(interface_name, counter) kind_dict = self._counters.setdefault(kind, {}) inst_dict = kind_dict.setdefault(inst, {}) def start(self): """Start the counter with measurements at the frequency specified at instance creation""" self.starttime = datetime.utcnow() self.collectdatawhile(self.tick_freq) def stop(self): """Stop collecting data""" self.collectdatawhile_stop() # Need to make sure to wait long enough for any nagging measurement to # finish if self.tick_freq < 1: sleep(1) else: sleep(2 * self.tick_freq) self._parsedata() def _parsedata(self): """Take the measurement data and store it into self._results in the documented format""" results_dict = {} index_mapping = [] for index, interface_and_counter in enumerate(self.curpaths): # Skip first split value since always an empty string interface,counter = interface_and_counter.split('\\')[1:] try: paren = interface.index('(') except ValueError: paren = len(interface) kind = interface[:paren] inst = interface[paren+1:-1] # Not having a specific instance (memory measurements, for instance) # leads to 'N/A' being used as the name if not inst: inst = 'N/A' index_mapping.append((kind, inst, counter)) kind_dict = results_dict.setdefault(kind, {}) inst_dict = kind_dict.setdefault(inst, {}) inst_dict.setdefault(counter, []) # If the counter was stopped too quickly it may not have gotten # any values; just stick in empty values if not hasattr(self, "curresults"): for kind_dict in results_dict.itervalues(): for inst_dict in kind_dict.itervalues(): for counter in inst: inst_dict[counter] = ([], 0) else: for dataset in self.curresults: for index,data in enumerate(dataset): kind,inst,counter = index_mapping[index] 
results_dict[kind][inst][counter].append(data) for kind in results_dict.iterkeys(): for inst in results_dict[kind].iterkeys(): for counter in results_dict[kind][inst].iterkeys(): ticks = results_dict[kind][inst][counter] results_dict[kind][inst][counter] = ticks self._results = results_dict def instticks(self, kind, inst): """Return a two-item tuple; first item contains counter names, second contains a sequence of sequences containing values per tick""" counter_names = self._results[kind][inst].keys() ticks = izip(*[self._results[kind][inst][counter_name] for counter_name in counter_names]) return (counter_names, ticks) def results(self): """Return the results dict""" return self._results def __repr__(self): """Return the counter paths""" return str(self.paths) def __str__(self): """If counter is finished, print out results, else print out the counter paths""" if not hasattr(self, "_results"): return self.__repr__() else: str_file = StringIO() try: for kind_key, inst_dict in self._results.iteritems(): print>>str_file, kind_key for inst_key in inst_dict.iterkeys(): print>>str_file, "\t%s" % inst_key counters,ticks = self.instticks(kind_key, inst_key) print>>str_file, "\t", for counter in counters: print>>str_file, " %s" % counter, else: print>>str_file, "\n" for tick in ticks: print>>str_file, "\t", for index,data in enumerate(tick): print>>str_file, str(data).rjust(len(counters[index])+4), else: print>>str_file, "\n", else: print>>str_file, "\n", print>>str_file, "\n", finally: results = str_file.getvalue() str_file.close() return results def csvsave(self, CSVFILE): """Save measurements in CSV format to the passed-in file object""" counter_triples = [] for kind in self._results.iterkeys(): for inst in self._results[kind].iterkeys(): for counter in self._results[kind][inst].iterkeys(): counter_triples.append((kind, inst, counter)) kind,inst,counter = counter_triples[0] tick_count = len(self._results[kind][inst][counter]) csv_writer = csv.writer(CSVFILE) csv_writer.writerow(["\\%s\\%s" % (interfacename(kind,inst), counter) for kind,inst,counter in counter_triples]) for tick_index in xrange(tick_count): csv_writer.writerow([self._results[kind][inst][counter][tick_index] for kind,inst,counter in counter_triples]) def csvoutput(self): """Output data in CSV format as a string""" FILE = StringIO() try: self.csvsave(FILE) results = FILE.getvalue() finally: FILE.close() return results def picklesave(self, PICKLEFILE): """Save measurements dict in a pickle file""" try: pickle.dump(self._results, PICKLEFILE, -1) finally: PICKLEFILE.close()
Chapter12/libbots/utils.py
feiwang20/DRLHandsOn-Playground
2,497
11127689
import string
from nltk.translate import bleu_score
from nltk.tokenize import TweetTokenizer


def calc_bleu_many(cand_seq, ref_sequences):
    sf = bleu_score.SmoothingFunction()
    return bleu_score.sentence_bleu(ref_sequences, cand_seq,
                                    smoothing_function=sf.method1,
                                    weights=(0.5, 0.5))


def calc_bleu(cand_seq, ref_seq):
    return calc_bleu_many(cand_seq, [ref_seq])


def tokenize(s):
    return TweetTokenizer(preserve_case=False).tokenize(s)


def untokenize(words):
    return "".join([" " + i if not i.startswith("'") and i not in string.punctuation
                    else i for i in words]).strip()
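A small usage sketch for the helpers above; the sentences are illustrative, not taken from the repository. With the bigram weights used here, an exact token match scores 1.0:

    cand = tokenize("the cat sat on the mat")
    ref = tokenize("the cat sat on the mat")
    print(calc_bleu(cand, ref))    # 1.0 for an exact match
    print(untokenize(cand))        # "the cat sat on the mat"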
tests/queue_test.py
maratsarbasov/aioprocessing
517
11127698
<reponame>maratsarbasov/aioprocessing import unittest from concurrent.futures import ProcessPoolExecutor import aioprocessing from aioprocessing.mp import Process, Event, util from ._base_test import BaseTest, _GenMixin def queue_put(q, val): val = q.put(val) return val def queue_get(q, e): val = q.get() e.set() q.put(val) class GenQueueMixin(_GenMixin): def setUp(self): super().setUp() self.inst = self.Obj() self.meth = "coro_get" def _after(self): self.inst.put(1) class GenAioQueueTest(GenQueueMixin, BaseTest): def setUp(self): self.Obj = aioprocessing.AioQueue super().setUp() class GenAioSimpleQueueTest(GenQueueMixin, BaseTest): def setUp(self): self.Obj = aioprocessing.AioSimpleQueue super().setUp() class GenAioJoinableQueueTest(GenQueueMixin, BaseTest): def setUp(self): self.Obj = aioprocessing.AioJoinableQueue super().setUp() class QueueTest(BaseTest): def test_blocking_put(self): q = aioprocessing.AioQueue() async def queue_put(): await q.coro_put(1) self.loop.run_until_complete(queue_put()) self.assertEqual(q.get(), 1) def test_put_get(self): q = aioprocessing.AioQueue() val = 1 p = Process(target=queue_put, args=(q, val)) async def queue_get(): ret = await q.coro_get() self.assertEqual(ret, val) p.start() self.loop.run_until_complete(queue_get()) p.join() def test_get_put(self): q = aioprocessing.AioQueue() e = Event() val = 2 async def queue_put(): await q.coro_put(val) p = Process(target=queue_get, args=(q, e)) p.start() self.loop.run_until_complete(queue_put()) e.wait() out = q.get() p.join() self.assertEqual(out, val) def test_simple_queue(self): q = aioprocessing.AioSimpleQueue() val = 8 async def queue_put(): await q.coro_put(val) self.loop.run_until_complete(queue_put()) out = q.get() self.assertEqual(val, out) class ManagerQueueTest(BaseTest): @unittest.skipIf( "multiprocess.util" in str(util), "concurrent.futures is not yet supported by uqfoundation " "(https://github.com/uqfoundation/pathos/issues/90)" ) def test_executor(self): m = aioprocessing.AioManager() q = m.AioQueue() p = ProcessPoolExecutor(max_workers=1) val = 4 def submit(): yield p.submit(queue_put, q, val) next(submit()) async def queue_get(): out = await q.coro_get() self.assertEqual(out, val) await q.coro_put(5) self.loop.run_until_complete(queue_get()) returned = q.get() self.assertEqual(returned, 5) p.shutdown() class JoinableQueueTest(BaseTest): def test_join_empty_queue(self): q = aioprocessing.AioJoinableQueue() async def join(): await q.coro_join() self.loop.run_until_complete(join()) if __name__ == "__main__": unittest.main()
tests/pytests/unit/states/test_redismod.py
babs/salt
9,425
11127711
""" :codeauthor: <NAME> <<EMAIL>> """ import pytest import salt.states.redismod as redismod from tests.support.mock import MagicMock, patch @pytest.fixture def configure_loader_modules(): return {redismod: {}} def test_string(): """ Test to ensure that the key exists in redis with the value specified. """ name = "key_in_redis" value = "string data" ret = { "name": name, "changes": {}, "result": True, "comment": "Key already set to defined value", } mock = MagicMock(return_value=value) with patch.dict(redismod.__salt__, {"redis.get_key": mock}): assert redismod.string(name, value) == ret def test_absent(): """ Test to ensure key absent from redis. """ name = "key_in_redis" ret = {"name": name, "changes": {}, "result": True, "comment": ""} mock = MagicMock(side_effect=[False, True, True]) mock_t = MagicMock(return_value=False) with patch.dict(redismod.__salt__, {"redis.exists": mock, "redis.delete": mock_t}): comt = "`keys` not formed as a list type" ret.update({"comment": comt, "result": False}) assert redismod.absent(name, "key") == ret comt = "Key(s) specified already absent" ret.update({"comment": comt, "result": True}) assert redismod.absent(name, ["key"]) == ret comt = "Keys deleted" ret.update({"comment": comt, "changes": {"deleted": ["key"]}}) assert redismod.absent(name, ["key"]) == ret comt = "Key deleted" ret.update({"comment": comt, "changes": {"deleted": ["key_in_redis"]}}) assert redismod.absent(name) == ret
webstruct/features/block_features.py
vishalbelsare/webstruct
210
11127725
# -*- coding: utf-8 -*-
from __future__ import absolute_import

__all__ = ['parent_tag', 'InsideTag', 'borders', 'block_length']


def _inside_tag(elem, tagname):
    """
    >>> from lxml.html import fragment_fromstring
    >>> root = fragment_fromstring('<div><i>foo</i><strong><p>head 1</p></strong></div>')
    >>> elem = list(root.iter('p'))[0]
    >>> _inside_tag(elem, 'strong')
    True
    >>> _inside_tag(elem, 'div')
    True
    >>> _inside_tag(elem, 'p')
    True
    >>> _inside_tag(elem, 'span')
    False
    >>> _inside_tag(elem, 'i')
    False
    """
    if elem.tag == tagname:
        return True
    return any(e is not None for e in elem.iterancestors(tagname))


def parent_tag(html_token):
    return {'parent_tag': html_token.parent.tag}


class InsideTag(object):
    def __init__(self, tagname):
        self.tagname = tagname
        self.key = 'inside_tag_' + tagname

    def __call__(self, html_token):
        return {self.key: _inside_tag(html_token.elem, self.tagname)}


def borders(html_token):
    return {
        'border_at_left': html_token.index == 0,
        'border_at_right': html_token.index == len(html_token.tokens) - 1,
    }


def block_length(html_token):
    block_len = len(html_token.tokens)
    if block_len == 1:
        bl = '1'
    elif 1 < block_len <= 10:
        bl = 'short'
    elif 10 < block_len <= 20:
        bl = 'medium'
    else:
        bl = 'large'
    return {'block_length': bl}
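A quick sketch of how the token-level features behave, using a stand-in object that carries only the attributes these functions read (index and tokens); webstruct's real HtmlToken exposes more than this:

    from collections import namedtuple

    FakeToken = namedtuple("FakeToken", "index tokens elem parent")
    words = ["Acme", "Inc", "was", "founded", "in", "1999", "in", "Springfield",
             "by", "Jane", "Doe", "."]
    tok = FakeToken(index=0, tokens=words, elem=None, parent=None)

    print(borders(tok))        # {'border_at_left': True, 'border_at_right': False}
    print(block_length(tok))   # {'block_length': 'medium'}  (12 tokens falls in 10 < n <= 20)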
components/aws/sagemaker/tests/unit_tests/tests/common/dummy_component.py
Strasser-Pablo/pipelines
2,860
11127734
<reponame>Strasser-Pablo/pipelines<filename>components/aws/sagemaker/tests/unit_tests/tests/common/dummy_component.py
from tests.unit_tests.tests.common.dummy_spec import (
    DummyInputs,
    DummyOutputs,
    DummySpec,
)

from common.sagemaker_component import ComponentMetadata, SageMakerComponent


@ComponentMetadata(
    name="Dummy component", description="Dummy description", spec=DummySpec
)
class DummyComponent(SageMakerComponent):
    pass
theseus/utils/tests/test_utils.py
jeffin07/theseus
236
11127800
<gh_stars>100-1000 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import numpy as np import pytest # noqa: F401 import torch import torch.nn as nn import theseus.utils as thutils def test_build_mlp(): # set seed for mlp torch.manual_seed(0) # create name to class map for activation function act_name_to_cls_map = {"relu": nn.ReLU, "elu": nn.ELU, "tanh": nn.Tanh} # set test parameters test_hidden_depth = [0, 1, 2] input_dim = 3 hidden_dim = 4 output_dim = 5 batch_size = 16 for hidden_depth in test_hidden_depth: for act_name in act_name_to_cls_map.keys(): sample_mlp = thutils.build_mlp( input_dim, hidden_dim, output_dim, hidden_depth, act_name ) # check depth by counting linear modules layer_count = 0 for curr_mod in sample_mlp: if isinstance(curr_mod, nn.Linear): layer_count += 1 created_depth = layer_count - 1 assert ( created_depth == hidden_depth ), f"Incorrect number of layers: {created_depth} should be {hidden_depth}" # check input and output sizes x = torch.rand(batch_size, input_dim) y = sample_mlp(x) # automatically tests input size assert ( y.size(0) == batch_size and y.size(1) == output_dim ), "Incorrect output tensor created" # check size of each layer in mlp def _get_layer_sizes(layer_id): layer_input_dim, layer_output_dim = hidden_dim, hidden_dim if layer_id == 0: layer_input_dim = input_dim if layer_id == created_depth: layer_output_dim = output_dim return layer_input_dim, layer_output_dim layer_id = 0 for curr_mod in sample_mlp: if isinstance(curr_mod, nn.Linear): layer_input_dim, layer_output_dim = _get_layer_sizes(layer_id) x = torch.rand(batch_size, layer_input_dim) y = curr_mod(x) assert ( y.size(0) == batch_size and y.size(1) == layer_output_dim ), f"Incorrect hidden layer dimensions at layer {layer_id}" layer_id += 1 # check activation function # assume all non Linear layers must be activation layers act_cls = act_name_to_cls_map[act_name] for curr_mod in sample_mlp: if not isinstance(curr_mod, nn.Linear): assert isinstance( curr_mod, act_cls ), f"Incorrect activation class: {curr_mod} should be {act_cls}" def test_gather_from_rows_cols(): rng = np.random.default_rng(0) generator = torch.Generator() generator.manual_seed(0) for _ in range(100): batch_size = rng.integers(1, 8) num_rows = rng.integers(1, 20) num_cols = rng.integers(1, 20) num_points = rng.integers(1, 100) matrix = torch.randn((batch_size, num_rows, num_cols)) rows = torch.randint(0, num_rows, size=(batch_size, num_points)) cols = torch.randint(0, num_cols, size=(batch_size, num_points)) res = thutils.gather_from_rows_cols(matrix, rows, cols) assert res.shape == (batch_size, num_points) for i in range(batch_size): for j in range(num_points): assert torch.allclose(res[i, j], matrix[i, rows[i, j], cols[i, j]])
Python/03 - Strings/06 - String Validators.py
srgeyK87/Hacker-Rank-30-days-challlenge
275
11127806
<gh_stars>100-1000
# ========================
# Information
# ========================

# Direct Link: https://www.hackerrank.com/challenges/string-validators/problem
# Difficulty: Easy
# Max Score: 10
# Language: Python

# ========================
# Solution
# ========================

if __name__ == '__main__':
    s = input()
    print(any(i.isalnum() for i in s))
    print(any(i.isalpha() for i in s))
    print(any(i.isdigit() for i in s))
    print(any(i.islower() for i in s))
    print(any(i.isupper() for i in s))
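For illustration, running the checks on the string "qA2" (a typical sample for this problem; the exact judge input is not reproduced here) prints True for every line, since the string contains an alphanumeric character, a letter, a digit, a lowercase letter, and an uppercase letter:

    s = "qA2"
    print(any(i.isalnum() for i in s))   # True
    print(any(i.isalpha() for i in s))   # True
    print(any(i.isdigit() for i in s))   # True
    print(any(i.islower() for i in s))   # True
    print(any(i.isupper() for i in s))   # True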
watermask_remover_and_split_data/tools/preprocess_for_test.py
ericosmic/2019-CCF-BDCI-OCR-MCZJ-OCR-IdentificationIDElement
886
11127853
<filename>watermask_remover_and_split_data/tools/preprocess_for_test.py
import cv2
import numpy as np
import os
from multiprocessing import Pool


def _resize_image(img, dst_height):
    h_old = img.shape[0]
    w_old = img.shape[1]
    height = dst_height
    width = int(w_old * height / h_old)
    resized_img = cv2.resize(img, (width, height), interpolation=cv2.INTER_CUBIC)
    return resized_img


def preprocess_one_img(img):
    resize_img = _resize_image(img, 32)  # resize the image to the target height
    # Filter the image: normalize, smooth with a bilateral filter, then sharpen
    resize_img = cv2.normalize(resize_img, dst=None, alpha=230, beta=20, norm_type=cv2.NORM_MINMAX)
    resize_img = cv2.bilateralFilter(src=resize_img, d=3, sigmaColor=200, sigmaSpace=10)
    resize_img = cv2.filter2D(resize_img, -1,
                              kernel=np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]], np.float32))
    return resize_img


def cv_imread(image_path):
    cv_img = cv2.imdecode(np.fromfile(image_path, dtype=np.uint8), -1)
    return cv_img


def cv_imwrite(write_path, img):
    cv2.imencode('.jpg', img, )[1].tofile(write_path)
    return


def preprocess_imgs(img_path, save_path, pool_num):
    """
    :param img_path: path of the images to process
    :param save_path: output path for the processed images
    :param pool_num: number of worker processes
    Description: applies filtering and resizing to the images (the model constrains the
    input image size) in order to improve recognition accuracy.
    """
    img_names = os.listdir(img_path)
    if not os.path.exists(img_path):
        print("not exists ", img_path, " exit...")
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    params = []
    for img_name in img_names:
        params.append((img_path, save_path, img_name))
    if pool_num > 0:
        pool = Pool(pool_num)
        pool.map(pre_run, params)
        pool.close()
        pool.join()
    else:
        for param in params:
            pre_run(param)


def pre_run(params):
    run(params[0], params[1], params[2])


def run(img_path, save_path, img_name):
    img = cv_imread(os.path.join(img_path, img_name))
    img_blurred = preprocess_one_img(img)
    cv_imwrite(os.path.join(save_path, img_name), img_blurred)
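A hedged usage sketch for the preprocessing entry point above; the directory names are placeholders, not paths from the project:

    if __name__ == '__main__':
        # Process every image in ./test_imgs with 4 worker processes, writing the
        # filtered, 32-pixel-high results to ./test_imgs_preprocessed.
        preprocess_imgs('./test_imgs', './test_imgs_preprocessed', pool_num=4)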
chrome/common/extensions/docs/server2/cache_chain_object_store.py
nagineni/chromium-crosswalk
231
11127887
# Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. from future import Future from object_store import ObjectStore class _GetMultiFuture(object): '''A Future for GetMulti. Params: - |toplevel_cache| CacheChainObjectStore's cache. - |object_store_futures| a list of (object store, future) pairs, where future is the result of calling GetMulti on the missing keys for the object store. - |cached_items| a mapping of cache items already in memory. - |missing_keys| the keys that were missing from the GetMulti call ''' def __init__(self, toplevel_cache, object_store_futures, cached_items, missing_keys): self._toplevel_cache = toplevel_cache self._object_store_futures = object_store_futures self._results_so_far = cached_items self._missing_keys = missing_keys def Get(self): # Approach: # # Try each object store in order, until there are no more missing keys. # Don't realise the Future value of an object store that we don't need to; # this is important e.g. to avoid querying data store constantly. # # When a value is found, cache it in all object stores further up the # chain, including the object-based cache on CacheChainObjectStore. object_store_updates = [] for object_store, object_store_future in self._object_store_futures: if len(self._missing_keys) == 0: break result = object_store_future.Get() for k, v in result.items(): # use items(); changes during iteration if v is None or k not in self._missing_keys: del result[k] continue self._toplevel_cache[k] = v self._results_so_far[k] = v self._missing_keys.remove(k) for _, updates in object_store_updates: updates.update(result) object_store_updates.append((object_store, {})) # Update the caches of all object stores that need it. for object_store, updates in object_store_updates: if updates: object_store.SetMulti(updates) return self._results_so_far class CacheChainObjectStore(ObjectStore): '''Maintains an in-memory cache along with a chain of other object stores to try for the same keys. This is useful for implementing a multi-layered cache. The in-memory cache is inbuilt since it's synchronous, but the object store interface is asynchronous. The rules for the object store chain are: - When setting (or deleting) items, all object stores in the hierarcy will have that item set. - When getting items, the behaviour depends on |start_empty|. - If false, each object store is tried in order. The first object store to find the item will trickle back up, setting it on all object stores higher in the hierarchy. - If true, only the first in-memory cache is checked, as though the store had been initialized with no content as opposed to the union of its delegate stores. 
''' def __init__(self, object_stores, start_empty=False): self._object_stores = object_stores self._start_empty = start_empty self._cache = {} def SetMulti(self, mapping): self._cache.update(mapping) for object_store in self._object_stores: object_store.SetMulti(mapping) def GetMulti(self, keys): missing_keys = list(keys) cached_items = {} for key in keys: if key in self._cache: cached_items[key] = self._cache.get(key) missing_keys.remove(key) if len(missing_keys) == 0 or self._start_empty: return Future(value=cached_items) object_store_futures = [(object_store, object_store.GetMulti(missing_keys)) for object_store in self._object_stores] return Future(delegate=_GetMultiFuture( self._cache, object_store_futures, cached_items, missing_keys)) def DelMulti(self, keys): for k in keys: self._cache.pop(k, None) for object_store in self._object_stores: object_store.DelMulti(keys)
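A minimal usage sketch of the chain semantics described in the docstring above; the two delegate stores are hypothetical ObjectStore implementations (for example a memcache-backed and a datastore-backed store), not classes defined in this file:

    store = CacheChainObjectStore([memcache_store, datastore_store])
    store.SetMulti({'key': 'value'})        # written to the in-memory cache and to both delegates
    items = store.GetMulti(['key']).Get()   # served from the in-memory cache when possible
    store.DelMulti(['key'])                 # removed everywhere in the chain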
networks/scitile/pd_solve/gradtest.py
ZhouXiaolin/plaidml
4,535
11127916
import numpy as np import os import sys import plaidml2 import plaidml2.edsl as edsl import plaidml2.exec as pld_exec import plaidml2.op as op import unittest import numpy.testing as npt def matmul_2_2(A, B): I, J, K = edsl.TensorDims(3) i, j, k = edsl.TensorIndexes(3) A.bind_dims(I, J) B.bind_dims(J, K) C = edsl.TensorOutput(I, K) C[(i, k)] += A[i, j] * B[j, k] return C def matmul_2_1(A, b): I, J = edsl.TensorDims(2) i, j = edsl.TensorIndexes(2) A.bind_dims(I, J) b.bind_dims(J) C = edsl.TensorOutput(I) C[(i)] += A[i, j] * b[j] return C def dist(a, b): I, J = edsl.TensorDims(2) i, j = edsl.TensorIndexes(2) a.bind_dims(I) neg = -b neg.bind_dims(J) C = edsl.TensorOutput(I, J) C[(i, j)] = a[i] + neg[j] return C def get_jacobian(Is, I_dat, O, wrt): dy = edsl.jacobian(O, [wrt])[0] program = edsl.Program('program', [O, dy]) binder = pld_exec.Binder(program) executable = binder.compile() for i in range(len(Is)): binder.input(Is[i]).copy_from_ndarray(I_dat[i]) executable.run() return binder.output(dy).as_ndarray() class GradTest(unittest.TestCase): def test_ident(self): np_x = np.array([1, 2, 3]) dtype = plaidml2.DType.FLOAT32 x = edsl.Tensor(edsl.LogicalShape(dtype, np_x.shape)) test_result = get_jacobian([x], [np_x], x, x) true_result = np.eye(3) npt.assert_allclose(test_result, true_result) def test_square(self): np_x = np.array([1, 2, 3]) dtype = plaidml2.DType.FLOAT32 x = edsl.Tensor(edsl.LogicalShape(dtype, np_x.shape)) y = op.square(x) test_result = get_jacobian([x], [np_x], y, x) true_result = np.array([[2, 0, 0], [0, 4, 0], [0, 0, 6]]) npt.assert_allclose(test_result, true_result) def test_assign(self): np_x = np.array([1, 2, 3]) np_b = np.array([1, 1, 1]) dtype = plaidml2.DType.FLOAT32 x = edsl.Tensor(edsl.LogicalShape(dtype, np_x.shape)) b = edsl.Tensor(edsl.LogicalShape(dtype, np_b.shape)) y = op.square(dist(x, b)) test_result = get_jacobian([x, b], [np_x, np_b], y, x) true_result = np.zeros((3, 3, 3)) true_result[0, :, 0] = 0 true_result[1, :, 1] = 2 true_result[2, :, 2] = 4 npt.assert_allclose(test_result, true_result) def test_matmul_2_1(self): np_A = np.array([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]]) np_x = np.array([1., 2., 3.]) dtype = plaidml2.DType.FLOAT32 A = edsl.Tensor(edsl.LogicalShape(dtype, np_A.shape)) x = edsl.Tensor(edsl.LogicalShape(dtype, np_x.shape)) y = matmul_2_1(A, x) test_result = get_jacobian([A, x], [np_A, np_x], y, x) true_result = np_A npt.assert_allclose(test_result, true_result) def test_matmul_2_2(self): np_A = np.array([[1., 2.], [3., 4.]]) np_x = np.array([[5., 6.], [7., 8.]]) dtype = plaidml2.DType.FLOAT32 A = edsl.Tensor(edsl.LogicalShape(dtype, np_A.shape)) x = edsl.Tensor(edsl.LogicalShape(dtype, np_x.shape)) y = matmul_2_2(A, x) test_result = get_jacobian([A, x], [np_A, np_x], y, x) true_result = np.array([[[[1, 0], [2, 0]], [[0, 1], [0, 2]]], [[[3, 0], [4, 0]], [[0, 3], [0, 4]]]]) npt.assert_allclose(test_result, true_result) def test_chain(self): np_x = np.array([1., 2., 3.]) np_A = np.array([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]]) dtype = plaidml2.DType.FLOAT32 A = edsl.Tensor(edsl.LogicalShape(dtype, np_A.shape)) x = edsl.Tensor(edsl.LogicalShape(dtype, np_x.shape)) y = matmul_2_2(A, dist(x, x)) J_test = get_jacobian([A, x], [np_A, np_x], y, x) J_true = np.zeros((3, 3, 3)) J_true[:, :, 0] = [[-5, 1, 1], [-11, 4, 4], [-17, 7, 7]] J_true[:, :, 1] = [[2, -4, 2], [5, -10, 5], [8, -16, 8]] J_true[:, :, 2] = [[3, 3, -3], [6, 6, -9], [9, 9, -15]] npt.assert_allclose(J_true, J_test) if __name__ == '__main__': unittest.main()
mpf/commands/build.py
haggispinball/mpf_fathom_fast
163
11127958
<filename>mpf/commands/build.py
"""Command to build artifacts for non-dev operations."""
import pickle
import argparse

from mpf.core.utility_functions import Util
from mpf.core.config_loader import YamlMultifileConfigLoader, ProductionConfigLoader
from mpf.commands import MpfCommandLineParser

SUBCOMMAND = True


class Command(MpfCommandLineParser):

    """Build artifacts."""

    def __init__(self, args, path):
        """Parse args."""
        command_name = args.pop(1)
        super().__init__(args=args, path=path)
        machine_path, remaining_args = self.parse_args()
        self.machine_path = machine_path
        self.args = remaining_args

        parser = argparse.ArgumentParser(description='Build MPF production config.')

        parser.add_argument("-c",
                            action="store", dest="configfile",
                            default="config.yaml", metavar='config_file',
                            help="The name of a config file to load. Default is config.yaml. "
                                 "Multiple files can be used via a comma-separated list "
                                 "(no spaces between)")

        parser.add_argument("-b",
                            action="store_false", dest="mc", default=True,
                            help="Builds a production config for MPF only, without MC.")

        parser.add_argument("--dest-path",
                            action="store", dest="dest_path", default=False,
                            help="Path to set as machine_path on the production bundle. May "
                                 "be different than the machine_path on the current machine.")

        self.args = parser.parse_args(remaining_args)
        self.args.configfile = Util.string_to_event_list(self.args.configfile)

        method = getattr(self, command_name)
        method()

    def production_bundle(self):
        """Create a production bundle."""
        config_loader = YamlMultifileConfigLoader(self.machine_path, self.args.configfile, False, False)
        mpf_config = config_loader.load_mpf_config()
        if self.args.mc:
            mc_config = config_loader.load_mc_config()

        if self.args.dest_path:
            mpf_config.set_machine_path(self.args.dest_path)

        pickle.dump(mpf_config, open(ProductionConfigLoader.get_mpf_bundle_path(self.machine_path), "wb"))
        if self.args.mc:
            pickle.dump(mc_config, open(ProductionConfigLoader.get_mpf_mc_bundle_path(self.machine_path), "wb"))

        print("Success.")
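The production_bundle step above boils down to pickling the loaded config objects into bundle files that a production loader reads back later. A minimal, MPF-independent sketch of that round trip (the file name and payload below are illustrative stand-ins, not MPF APIs):

import pickle

config = {"machine_path": "/opt/mpf/machine", "hardware": {"platform": "smart_virtual"}}  # stand-in payload

with open("mpf_config.bundle", "wb") as f:   # hypothetical bundle path
    pickle.dump(config, f)

with open("mpf_config.bundle", "rb") as f:
    restored = pickle.load(f)

assert restored == config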
admintools/utils.py
goztrk/django-htk
206
11127968
# Python Standard Library Imports

# Third Party (PyPI) Imports
import rollbar

# HTK Imports
from htk.admintools.cachekeys import HtkCompanyEmployeesCache
from htk.admintools.cachekeys import HtkCompanyOfficersCache
from htk.apps.accounts.utils import get_user_by_email
from htk.utils import htk_setting
from htk.utils.request import get_current_request


def get_company_officers_id_email_map():
    """Gets a mapping of company officers

    Returns a dictionary mapping User ids to emails
    """
    c = HtkCompanyOfficersCache()
    officers_map = c.get()
    if officers_map is None:
        officers_map = {}
        for email in htk_setting('HTK_COMPANY_OFFICER_EMAILS'):
            user = get_user_by_email(email)
            if user:
                officers_map[user.id] = email
        c.cache_store(officers_map)
    return officers_map


def get_company_employees_id_email_map():
    """Gets a mapping of company employees

    Returns a dictionary mapping User ids to emails
    """
    c = HtkCompanyEmployeesCache()
    employees_map = c.get()
    if employees_map is None:
        employees_map = {}
        for email in htk_setting('HTK_COMPANY_EMPLOYEE_EMAILS'):
            user = get_user_by_email(email)
            if user:
                employees_map[user.id] = email
        c.cache_store(employees_map)
    return employees_map


def is_allowed_to_emulate_users(user):
    """Determines whether `user` is allowed to emulate other users
    """
    allowed = False
    if user is not None and user.is_authenticated:
        try:
            user_profile = user.profile
            if user_profile.is_company_officer:
                allowed = True
        except:
            request = get_current_request()
            rollbar.report_exc_info(request=request)
    return allowed


def is_allowed_to_emulate(original_user, targeted_user):
    """Determines whether `original_user` is allowed to emulate `targeted_user`
    """
    allowed = original_user and \
        is_allowed_to_emulate_users(original_user) and \
        targeted_user and \
        not targeted_user.profile.is_company_officer
    return allowed
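Both lookup functions above follow the same cache-aside pattern: return the cached map when present, otherwise rebuild it from settings and store it. A framework-free sketch of that pattern (every name below is an illustrative stand-in for the HTK cache and user lookup, not part of django-htk):

from collections import namedtuple

User = namedtuple("User", ["id", "email"])   # stand-in for the Django user model
_cache = {}                                  # stand-in for HtkCompanyOfficersCache

def get_officers_map(officer_emails, lookup_user):
    officers_map = _cache.get("officers")
    if officers_map is None:
        officers_map = {}
        for email in officer_emails:
            user = lookup_user(email)
            if user:
                officers_map[user.id] = email
        _cache["officers"] = officers_map    # populate the cache on a miss
    return officers_map

lookup = lambda email: User(id=hash(email) % 1000, email=email)
print(get_officers_map(["ceo@example.com"], lookup))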
unittest/python/bindings.py
thanhndv212/pinocchio
716
11128011
<reponame>thanhndv212/pinocchio import unittest import pinocchio as pin from pinocchio.utils import np, npl, rand, zero from test_case import PinocchioTestCase as TestCase # This whole file seems to be outdated and superseded by more recent tests # Probably it should be removed and its contents moved or split somewhere else class TestSE3(TestCase): def setUp(self): self.R = rand([3, 3]) self.R, _, _ = npl.svd(self.R) self.p = rand(3) self.m = pin.SE3(self.R, self.p) def test_se3(self): R, p, m = self.R, self.p, self.m X = np.vstack([np.hstack([R, pin.skew(p).dot(R)]), np.hstack([zero([3, 3]), R])]) self.assertApprox(m.action, X) M = np.vstack([np.hstack([R, np.expand_dims(p,1)]), np.array([[0., 0., 0., 1.]])]) self.assertApprox(m.homogeneous, M) m2 = pin.SE3.Random() self.assertApprox((m * m2).homogeneous, m.homogeneous.dot(m2.homogeneous)) self.assertApprox((~m).homogeneous, npl.inv(m.homogeneous)) p = rand(3) self.assertApprox(m * p, m.rotation.dot(p) + m.translation) self.assertApprox(m.actInv(p), m.rotation.T.dot(p) - m.rotation.T.dot(m.translation)) # Currently, the different cases do not throw the same exception type. # To have a more robust test, only Exception is checked. # In the comments, the most specific actual exception class at the time of writing p = rand(5) with self.assertRaises(Exception): # RuntimeError m * p with self.assertRaises(Exception): # RuntimeError m.actInv(p) with self.assertRaises(Exception): # Boost.Python.ArgumentError (subclass of TypeError) m.actInv('42') def test_motion(self): m = self.m self.assertApprox(pin.Motion.Zero().vector, zero(6)) v = pin.Motion.Random() self.assertApprox((m * v).vector, m.action.dot(v.vector)) self.assertApprox((m.actInv(v)).vector, npl.inv(m.action).dot(v.vector)) vv = v.linear vw = v.angular self.assertApprox(v.vector, np.concatenate([vv, vw])) self.assertApprox((v ^ v).vector, zero(6)) def test_force(self): m = self.m self.assertApprox(pin.Force.Zero().vector, zero(6)) f = pin.Force.Random() ff = f.linear ft = f.angular self.assertApprox(f.vector, np.concatenate([ff, ft])) self.assertApprox((m * f).vector, npl.inv(m.action.T).dot(f.vector)) self.assertApprox((m.actInv(f)).vector, m.action.T.dot(f.vector)) v = pin.Motion.Random() f = pin.Force(np.concatenate([v.vector[3:], v.vector[:3]])) self.assertApprox((v ^ f).vector, zero(6)) def test_inertia(self): m = self.m Y1 = pin.Inertia.Random() Y2 = pin.Inertia.Random() Y = Y1 + Y2 self.assertApprox(Y1.matrix() + Y2.matrix(), Y.matrix()) v = pin.Motion.Random() self.assertApprox((Y * v).vector, Y.matrix().dot(v.vector)) self.assertApprox((m * Y).matrix(), m.inverse().action.T.dot(Y.matrix()).dot(m.inverse().action)) self.assertApprox((m.actInv(Y)).matrix(), m.action.T.dot(Y.matrix()).dot(m.action)) def test_cross(self): m = pin.Motion.Random() f = pin.Force.Random() self.assertApprox(m ^ m, m.cross(m)) self.assertApprox(m ^ f, m.cross(f)) with self.assertRaises(TypeError): m ^ 2 if __name__ == '__main__': unittest.main()
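test_se3 above checks, among other things, that acting with an SE3 on a point equals R·p + t and that homogeneous matrices compose and invert consistently. A plain NumPy sketch of those two identities, independent of Pinocchio (the random orthonormal R is only for illustration):

import numpy as np

R, _ = np.linalg.qr(np.random.rand(3, 3))   # orthonormal 3x3; sign of det is irrelevant here
t = np.random.rand(3)

M = np.eye(4)                               # homogeneous transform [R t; 0 1]
M[:3, :3] = R
M[:3, 3] = t

p = np.random.rand(3)
np.testing.assert_allclose((M @ np.append(p, 1.0))[:3], R @ p + t)

M2 = np.eye(4)
M2[:3, :3], M2[:3, 3] = R.T, np.random.rand(3)
np.testing.assert_allclose(np.linalg.inv(M @ M2), np.linalg.inv(M2) @ np.linalg.inv(M))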
models/layers.py
tjtanaa/ml-gsn
202
11128015
import math import torch from torch import nn import torch.nn.functional as F from .op import FusedLeakyReLU, fused_leaky_relu, upfirdn2d class PixelNorm(nn.Module): """Pixel normalization layer. Normalizes feature maps along the pixel dimension. Used to prevent explosion of pixel magnitude. """ def __init__(self): super().__init__() def forward(self, input): return input * torch.rsqrt(torch.mean(input ** 2, dim=1, keepdim=True) + 1e-8) class ConstantInput(nn.Module): """Constant input layer. A learned constant input used to start the generation. Args: ---- channel: int Number of channels. size: int Spatial dimension of constant input. """ def __init__(self, channel, size=4, ndim=2): super().__init__() res = (size,) * ndim self.input = nn.Parameter(torch.randn(1, channel, *res)) def forward(self, input): batch = input.shape[0] # out = self.input.repeat(batch, 1, 1, 1) out = torch.repeat_interleave(self.input, batch, dim=0) return out def make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if k.ndim == 1: k = k[None, :] * k[:, None] k /= k.sum() return k class Blur(nn.Module): """Blur layer. Applies a blur kernel to input image using finite impulse response filter. Blurring feature maps after convolutional upsampling or before convolutional downsampling helps produces models that are more robust to shifting inputs (https://richzhang.github.io/antialiased-cnns/). In the context of GANs, this can provide cleaner gradients, and therefore more stable training. Args: ---- kernel: list, int A list of integers representing a blur kernel. For exmaple: [1, 3, 3, 1]. pad: tuple, int A tuple of integers representing the number of rows/columns of padding to be added to the top/left and the bottom/right respectively. upsample_factor: int Upsample factor. """ def __init__(self, kernel, pad, upsample_factor=1): super().__init__() kernel = make_kernel(kernel) if upsample_factor > 1: kernel = kernel * (upsample_factor ** 2) self.register_buffer("kernel", kernel) self.pad = pad def forward(self, input): out = upfirdn2d(input, self.kernel, pad=self.pad) return out class Upsample(nn.Module): """Upsampling layer. Perform upsampling using a blur kernel. Args: ---- kernel: list, int A list of integers representing a blur kernel. For exmaple: [1, 3, 3, 1]. factor: int Upsampling factor. """ def __init__(self, kernel=[1, 3, 3, 1], factor=2): super().__init__() self.factor = factor kernel = make_kernel(kernel) * (factor ** 2) self.register_buffer("kernel", kernel) p = kernel.shape[0] - factor pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 self.pad = (pad0, pad1) def forward(self, input): out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad=self.pad) return out class Downsample(nn.Module): """Downsampling layer. Perform downsampling using a blur kernel. Args: ---- kernel: list, int A list of integers representing a blur kernel. For exmaple: [1, 3, 3, 1]. factor: int Downsampling factor. """ def __init__(self, kernel=[1, 3, 3, 1], factor=2): super().__init__() self.factor = factor kernel = make_kernel(kernel) self.register_buffer("kernel", kernel) p = kernel.shape[0] - factor pad0 = (p + 1) // 2 pad1 = p // 2 self.pad = (pad0, pad1) def forward(self, input): out = upfirdn2d(input, self.kernel, up=1, down=self.factor, pad=self.pad) return out class EqualLinear(nn.Module): """Linear layer with equalized learning rate. During the forward pass the weights are scaled by the inverse of the He constant (i.e. sqrt(in_dim)) to prevent vanishing gradients and accelerate training. 
This constant only works for ReLU or LeakyReLU activation functions. Args: ---- in_channel: int Input channels. out_channel: int Output channels. bias: bool Use bias term. bias_init: float Initial value for the bias. lr_mul: float Learning rate multiplier. By scaling weights and the bias we can proportionally scale the magnitude of the gradients, effectively increasing/decreasing the learning rate for this layer. activate: bool Apply leakyReLU activation. """ def __init__(self, in_channel, out_channel, bias=True, bias_init=0, lr_mul=1, activate=False): super().__init__() self.weight = nn.Parameter(torch.randn(out_channel, in_channel).div_(lr_mul)) if bias: self.bias = nn.Parameter(torch.zeros(out_channel).fill_(bias_init)) else: self.bias = None self.activate = activate self.scale = (1 / math.sqrt(in_channel)) * lr_mul self.lr_mul = lr_mul def forward(self, input): if self.activate: out = F.linear(input, self.weight * self.scale) out = fused_leaky_relu(out, self.bias * self.lr_mul) else: out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul) return out def __repr__(self): return f"{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})" class EqualConv2d(nn.Module): """2D convolution layer with equalized learning rate. During the forward pass the weights are scaled by the inverse of the He constant (i.e. sqrt(in_dim)) to prevent vanishing gradients and accelerate training. This constant only works for ReLU or LeakyReLU activation functions. Args: ---- in_channel: int Input channels. out_channel: int Output channels. kernel_size: int Kernel size. stride: int Stride of convolutional kernel across the input. padding: int Amount of zero padding applied to both sides of the input. bias: bool Use bias term. """ def __init__(self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True): super().__init__() self.weight = nn.Parameter(torch.randn(out_channel, in_channel, kernel_size, kernel_size)) self.scale = 1 / math.sqrt(in_channel * kernel_size ** 2) self.stride = stride self.padding = padding if bias: self.bias = nn.Parameter(torch.zeros(out_channel)) else: self.bias = None def forward(self, input): out = F.conv2d(input, self.weight * self.scale, bias=self.bias, stride=self.stride, padding=self.padding) return out def __repr__(self): return ( f"{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]}," f" {self.weight.shape[2]}, stride={self.stride}, padding={self.padding})" ) class EqualConvTranspose2d(nn.Module): """2D transpose convolution layer with equalized learning rate. During the forward pass the weights are scaled by the inverse of the He constant (i.e. sqrt(in_dim)) to prevent vanishing gradients and accelerate training. This constant only works for ReLU or LeakyReLU activation functions. Args: ---- in_channel: int Input channels. out_channel: int Output channels. kernel_size: int Kernel size. stride: int Stride of convolutional kernel across the input. padding: int Amount of zero padding applied to both sides of the input. output_padding: int Extra padding added to input to achieve the desired output size. bias: bool Use bias term. 
""" def __init__(self, in_channel, out_channel, kernel_size, stride=1, padding=0, output_padding=0, bias=True): super().__init__() self.weight = nn.Parameter(torch.randn(in_channel, out_channel, kernel_size, kernel_size)) self.scale = 1 / math.sqrt(in_channel * kernel_size ** 2) self.stride = stride self.padding = padding self.output_padding = output_padding if bias: self.bias = nn.Parameter(torch.zeros(out_channel)) else: self.bias = None def forward(self, input): out = F.conv_transpose2d( input, self.weight * self.scale, bias=self.bias, stride=self.stride, padding=self.padding, output_padding=self.output_padding, ) return out def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[0]}, {self.weight.shape[1]},' f' {self.weight.shape[2]}, stride={self.stride}, padding={self.padding})' ) class ConvLayer2d(nn.Sequential): def __init__( self, in_channel, out_channel, kernel_size=3, upsample=False, downsample=False, blur_kernel=[1, 3, 3, 1], bias=True, activate=True, ): assert not (upsample and downsample), 'Cannot upsample and downsample simultaneously' layers = [] if upsample: factor = 2 p = (len(blur_kernel) - factor) - (kernel_size - 1) pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 + 1 layers.append( EqualConvTranspose2d( in_channel, out_channel, kernel_size, padding=0, stride=2, bias=bias and not activate ) ) layers.append(Blur(blur_kernel, pad=(pad0, pad1), upsample_factor=factor)) if downsample: factor = 2 p = (len(blur_kernel) - factor) + (kernel_size - 1) pad0 = (p + 1) // 2 pad1 = p // 2 layers.append(Blur(blur_kernel, pad=(pad0, pad1))) layers.append( EqualConv2d(in_channel, out_channel, kernel_size, padding=0, stride=2, bias=bias and not activate) ) if (not downsample) and (not upsample): padding = kernel_size // 2 layers.append( EqualConv2d(in_channel, out_channel, kernel_size, padding=padding, stride=1, bias=bias and not activate) ) if activate: layers.append(FusedLeakyReLU(out_channel, bias=bias)) super().__init__(*layers) class ConvResBlock2d(nn.Module): """2D convolutional residual block with equalized learning rate. Residual block composed of 3x3 convolutions and leaky ReLUs. Args: ---- in_channel: int Input channels. out_channel: int Output channels. upsample: bool Apply upsampling via strided convolution in the first conv. downsample: bool Apply downsampling via strided convolution in the second conv. """ def __init__(self, in_channel, out_channel, upsample=False, downsample=False): super().__init__() assert not (upsample and downsample), 'Cannot upsample and downsample simultaneously' mid_ch = in_channel if downsample else out_channel self.conv1 = ConvLayer2d(in_channel, mid_ch, upsample=upsample, kernel_size=3) self.conv2 = ConvLayer2d(mid_ch, out_channel, downsample=downsample, kernel_size=3) if (in_channel != out_channel) or upsample or downsample: self.skip = ConvLayer2d( in_channel, out_channel, upsample=upsample, downsample=downsample, kernel_size=1, activate=False, bias=False, ) def forward(self, input): out = self.conv1(input) out = self.conv2(out) if hasattr(self, 'skip'): skip = self.skip(input) out = (out + skip) / math.sqrt(2) else: out = (out + input) / math.sqrt(2) return out class ModulationLinear(nn.Module): """Linear modulation layer. This layer is inspired by the modulated convolution layer from StyleGAN2, but adapted to linear layers. Args: ---- in_channel: int Input channels. out_channel: int Output channels. z_dim: int Latent dimension. demodulate: bool Demudulate layer weights. 
activate: bool Apply LeakyReLU activation to layer output. bias: bool Add bias to layer output. """ def __init__( self, in_channel, out_channel, z_dim, demodulate=True, activate=True, bias=True, ): super().__init__() self.eps = 1e-8 self.in_channel = in_channel self.out_channel = out_channel self.z_dim = z_dim self.demodulate = demodulate self.scale = 1 / math.sqrt(in_channel) self.weight = nn.Parameter(torch.randn(out_channel, in_channel)) self.modulation = EqualLinear(z_dim, in_channel, bias_init=1, activate=False) if activate: # FusedLeakyReLU includes a bias term self.activate = FusedLeakyReLU(out_channel, bias=bias) elif bias: self.bias = nn.Parameter(torch.zeros(1, out_channel)) def __repr__(self): return f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, z_dim={self.z_dim})' def forward(self, input, z): # feature modulation gamma = self.modulation(z) # B, in_ch input = input * gamma weight = self.weight * self.scale if self.demodulate: # weight is out_ch x in_ch # here we calculate the standard deviation per input channel demod = torch.rsqrt(weight.pow(2).sum([1]) + self.eps) weight = weight * demod.view(-1, 1) # also normalize inputs input_demod = torch.rsqrt(input.pow(2).sum([1]) + self.eps) input = input * input_demod.view(-1, 1) out = F.linear(input, weight) if hasattr(self, 'activate'): out = self.activate(out) if hasattr(self, 'bias'): out = out + self.bias return out class ModulatedConv2d(nn.Module): """2D convolutional modulation layer. This layer was originally proposed in StyleGAN2 (https://arxiv.org/pdf/1912.04958.pdf) as a replacement for Adaptive Instance Normalization (AdaIN), which was shown to produce artifacts in generated samples. Args: ---- in_channel: int Input channels. out_channel: int Output channels. kernel_size: int Size of the convolutional kernel. z_dim: int Dimension of the latent code. demodulate: bool Demodulate layer weights. upsample: bool Output will be 2x scale of inputs. downsample: bool Outputs will be 0.5x scale of inputs. blur_kernel: list, int List of ints representing the blur kernel to use for blurring before/after convolution. activate: bool Apply LeakyReLU activation to layer output. bias: bool Add bias to layer output. 
""" def __init__( self, in_channel, out_channel, kernel_size, z_dim, demodulate=True, upsample=False, downsample=False, blur_kernel=[1, 3, 3, 1], activate=True, bias=True, ): super().__init__() self.eps = 1e-8 self.kernel_size = kernel_size self.in_channel = in_channel self.out_channel = out_channel self.z_dim = z_dim self.upsample = upsample self.downsample = downsample if upsample: factor = 2 p = (len(blur_kernel) - factor) - (kernel_size - 1) pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 + 1 self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor=factor) if downsample: factor = 2 p = (len(blur_kernel) - factor) + (kernel_size - 1) pad0 = (p + 1) // 2 pad1 = p // 2 self.blur = Blur(blur_kernel, pad=(pad0, pad1)) fan_in = in_channel * kernel_size ** 2 self.scale = 1 / math.sqrt(fan_in) self.padding = kernel_size // 2 self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)) self.modulation = EqualLinear(z_dim, in_channel, bias_init=1) self.demodulate = demodulate if activate: # FusedLeakyReLU includes a bias term self.activate = FusedLeakyReLU(out_channel, bias=bias) elif bias: self.bias = nn.Parameter(torch.zeros(1, out_channel, 1, 1)) def __repr__(self): return ( f"{self.__class__.__name__}({self.in_channel}, {self.out_channel}, kernel_size={self.kernel_size}, " f"z_dim={self.z_dim}, upsample={self.upsample}, downsample={self.downsample})" ) def forward(self, input, z): batch, in_channel, height, width = input.shape gamma = self.modulation(z).view(batch, 1, in_channel, 1, 1) weight = self.scale * self.weight * gamma if self.demodulate: demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-8) weight = weight * demod.view(batch, self.out_channel, 1, 1, 1) weight = weight.view(batch * self.out_channel, in_channel, self.kernel_size, self.kernel_size) if self.upsample: input = input.view(1, batch * in_channel, height, width) weight = weight.view(batch, self.out_channel, in_channel, self.kernel_size, self.kernel_size) weight = weight.transpose(1, 2).reshape( batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size ) out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) out = self.blur(out) elif self.downsample: input = self.blur(input) _, _, height, width = input.shape input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) else: input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=self.padding, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) if hasattr(self, 'activate'): out = self.activate(out) if hasattr(self, 'bias'): out = out + self.bias return out class ToRGB(nn.Module): """Output aggregation layer. In the original StyleGAN2 this layer aggregated RGB predictions across all resolutions, but it's been slightly adjusted here to work with outputs of any dimension. Args: ---- in_channel: int Input channels. out_channel: int Output channels. z_dim: int Latent code dimension. upsample: bool Upsample the aggregated outputs. 
""" def __init__(self, in_channel, out_channel, z_dim, upsample=True): super().__init__() if upsample: self.upsample = Upsample() self.conv = ModulatedConv2d( in_channel=in_channel, out_channel=out_channel, kernel_size=1, z_dim=z_dim, demodulate=False, activate=False, bias=True, ) def forward(self, input, z, skip=None): out = self.conv(input, z) if skip is not None: skip = self.upsample(skip) out = out + skip return out class ConvRenderBlock2d(nn.Module): """2D convolutional neural rendering block. This block takes a feature map generated from a NeRF-style MLP and upsamples it to a higher resolultion image, as done in GIRAFFE (https://arxiv.org/pdf/2011.12100.pdf). Inspired by StyleGAN2, this module uses skip connections (by summing RGB outputs at each layer) to improve gradient flow. GIRAFFE specifically uses small convolutional kernels a single conv layer per block to "avoid entangling global scene properties". Args: ---- in_channel: int Input channels. out_channel: int Output channels. mode: str Whether to use original GIRAFFE implementation or the modified implementation. Modified implementation uses conv transpose + blur as a learnable upsampling kernel, and replaces bilinear upsampling with blur upsampling. This is mainly so that we have consistency with the rest of the model. deep: bool Apply two convolutional layers in succession, as in StyleGAN2. Otherwise only apply a single convolution layer, as in GIRAFFE. """ def __init__(self, in_channel, out_channel, mode='blur', deep=False): super().__init__() self.mode = mode self.deep = deep # the first conv layer doesn't have bias because it is fused with the leakyReLU activation if mode == 'original': self.conv = EqualConv2d(in_channel, out_channel, kernel_size=3, stride=1, padding=1, bias=False) else: self.conv = EqualConvTranspose2d(in_channel, out_channel, kernel_size=3, stride=2, padding=0, bias=False) self.blur = Blur(kernel=[1, 3, 3, 1], pad=(1, 1), upsample_factor=2) self.skip_upsample = Upsample(kernel=[1, 3, 3, 1], factor=2) if deep: self.conv2 = EqualConv2d(out_channel, out_channel, kernel_size=3, stride=1, padding=1, bias=False) self.activation = FusedLeakyReLU(out_channel, bias=True) self.toRGB = EqualConv2d(out_channel, 3, kernel_size=3, stride=1, padding=1, bias=True) def forward(self, x, skip=None): if self.mode == 'original': x = torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest') x = self.conv(x) else: x = self.conv(x) x = self.blur(x) x = self.activation(x) if self.deep: x = self.conv2(x) x = self.activation(x) rgb = self.toRGB(x) if skip is not None: if self.mode == 'original': skip = torch.nn.functional.interpolate(skip, scale_factor=2, mode='bilinear', align_corners=False) else: skip = self.skip_upsample(skip) rgb = rgb + skip return x, rgb class PositionalEncoding(nn.Module): """Positional encoding layer. Positionally encode inputs by projecting them through sinusoidal functions at multiple frequencies. Frequencies are scaled logarithmically. The original input is also included in the output so that the absolute position information is not lost. Args: ---- in_dim: int Input dimension. frequency_bands: int Number of frequencies to encode input into. 
""" def __init__(self, in_dim, frequency_bands=6, include_input=True): super().__init__() self.in_dim = in_dim if include_input: self.out_dim = in_dim + (2 * frequency_bands * in_dim) else: self.out_dim = 2 * frequency_bands * in_dim self.frequency_bands = frequency_bands self.include_input = include_input freqs = 2.0 ** torch.linspace(0.0, frequency_bands - 1, frequency_bands, dtype=torch.float) self.freqs = torch.nn.Parameter(freqs, requires_grad=False) def forward(self, x): if self.include_input: encoding = [x] else: encoding = [] for freq in self.freqs: for func in [torch.sin, torch.cos]: encoding.append(func(x * freq)) encoding = torch.cat(encoding, dim=-1) return encoding
src/python/model/model.py
annihilatethee/seedsync
255
11128020
<gh_stars>100-1000 # Copyright 2017, <NAME>, All rights reserved. import logging from abc import ABC, abstractmethod from typing import Set # my libs from common import AppError from .file import ModelFile class ModelError(AppError): """ Exception indicating a model error """ pass class IModelListener(ABC): """ Interface to listen to model events """ @abstractmethod def file_added(self, file: ModelFile): """ Event indicating a file was added to the model :param file: :return: """ pass @abstractmethod def file_removed(self, file: ModelFile): """ Event indicating that the given file was removed from the model :param file: :return: """ pass @abstractmethod def file_updated(self, old_file: ModelFile, new_file: ModelFile): """ Event indicating that the given file was updated :param old_file: :param new_file: :return: """ pass class Model: """ Represents the entire state of lftp """ def __init__(self): self.logger = logging.getLogger("Model") self.__files = {} # name->LftpFile self.__listeners = [] def set_base_logger(self, base_logger: logging.Logger): self.logger = base_logger.getChild("Model") def add_listener(self, listener: IModelListener): """ Add a model listener :param listener: :return: """ self.logger.debug("LftpModel: Adding a listener") if listener not in self.__listeners: self.__listeners.append(listener) def remove_listener(self, listener: IModelListener): """ Add a model listener :param listener: :return: """ self.logger.debug("LftpModel: Removing a listener") if listener not in self.__listeners: self.logger.error("LftpModel: listener does not exist!") else: self.__listeners.remove(listener) def add_file(self, file: ModelFile): """ Add a file to the model :param file: :return: """ self.logger.debug("LftpModel: Adding file '{}'".format(file.name)) if file.name in self.__files: raise ModelError("File already exists in the model") self.__files[file.name] = file for listener in self.__listeners: listener.file_added(self.__files[file.name]) def remove_file(self, filename: str): """ Remove the file from the model :param filename: :return: """ self.logger.debug("LftpModel: Removing file '{}'".format(filename)) if filename not in self.__files: raise ModelError("File does not exist in the model") file = self.__files[filename] del self.__files[filename] for listener in self.__listeners: listener.file_removed(file) def update_file(self, file: ModelFile): """ Update an already existing file :param file: :return: """ self.logger.debug("LftpModel: Updating file '{}'".format(file.name)) if file.name not in self.__files: raise ModelError("File does not exist in the model") old_file = self.__files[file.name] new_file = file self.__files[file.name] = new_file for listener in self.__listeners: listener.file_updated(old_file, new_file) def get_file(self, name: str) -> ModelFile: """ Returns a copy of the file of the given name :param name: :return: """ if name not in self.__files: raise ModelError("File does not exist in the model") return self.__files[name] def get_file_names(self) -> Set[str]: return set(self.__files.keys())
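A short usage sketch of the listener interface above. PrintListener is an illustrative implementation, not part of seedsync; once registered it will be called back from add_file, update_file and remove_file.

class PrintListener(IModelListener):
    def file_added(self, file):
        print("added:", file.name)

    def file_removed(self, file):
        print("removed:", file.name)

    def file_updated(self, old_file, new_file):
        print("updated:", old_file.name, "->", new_file.name)


model = Model()
model.add_listener(PrintListener())
# model.add_file(some_model_file)  # would trigger PrintListener.file_added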
December-10/python_UjjwalPrahladka_Dec10.py
UjjwalPrahladka/A-December-of-Algorithms-2019
228
11128025
<gh_stars>100-1000
def cookieCount(n, p, c):
    no_of_cookies = 0
    no_of_jars = 0
    no_of_cookies = no_of_jars = n // p
    while no_of_jars >= c:
        no_of_jars -= c
        no_of_cookies += 1
    return no_of_cookies


n, p, c = input('Enter n p c: ').split()
print('Total Cookies Eaten: {}'.format(cookieCount(int(n), int(p), int(c))))
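A worked example of the function as written above (the numbers are illustrative): with n=7, p=2, c=2, the integer division buys 7 // 2 = 3 cookies and leaves 3 empty jars; one exchange of 2 jars yields a 4th cookie, and the remaining single jar cannot be traded.

assert cookieCount(7, 2, 2) == 4
assert cookieCount(10, 2, 5) == 6   # 5 bought, one trade of 5 jars adds a 6th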
libgsync/filter.py
iwonbigbro/gsync
199
11128036
#!/usr/bin/env python # -*- coding: utf8 -*- # Copyright (C) 2013 <NAME>. All rights reserved. """ Defines the filter feature of gsync, as specified by --filter like options. """ import re, fnmatch from libgsync.output import debug RULEMOD_PAIRS = [ ("exclude", "-"), ("include", "+"), ("hide", "H"), ("show", "S"), ("protect", "P"), ("risk", "R"), ("dir-merge", ":"), ("merge", "."), ] RULES = r"(%s)" % "|".join([ r for r, m in RULEMOD_PAIRS ]) MODIFIERS = r"([%s])" % "".join([ m for r, m in RULEMOD_PAIRS ]) EXPR_RULE_MOD_PATTERN = r"\s*%s,\s*%s\s*(\S+)" % (RULES, MODIFIERS) EXPR_RULE_PATTERN = r"\s*%s\s*(\S+)" % (RULES) EXPR_MOD_PATTERN = r"\s*,?\s*%s\s*(\S+)" % (MODIFIERS) EXPR_LIST = ( EXPR_RULE_MOD_PATTERN, EXPR_MOD_PATTERN, EXPR_RULE_PATTERN, ) class FilterException(Exception): """For exceptions that occur relating to filters or filtering.""" pass class FilterObject(object): """Defines a singleton loadable filter definition.""" def __init__(self): self.rules = [] self.pathcache = {} self.merge_dir = "" def get_modifier(self, path): """Returns a rule modifier that matches the given path""" modifer = self.pathcache.get(path) if modifer is None: return modifer for modifer, pattern in self.rules: if fnmatch.fnmatch(path, pattern): return self.pathcache.setdefault(path, modifer) return None def load_rules(self, path, modifier=""): """Loads filter rules from the file specified by 'path'.""" with open(path, "r") as fd: for line in fd: self.add_rule(modifier + " " + line) def add_rules(self, rules, modifier = ""): """ Adds rules to the filter object, specified with 'rules' and an optional modifier, where rules do not contain modifiers. """ for rule in rules: self.add_rule(modifier + " " + rule) def add_rule(self, rule_string): """ Adds a single rule to the filter object. """ match = None for expr in EXPR_LIST: match = re.match(expr, rule_string) if match is not None: break if match is None: return ngroups = len(match.groups()) debug("%s matched %d groups" % (repr(rule_string), ngroups)) debug(" * [%s]" % ",".join([ x if x else "" for x in match.groups() ])) if ngroups == 3: mod, pattern = match.groups(2, 3) elif ngroups == 2: mod, pattern = match.groups(1, 2) mod = mod[0].upper() if mod == "I": mod = "+" elif mod == "E": mod = "-" elif mod == "D": mod = ":" elif mod == "M": mod = "." else: raise FilterException("Invalid rule: %s" % rule_string) if mod == ":": self.merge_dir = pattern return if mod == ".": # Stop and load some more rules. self.load_rules(pattern) return self.rules.append((mod, pattern)) Filter = FilterObject() # pylint: disable-msg=C0103
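For reference, the regular expressions built above expand as shown below; the snippet is only a derivation from RULEMOD_PAIRS, printed for inspection, and is not part of libgsync.

# RULES      -> (exclude|include|hide|show|protect|risk|dir-merge|merge)
# MODIFIERS  -> ([-+HSPR:.])
# so EXPR_RULE_MOD_PATTERN matches strings such as "exclude,- *.pyc".
print(RULES)
print(MODIFIERS)
print(EXPR_RULE_MOD_PATTERN)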
tests/test_macros.py
SharpKoi/Kashgari
2,422
11128037
<reponame>SharpKoi/Kashgari # encoding: utf-8 # author: BrikerMan # contact: <EMAIL> # blog: https://eliyar.biz # file: test_macros.py # time: 3:23 下午 import random import logging from kashgari.macros import DATA_PATH from tensorflow.keras.utils import get_file from kashgari.corpus import ChineseDailyNerCorpus, SMP2018ECDTCorpus, JigsawToxicCommentCorpus logging.basicConfig(level='DEBUG') text_x = [ ['语', '言', '学', '是', '一', '门', '关', '于', '人', '类', '语', '言', '的', '科', '学', '研', '究', '。'], ['语', '言', '学', '包', '含', '了', '几', '种', '分', '支', '领', '域', '。'], ['在', '语', '言', '结', '构', '研', '究', '与', '意', '义', '研', '究', '之', '间', '存', '在', '一', '个', '重', '要', '的', '主', '题', '划', '分', '。'], ['语', '法', '中', '包', '含', '了', '词', '法', ',', '句', '法', '以', '及', '语', '音', '。'], ['语', '音', '学', '是', '语', '言', '学', '的', '一', '个', '相', '关', '分', '支', ',', '它', '涉', '及', '到', '语', '音', '与', '非', '语', '音', '声', '音', '的', '实', '际', '属', '性', ',', '以', '及', '它', '们', '是', '如', '何', '发', '出', '与', '被', '接', '收', '到', '的', '。'], ['与', '学', '习', '语', '言', '不', '同', ',', '语', '言', '学', '是', '研', '究', '所', '有', '人', '类', '语', '文', '发', '展', '有', '关', '的', '一', '门', '学', '术', '科', '目', '。'], ['在', '语', '言', '结', '构', '(', '语', '法', ')', '研', '究', '与', '意', '义', '(', '语', '义', '与', '语', '用', ')', '研', '究', '之', '间', '存', '在', '一', '个', '重', '要', '的', '主', '题', '划', '分'], ['语', '言', '学', '(', '英', '语', ':', 'l', 'i', 'n', 'g', 'u', 'i', 's', 't', 'i', 'c', 's', ')', '是', '一', '门', '关', '于', '人', '类', '语', '言', '的', '科', '学', '研', '究'], ['语', '言', '学', '(', '英', '语', ':', 'l', 'i', 'n', 'g', 'u', 'i', 's', 't', 'i', 'c', 's', ')', '是', '一', '门', '关', '于', '人', '类', '语', '言', '的', '科', '学', '研', '究'], ['语', '言', '学', '(', '英', '语', ':', 'l', 'i', 'n', 'g', 'u', 'i', 's', 't', 'i', 'c', 's', ')', '是', '一', '门', '关', '于', '人', '类', '语', '言', '的', '科', '学', '研', '究'], ['语', '言', '学', '包', '含', '了', '几', '种', '分', '支', '领', '域', '。'], ['在', '语', '言', '结', '构', '(', '语', '法', ')', '研', '究', '与', '意', '义', '(', '语', '义', '与', '语', '用', ')', '研', '究', '之', '间', '存', '在', '一', '个', '重', '要', '的', '主', '题', '划', '分'] ] multi_label_y = [ ['b', 'c'], ['a', 'b', 'c'], ['b', 'c'], ['a', 'b'], [], ['b'], ['a'], ['a', 'b', 'c'], ['a', 'b', 'c'], ['b'], ['a', 'b'], ['a', 'b'] ] ner_y = [ ['O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O'], ['O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O'], ['O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O'], ['O', 'O', 'O', 'B-1', 'I-1', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O'], ['O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B-2', 'O', 'O', 'O', 'B-1', 'I-1', 'I-1', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O'], ['O', 'O', 'O', 'B-3', 'I-3', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O'], ['O', 'O', 'B-1', 'I-1', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O'], ['O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O'], ['O', 'O', 'O', 'B-1', 'I-1', 'I-1', 'O', 'O', 'O', 'O', 'B-1', 'I-1', 'I-1', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 
'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O'], ['O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B-2', 'I-2', 'I-2', 'O', 'O', 'O', 'O', 'O'], ['O', 'B-3', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O'], ['O', 'O', 'B-3', 'I-3', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O']] class TestMacros: bert_path = get_file('bert_sample_model', "http://s3.bmio.net/kashgari/bert_sample_model.tar.bz2", cache_dir=DATA_PATH, untar=True) w2v_path = get_file('sample_w2v.txt', "http://s3.bmio.net/kashgari/sample_w2v.txt", cache_dir=DATA_PATH) jigsaw_mini_corpus_path = get_file('jigsaw-toxic-comment-corpus-mini.csv', "http://s3.bmio.net/kashgari/jigsaw-toxic-comment-corpus-mini.csv", cache_dir=DATA_PATH) jigsaw_mini_corpus = JigsawToxicCommentCorpus(jigsaw_mini_corpus_path) chinese_daily = ChineseDailyNerCorpus.load_data('valid') smp_corpus = SMP2018ECDTCorpus.load_data('valid') # Test data for issue https://github.com/BrikerMan/Kashgari/issues/187 custom_1 = (text_x, ner_y) @classmethod def load_labeling_corpus(cls, name=None): data_dict = { 'chinese_daily': cls.chinese_daily, # 'custom_1': cls.custom_1, } if name is None: name = random.choice(list(data_dict.keys())) return data_dict[name] @classmethod def load_classification_corpus(cls, name=None): data_dict = { 'smp_corpus': cls.smp_corpus } if name is None: name = random.choice(list(data_dict.keys())) return data_dict[name] @classmethod def load_multi_label_classification_corpus(cls, name=None): return text_x, multi_label_y if __name__ == "__main__": pass
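A usage sketch of the helper class above; because the class attributes download the sample corpora when the module is imported, this assumes network access and that the cotk-style load_data calls return (x, y) pairs as in the rest of the test suite.

x, y = TestMacros.load_labeling_corpus('chinese_daily')
print(len(x), len(y))                                   # parallel token / NER-tag sequences
text, labels = TestMacros.load_classification_corpus('smp_corpus')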
455 Assign Cookies.py
krishna13052001/LeetCode
872
11128048
<gh_stars>100-1000
#!/usr/bin/python3
"""
Assume you are an awesome parent and want to give your children some cookies. But,
you should give each child at most one cookie. Each child i has a greed factor gi,
which is the minimum size of a cookie that the child will be content with; and each
cookie j has a size sj. If sj >= gi, we can assign the cookie j to the child i, and
the child i will be content. Your goal is to maximize the number of your content
children and output the maximum number.

Note:
You may assume the greed factor is always positive.
You cannot assign more than one cookie to one child.
"""


class Solution:
    def findContentChildren(self, g, s):
        """
        Greedy
        :type g: List[int]
        :type s: List[int]
        :rtype: int
        """
        g.sort()
        s.sort()
        ret = 0
        i = 0
        j = 0
        while i < len(g) and j < len(s):
            if g[i] <= s[j]:
                ret += 1
                i += 1
                j += 1
            else:
                j += 1
        return ret


if __name__ == "__main__":
    assert Solution().findContentChildren([10,9,8,7], [5,6,7,8]) == 2
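Since both arrays are sorted once and then scanned with two pointers, the solution above runs in O(n log n + m log m) time and O(1) extra space. Two additional illustrative checks:

s = Solution()
assert s.findContentChildren([1, 2, 3], [1, 1]) == 1   # only the least greedy child is satisfied
assert s.findContentChildren([1, 2], [1, 2, 3]) == 2   # every child can be matched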
autocolorize/datasets.py
gustavla/autocolorizer
234
11128059
<filename>autocolorize/datasets.py
from __future__ import division, print_function, absolute_import
import os
import glob
import itertools as itr
import numpy as np


def load_image_list(dataset, offset=0, count=None, seed=None):
    if dataset == 'legacy':
        root_dir = "/share/data/vision-greg/larsson/data/legacy"
        img_list = glob.glob(os.path.join(root_dir, '*'))
        #name_list = [os.path.splitext(os.path.basename(fn))[0] for fn in sorted(glob.glob(os.path.join(root_dir, '*')))]
        #img_list = [os.path.join(root_dir, '{}.png'.format(fn)) for fn in name_list]
    elif dataset == 'charpiat':
        root_dir = "/share/data/vision-greg/larsson/data/charpiat"
        img_list = glob.glob(os.path.join(root_dir, '*'))
        #name_list = [os.path.splitext(os.path.basename(fn))[0] for fn in sorted(glob.glob(os.path.join(root_dir, '*')))]
        #img_list = [os.path.join(root_dir, '{}.png'.format(fn)) for fn in name_list]
    elif dataset == 'deep-sun':
        root_dir = "/share/data/vision-greg/larsson/data/SUN397"
        with open(os.path.join(root_dir, 'list.txt')) as f:
            img_list = [os.path.join(root_dir, fn.strip()) for fn in f.readlines()]
    elif dataset == 'sun':
        root_dir = "/share/data/vision-greg/larsson/data/uiuc-color-data/ground-truth"
        name_list = [os.path.splitext(os.path.basename(fn))[0] for fn in sorted(glob.glob(os.path.join(root_dir, '*.png')))]
        img_list = [os.path.join(root_dir, '{}.png'.format(fn)) for fn in name_list]
    elif dataset == 'pascal':
        #root_dir = "/share/data/vision-greg/Pascal/VOCdevkit/VOC2012"
        #name_list = [os.path.splitext(os.path.basename(fn))[0] for fn in sorted(glob.glob(os.path.join(ucm_dir, '*.mat')))]
        #img_list = ['/JPEGImages/{}.jpg'.format(fn) for fn in name_list]
        raise NotImplementedError('Fix if needed')
    elif dataset == 'imagenet-val':
        root_dir = "/share/data/vision-greg/ImageNet/clsloc/256/images/val"
        with open("/share/data/vision-greg/larsson/ImageNet/imagenet_val.txt") as f:
            img_list = [os.path.join(root_dir, x.split()[0][1:]) for x in f.readlines()]
    elif dataset == 'imagenet':
        root_dir = "/share/data/vision-greg/ImageNet/clsloc/256/images/train"
        with open("/share/data/vision-greg/larsson/ImageNet/imagenet_train.txt") as f:
            img_list = [os.path.join(root_dir, x.split()[0][1:]) for x in f.readlines()]
    elif dataset == 'imagenet-cval1k':
        root_dir = "/share/data/vision-greg/ImageNet/clsloc/256/images/val"
        with open("/share/data/vision-greg/larsson/ImageNet/colorization/imagenet_cval1k.txt") as f:
            img_list = [os.path.join(root_dir, x.split()[0][1:]) for x in f.readlines()]
    elif dataset == 'imagenet-example':
        root_dir = "/share/data/vision-greg/ImageNet/clsloc/256/images/val"
        img_list = [os.path.join(root_dir, 'n12620546/ILSVRC2012_val_00039107.JPEG')]
    elif dataset == 'imagenet-ctest1k':
        root_dir = "/share/data/vision-greg/ImageNet/clsloc/256/images/val"
        with open("/share/data/vision-greg/larsson/ImageNet/colorization/imagenet_ctest1k.txt") as f:
            img_list = [os.path.join(root_dir, x.split()[0][1:]) for x in f.readlines()]
    elif dataset == 'imagenet-ctest10k':
        root_dir = "/share/data/vision-greg/ImageNet/clsloc/256/images/val"
        with open("/share/data/vision-greg/larsson/ImageNet/colorization/imagenet_ctest10k.txt") as f:
            img_list = [os.path.join(root_dir, x.split()[0][1:]) for x in f.readlines()]
    else:
        raise ValueError("Unknown dataset")

    if seed is not None:
        rs = np.random.RandomState(seed)
        rs.shuffle(img_list)

    if count is None:
        return img_list[offset:]
    else:
        return img_list[offset:offset + count]
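Illustrative call of the loader above; the hard-coded dataset roots are specific to the original authors' cluster, so this only runs where those paths exist.

img_list = load_image_list('imagenet-ctest1k', offset=0, count=10, seed=0)
print(len(img_list), img_list[0])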
基础教程/A2-神经网络基本原理/src/Data/ch08.py
microsoft/ai-edu
11,094
11128063
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for full license information.

import numpy as np
from pathlib import Path
import matplotlib.pyplot as plt

train_data_name = "../../data/ch08.train.npz"
test_data_name = "../../data/ch08.test.npz"


def TargetFunction(x):
    p1 = np.sin(6.28*x)
    y = p1
    return y


def CreateSampleData(num_train, num_test):
    # create train data
    x1 = np.random.random((num_train,1))
    y1 = TargetFunction(x1) + (np.random.random((num_train,1))-0.5)/5
    np.savez(train_data_name, data=x1, label=y1)

    # create test data
    x2 = np.linspace(0,1,num_test).reshape(num_test,1)
    y2 = TargetFunction(x2)
    np.savez(test_data_name, data=x2, label=y2)


def GetSampleData():
    Trainfile = Path(train_data_name)
    Testfile = Path(test_data_name)
    if Trainfile.exists() & Testfile.exists():
        TrainData = np.load(Trainfile)
        TestData = np.load(Testfile)
        return TrainData, TestData


if __name__ == '__main__':
    CreateSampleData(500, 100)
    TrainData, TestData = GetSampleData()
    plt.scatter(TrainData["data"], TrainData["label"], s=1, c='b')
    #plt.scatter(TestData["data"], TestData["label"], s=4, c='r')
    plt.show()
src/tagStorm/softToTagStorm.py
andypohl/kent
171
11128072
<filename>src/tagStorm/softToTagStorm.py #!/usr/bin/env python2.7 # softToTagStorm # <NAME> 07/14/2015 # <EMAIL>/<EMAIL> """ Convert from soft format to tagStorm format. """ from __future__ import print_function import sys, operator, fileinput, collections, string, os.path import re, argparse, subprocess specialTags = ["!Sample_characteristics_"] # functions def warning(*objs): print("WARNING: ", *objs, file=sys.stderr) def error(*objs): """ Quit the program, if there is an error message spit it to stderr. """ print("ERROR: ", *objs, file=sys.stderr) sys.exit() def parseArgs(args): """ Parse the arguments into an opened file for reading (inputFile), and an open file for writing (outputFile). """ parser = argparse.ArgumentParser(description = __doc__) parser.add_argument ("inputFile", help = " Specifies that the text should be read in from an" " input file. ", type = argparse.FileType('r')) parser.add_argument ("outputFile", help = " Specifies that the word and count pairs should" " be printed to an output file. ", type = argparse.FileType('w')) parser.add_argument ("--tagHash", help = " The user provides a hash table that maps soft names to user supplied " " tags.", type = argparse.FileType('r')) parser.add_argument ("--strict", help = " The program will abort if a tag is encountered that is not in the " " user supplied tag file. Only works with the --tagHash option. ", action = "store_true") parser.add_argument ("--verbose", help = " Spit error messages. ", action = "store_true") parser.set_defaults (strict = False) parser.set_defaults (verbose = False) parser.set_defaults (tagHash = None) options = parser.parse_args() # Options is a structure that holds the command line arguments information. return options def main(args): """ Converts soft files to tagstorm files. Goes through the soft file looking for hierarchy tags. The lines in between these tags are stored in a buffer. When a new tag is encountered the depth is updated and the buffer is flushed. """ options = parseArgs(args) inputFile = options.inputFile outputFile = options.outputFile depth = 0 # The current depth. currentStanza = None firstLine = True tagHashTable = dict() if options.tagHash !=None: for line in options.tagHash: splitLine = line.split() tagHashTable.setdefault(splitLine[0], splitLine[1]) lineCount = 0 buffer = dict() for line in inputFile: lineCount += 1 splitLine = line.split('=') nameTag = splitLine[0][1:].replace(" ","").strip("\n") # Strip the '!' and any white space, store this as the name tag valTag = splitLine[1].strip("\n").strip(" ") for item in splitLine[2:]: # Gather the rest of the string, this is the value tag valTag += "=" + item valTag = valTag.replace("\n","") if options.tagHash != None: # Check if we are providing our own tags if splitLine[0][1:-1] not in tagHashTable: if (not options.strict) and options.verbose: warning("The tag %s in the soft file on line %i was not found in the provided hashTable." "The program is using the native tags."%(splitLine[0][1:-1], lineCount)) if options.strict: error("The tag %s in the soft file on line %i was not found in the provided hashTable." " The program is aborting."%(splitLine[0][1:-1], lineCount)) continue if line.startswith("!"): # Handle all general tags if ("!" + nameTag) in tagHashTable: # We have our own version of this tag buffer.setdefault(tagHashTable["!" 
+ nameTag],valTag) else: if splitLine[0].startswith("!Sample_characteristics_"): subTags = splitLine[1].strip("\n").split(":") buffer.setdefault("GEO_Sample_"+subTags[0][1:].replace(" ","_"), subTags[1]) continue if buffer.get(nameTag): buffer[nameTag] += ", " + valTag[:-1] else: buffer.setdefault("GEO_"+nameTag,valTag) if line.startswith('^'): # Look for new stanzas and flush the previous data. if firstLine: firstLine = False if nameTag in tagHashTable : outputFile.write(tagHashTable[nameTag] + "\t" + valTag + "\n") else: outputFile.write("GEO_"+ nameTag + "\t" + valTag + "\n") continue for key,val in buffer.iteritems(): outputLine = ("%s\t%s\n"%(key,val.strip("\n"))) outputFile.write(depth * "\t" + outputLine) if splitLine[0] != currentStanza: # Only update depth when a new stanza is seen currentStanza = splitLine[0] depth += 1 buffer = dict() outputFile.write("\n") if nameTag in tagHashTable: outputFile.write(depth*"\t" + tagHashTable.get(nameTag) + "\t" + valTag + "\n") else: outputFile.write(depth*"\t" + "GEO_" + nameTag + "\t" + valTag + "\n") for key,val in buffer.iteritems(): # Flush the final buffer. outputLine = ("%s\t%s\n"%(key,val)) outputFile.write(depth * "\t" + outputLine) if __name__ == "__main__" : sys.exit(main(sys.argv))
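A hypothetical invocation of the converter above (the file names are illustrative); the positional arguments and flags follow parseArgs exactly.

import subprocess

subprocess.check_call([
    "python2.7", "softToTagStorm.py",
    "GSE12345_family.soft",      # inputFile: GEO SOFT-format metadata
    "GSE12345.tags",             # outputFile: tag storm output
    "--tagHash", "tagmap.txt",   # optional soft-name -> tag mapping
    "--verbose",
])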
cotk/dataloader/dataloader.py
ishine/cotk
117
11128076
<reponame>ishine/cotk<gh_stars>100-1000 '''A module for dataloader''' import random from typing import Optional, Any, Union, Sequence, Dict, Tuple, Iterable, List from collections import Counter, OrderedDict from itertools import chain import logging from hashlib import sha256 import numpy as np from .._utils import trim_before_target from .._utils.unordered_hash import UnorderedSha256, dumps from .._utils.metaclass import DocStringInheritor, LoadClassInterface, copy_func, copy_property from .._utils.typehint import OrderedDictType from ..file_utils import get_resource_file_path from .tokenizer import Tokenizer from .field import Field, SentenceDefault, _FieldContent, Sentence from .vocab import Vocab, GeneralVocab from .context import FieldContext, VocabContext class Dataloader(LoadClassInterface, metaclass=DocStringInheritor): '''Base class of Dataloader. ''' class LanguageProcessing(Dataloader): """Bases: :class:`.dataloader.Dataloader` Base class for all language processing tasks. This is an abstract class. During the initialization of a dataloader, :class:`Vocab`, :class:`Tokenizer` or :class:`Field` may be created. See :ref:`how to create a dataloader<customized_tasks_ref>`. Arguments:{FILE_ID_DOCS}{FIELD_DETAILS} """ FILE_ID_DOCS = r""" file_id (str): A string indicating the source (path) of the dataset. It can be local path (``"./data"``), a resource name (``"resources://dataset"``), or an url (``"http://test.com/dataset.zip"``). See :ref:`the details of file id<file_id>`.""" FIELD_DETAILS = r""" fields (List, OrderedDict, Dict): This arguments supports multiple input types: * If ``OrderDict`` or ``List``, it specify ``data format`` of the ``"train"``, ``"dev"``, ``"test"`` set. * A ``data format`` should be an ``OrderedDict`` or a ``List[Tuple]`` can be converted to ``OrderedDict``. * The ``key`` of ``data format`` is the name of a Field (used by :meth:`.get_batch`), and the ``value`` is either a class name of a Field or a :class:`Field` object. * Examples: >>> postField = SentenceDefault(...) >>> respField = SentenceDefault(...) >>> data_format = [("post", postField), ("resp", respField)] or >>> data_format = [("post", "SentenceDefault"), ("resp", "SentenceDefault")] * Examples: >>> fields = data_format equals to >>> fields = {"train": data_format, "dev": data_format, "test": data_format} * If ``Dict``, ``fields[key]`` describes ``data format`` of the set named ``key``. Examples: >>> fields = {"train": data_format, "extra": data_format} * See :ref:`how to create a dataloader<customized_tasks_ref>`.""" FIELD_REF = r""" fields (List, OrderedDict, Dict): See initialization of :class:`LanguageProcessing` for explanation. 
""" SHARED_ARGUMENTS = r'''{LanguageProcessing.FILE_ID_DOCS} {_FILE_ID_DEFAULT} {LanguageProcessing.TOKENIZER_DOCS} {_TOKENIZER_DEFAULT} {LanguageProcessing.MAX_SENT_LENGTH_DOCS} {_MAX_SENT_LENGTH_DEFAULT} {LanguageProcessing.CONVERT_TO_LOWER_LETTER_DOCS} {_CONVERT_TO_LOWER_LETTER_DEFAULT} {LanguageProcessing.MIN_FREQUENT_VOCAB_TIMES_DOCS} {_MIN_FREQUENT_VOCAB_TIMES_DEFAULT} {LanguageProcessing.MIN_RARE_VOCAB_TIMES_DOCS} {_MIN_RARE_VOCAB_TIMES_DEFAULT} {LanguageProcessing.PRETRAINED_DOCS} {_PRETAINED_DEFAULT}''' _FILE_ID_DEFAULT = "" TOKENIZER_DOCS = Sentence.TOKENIZER_DOCS _TOKENIZER_DEFAULT = Sentence.TOKENIZER_DEFAULT MAX_SENT_LENGTH_DOCS = Sentence.MAX_SENT_LENGTH_DOCS _MAX_SENT_LENGTH_DEFAULT = Sentence.MAX_SENT_LENGTH_DEFAULT CONVERT_TO_LOWER_LETTER_DOCS = Sentence.CONVERT_TO_LOWER_LETTER_DOCS _CONVERT_TO_LOWER_LETTER_DEFAULT = Sentence.CONVERT_TO_LOWER_LETTER_DEFAULT MIN_FREQUENT_VOCAB_TIMES_DOCS = GeneralVocab.MIN_FREQUENT_VOCAB_TIMES_DOCS _MIN_FREQUENT_VOCAB_TIMES_DEFAULT = GeneralVocab.MIN_FREQUENT_VOCAB_TIMES_DEFAULT MIN_RARE_VOCAB_TIMES_DOCS = GeneralVocab.MIN_RARE_VOCAB_TIMES_DOCS _MIN_RARE_VOCAB_TIMES_DEFAULT = GeneralVocab.MIN_RARE_VOCAB_TIMES_DEFAULT PRETRAINED_DOCS = r''' pretrained (str, optional): Use :ref:`pretrained field<pretrained_field_ref>` instead of :class:`SentenceDefault`.''' _PRETAINED_DEFAULT = "Default: If ``None``, no pretrained field used." # for docstring fields: Dict[str, "OrderedDict[str, Union[str, Field]]"] = {} '''This instance attribute shows fields of the dataloader (See the initialization of :class:`LanguageProcessing`). For example, the fields can be printed as follows: .. code-block:: python { 'train': OrderedDict([('sent', <cotk.dataloader.field.SentenceDefault object at 0x000001E170F8B588>)]), 'dev': OrderedDict([('sent', <cotk.dataloader.field.SentenceDefault object at 0x000001E170F8BB48>)]), 'test': OrderedDict([('sent', <cotk.dataloader.field.SentenceDefault object at 0x000001E170F8BEC8>)])} } ''' def __init__(self, file_id: str, \ fields: Union["OrderedDict[str, Union[str, Field]]", List[Tuple[str, Union[str, Field]]],\ Dict[str, Union["OrderedDict[str, Union[str, Field]]", List[Tuple[str, Union[str, Field]]]]]], \ ): self.file_id = file_id self.file_path = get_resource_file_path(file_id) with FieldContext.set_parameters(vocab=GeneralVocab(), weak=True) as field_context: fieldcontents: Dict[str, OrderedDictType[str, _FieldContent]] = {} self.fields: Dict[str, OrderedDictType[str, Field]] = {} if isinstance(fields, OrderedDict) or isinstance(fields, list): fields = {set_name: fields for set_name in ["train", "dev", "test"]} if isinstance(fields, dict): for set_name, fields_in_one_set in fields.items(): one_fields, one_fieldcontents = self._fill_field_and_create_content(set_name, fields_in_one_set) self.fields[set_name] = one_fields fieldcontents[set_name] = one_fieldcontents else: raise TypeError("Unknown type for fields") self._load_data(fieldcontents) self.vocabs = self._collect_vocabs_from_fields(self.fields) # self.default_vocab_id = 0 if len(self.vocabs) == 1 else None self.tokenizers = self._collect_tokenizers_from_fields(self.fields) # self.default_tokenizer_id = 0 if len(self.tokenizers) == 1 else None self.default_field_set_name: Optional[str] = None self.default_field_name: Optional[str] = None self._build_vocabs() self._setting_hash = self._create_setting_hash() self._vocab_hash = self._create_vocab_hash() self.data = self._get_data(fieldcontents) self._raw_data_hash, self._data_hash = self._create_data_hash(fieldcontents) 
self.index, self.batch_id, self.batch_size = self._init_batch(fieldcontents) @staticmethod def simple_create(file_id: str, \ fields: Union[OrderedDictType[str, Union[str, Field]],\ Dict[str, OrderedDictType[str, Union[str, Field]]]], \ **kwargs) -> "LanguageProcessing": '''A simple way to create a dataloader. Instead of using :class:`VocabContext` and :class:`FieldContext`, specifying all the possible parameters here. Arguments:{FILE_ID_DOCS}{FIELD_REF} **kwargs: Arguments passed to created :class:`Vocab` and :class:`Field`. ''' with VocabContext.set_parameters(**kwargs): with FieldContext.set_parameters(**kwargs): with FieldContext.set_parameters(tokenizer="space", weak=True): return LanguageProcessing(file_id, fields) def _load_data(self, fieldcontents: Dict[str, OrderedDictType[str, _FieldContent]]): '''Load data from file. Arguments: fieldcontents (Dict[str, OrderedDictType[str, _FieldContent]]): fieldcontents for each set ''' for set_name, fieldcontents_in_one_set in fieldcontents.items(): if not fieldcontents_in_one_set: raise RuntimeError("no field specified") with open("%s/%s.txt" % (self.file_path, set_name), encoding='utf-8') as f_file: line_cnt = 0 file_iterator = iter(f_file) while True: try: for _, fieldcontent in fieldcontents_in_one_set.items(): line_add = fieldcontent.read_next(file_iterator) if line_add == 0: while True: if next(file_iterator): raise RuntimeError("the file %s corrupted at line %d" % (set_name, line_cnt)) line_cnt += line_add except StopIteration: break sample_nums = [fieldcontent.get_data_number() for _, fieldcontent in fieldcontents_in_one_set.items()] if not all([sample_num == sample_nums[0] for sample_num in sample_nums]): raise RuntimeError("the file %s corrupted at end of the file") for _, fieldcontents_in_one_set in fieldcontents.items(): for _, fieldcontent in fieldcontents_in_one_set.items(): fieldcontent.process_before_vocab() def _init_batch(self, fieldcontents: Dict[str, OrderedDictType[str, _FieldContent]]) -> \ Tuple[Dict[str, List[int]], Dict[str, int], Dict[str, Optional[int]]]: '''Initialize the batches. Return a tuple contains ``index``, ``batch_id``, ``batch_size`` for each set. Arguments: fieldcontents (Dict[str, OrderedDictType[str, _FieldContent]]): fieldcontents for each set. ''' index: Dict[str, List[int]] = {} batch_id: Dict[str, int] = {} batch_size: Dict[str, Optional[int]] = {} for set_name, fieldcontents_in_one_set in fieldcontents.items(): sample_nums = [fieldcontent.get_data_number() \ for _, fieldcontent in fieldcontents_in_one_set.items()] batch_id[set_name] = 0 batch_size[set_name] = None index[set_name] = list(range(sample_nums[0])) return index, batch_id, batch_size def _get_data(self, fieldcontents: Dict[str, OrderedDictType[str, _FieldContent]]) -> \ Dict[str, Dict[str, Any]]: '''Get the data from fieldcontents. Arguments: fieldcontents (Dict[str, OrderedDict[str, _FieldContent]]): fieldcontents for each set. ''' data: Dict[str, Dict[str, Any]] = {} for set_name, fieldcontents_in_one_set in sorted(fieldcontents.items()): data[set_name] = {} for field_name, fieldcontent in fieldcontents_in_one_set.items(): data[set_name][field_name] = fieldcontent.get_data() return data def _build_vocabs(self): '''Invoke build vocab for each vocabulary''' for vocab in self.vocabs: vocab.build_vocab() def _collect_vocabs_from_fields(self, fields: Dict[str, OrderedDictType[str, Field]])\ -> List[Vocab]: '''Collect all vocabulary instances (deduplicated). Arguments: fieldcontents (Dict[str, OrderedDict[str, Field]]): field for each set. 
''' vocabs: List[Vocab] = [] for _, fields_in_one_set in sorted(fields.items()): # sort to keep order for _, field in fields_in_one_set.items(): vocab = field.get_vocab() if vocab is not None and vocab not in vocabs: vocabs.append(vocab) return vocabs def _collect_tokenizers_from_fields(self, fields: Dict[str, OrderedDictType[str, Field]])\ -> List[Tokenizer]: '''Collect all tokenizer instances (deduplicated). Arguments: fieldcontents (Dict[str, OrderedDict[str, Field]]): field for each set. ''' tokenizers: List[Tokenizer] = [] tokenizers_setting_hash: List[str] = [] for _, fields_in_one_set in sorted(fields.items()): # sort to keep order for _, field in fields_in_one_set.items(): tokenizer = field.get_tokenizer() if tokenizer is not None and tokenizer.get_setting_hash() not in tokenizers_setting_hash: tokenizers.append(tokenizer) tokenizers_setting_hash.append(tokenizer.get_setting_hash()) return tokenizers def _fill_field_and_create_content(self, set_name: str, fields: \ Union[OrderedDictType[str, Union[str, Field]], List[Tuple[str, Union[str, Field]]]], \ ) -> \ Tuple[OrderedDictType[str, Field], OrderedDictType[str, _FieldContent]]: '''Create and return fields and field contexts. Arguments: set_name(str): name of the set field (OrderedDictType[str, Union[str, Field]]): fields for the set. ''' fieldcontents: OrderedDictType[str, _FieldContent] = OrderedDict() new_fields: OrderedDictType[str, Field] = OrderedDict() fields_iter: Iterable[Tuple[str, Union[str, Field]]] if isinstance(fields, OrderedDict): fields_iter = fields.items() elif isinstance(fields, list): fields_iter = fields else: raise TypeError("Unexpected Type for fields") for name, field_name in fields_iter: if isinstance(field_name, str): field = Field.load_class(field_name)() elif isinstance(field_name, Field): field = field_name else: raise TypeError("Each value of `fields` must be a Field object or a string indicating the name of a Field class.") fieldcontent = field._create(set_name) #pylint: disable=protected-access fieldcontents[name] = fieldcontent new_fields[name] = field return new_fields, fieldcontents def _create_data_hash(self, fieldcontents): raw_data_hash = sha256() data_hash = sha256() for _, fieldcontents_in_one_set in sorted(fieldcontents.items()): for _, fieldcontent in fieldcontents_in_one_set.items(): raw_data_hash.update(dumps(fieldcontent.get_raw_data_hash())) data_hash.update(dumps(fieldcontent.get_data_hash())) return raw_data_hash.hexdigest(), data_hash.hexdigest() def _create_setting_hash(self): setting_hash = sha256() for _, fields_in_one_set in sorted(self.fields.items()): for _, field in fields_in_one_set.items(): setting_hash.update(dumps(field._get_setting_hash(self.vocabs))) #pylint: disable=protected-access for vocab in self.vocabs: setting_hash.update(dumps(vocab.get_setting_hash())) for tokenizer in self.tokenizers: setting_hash.update(dumps(tokenizer.get_setting_hash())) return setting_hash.hexdigest() def _create_vocab_hash(self): vocab_hash = sha256() for vocab in self.vocabs: vocab_hash.update(dumps(vocab.get_vocab_hash())) return vocab_hash.hexdigest() def get_default_vocab(self) -> Vocab: '''Get the default :class:`Vocab` in this dataloader. It can be set by :meth:`.set_default_field`. ''' vocab = self.get_default_field().get_vocab() if vocab is None: raise ValueError("This field do not have vocab") return vocab def get_default_tokenizer(self) -> Tokenizer: '''Get the default :class:`Tokenizer` in this dataloader. It can be set by :meth:`.set_default_field`. 
''' tokenizer = self.get_default_field().get_tokenizer() if tokenizer is None: raise ValueError("This field do not have tokenizer") return tokenizer def get_default_field(self) -> Field: '''Get the default :class:`Field` in this dataloader. It can be set by :meth:`.set_default_field`. ''' if self.default_field_name is None or self.default_field_set_name is None: raise RuntimeError("No default field. \ Specify the default field by set_default_field.") return self.fields[self.default_field_set_name][self.default_field_name] SET_NAME_DESCRIPTION = '''set_name (str): The name of set. For example: ``"train"``, ``"dev"``, ``"test"``.''' FIELD_NAME_DESCRIPTION = '''field_name (str): The name of field.''' def set_default_field(self, set_name: str, field_name: str): '''Set the default :class:`Field` in this dataloader. In the meanwhile, the default :class:`Vocab` and :class:`Tokenizer` is also set according to the field (if the field have vocab and tokenizer). The default field will affect the action in the following methods: * :meth:`get_default_field` * :meth:`tokenize` * :meth:`tokenize_sentences` * :meth:`convert_tokens_to_ids` * :meth:`convert_ids_to_tokens` * :meth:`convert_ids_to_sentence` * :meth:`convert_sentence_to_ids` * :meth:`add_special_to_ids` * :meth:`remove_special_in_ids` * :meth:`process_sentences` * :meth:`trim_in_ids` * :meth:`get_default_vocab` * :meth:`get_special_tokens_mapping` * :meth:`get_special_tokens_id` * :meth:`get_default_tokenizer` Arguments: {SET_NAME_DESCRIPTION} {FIELD_NAME_DESCRIPTION} ''' if set_name not in self.fields: raise KeyError("No such set named %s" % set_name) elif field_name not in self.fields[set_name]: raise KeyError("No such field named %s" % field_name) self.default_field_set_name = set_name self.default_field_name = field_name # tokenizer = self.fields[set_name][field_name].get_tokenizer() # if tokenizer: # self.set_default_tokenizer(tokenizer) # vocab = self.fields[set_name][field_name].get_vocab() # if vocab: # self.set_default_vocab(vocab) def get_field(self, set_name: str, field_name: str) -> Field: '''Get :class:`Field` according to name of set and field. Arguments: {SET_NAME_DESCRIPTION} {FIELD_NAME_DESCRIPTION} ''' return self.fields[set_name][field_name] def get_general_hash(self) -> str: '''General hash. Identifying all details in dataloader, including raw data before processed, tokenized data, vocabulary, and settings. See :ref:`dataloader hash<dataloader_hash_ref>` for explaination. ''' general_hash = sha256() general_hash.update(dumps(self._raw_data_hash)) general_hash.update(dumps(self._data_hash)) general_hash.update(dumps(self._vocab_hash)) general_hash.update(dumps(self._setting_hash)) return general_hash.hexdigest() def get_raw_data_hash(self) -> str: '''Raw data hash. Identifying raw data before processed. See :ref:`dataloader hash<dataloader_hash_ref>` for explaination. ''' return self._raw_data_hash def get_data_hash(self) -> str: '''Data hash. Identifying data after processed (tokenized). See :ref:`dataloader hash<dataloader_hash_ref>` for explaination. ''' return self._data_hash def get_vocab_hash(self) -> str: '''Vocab hash. Identifying vocabulary. See :ref:`dataloader hash<dataloader_hash_ref>` for explaination. ''' return self._vocab_hash def get_setting_hash(self) -> str: '''Setting hash, identifying settings to create the data loader. See :ref:`dataloader hash<dataloader_hash_ref>` for explaination. ''' return self._setting_hash def restart(self, set_name, batch_size=None, shuffle=True): '''Initialize batches. 
This function be called before :func:`get_next_batch` or an epoch is end. See :meth:`get_next_batch` for examples. Arguments: {SET_NAME_DESCRIPTION} batch_size (int): the number of sample in a batch. default: if ``None``, last ``batch_size`` is used. shuffle (bool): whether to shuffle the data. Default: ``True``. ''' if set_name not in self.fields: raise ValueError("No set named %s." % set_name) if batch_size is None and self.batch_size[set_name] is None: raise ValueError("You need batch_size to initialize.") if shuffle: # rng_state = random.getstate() random.shuffle(self.index[set_name]) # random.setstate(rng_state) self.batch_id[set_name] = 0 if batch_size is not None: self.batch_size[set_name] = batch_size batch_size_div = self.batch_size[set_name] assert batch_size_div is not None print("%s set restart, %d batches and %d left" % (set_name, \ len(self.index[set_name]) // batch_size_div, \ len(self.index[set_name]) % batch_size_div)) _GET_BATCH_MORE_DOC = "Return a merged dict containing all the data from each field by calling :meth:`.field.get_batch`. " \ "See examples in subclasses for the return value of predefined tasks." _GET_BATCH_EXAMPLE = "" def get_batch(self, set_name: str, indexes: List[int]) -> Dict[str, Any]: '''Get a batch of data with specified ``indexes``. {_GET_BATCH_MORE_DOC} :meth:`get_next_batch`, :meth:`get_batches`, :meth:`get_all_batch` provide other methods to get batched data, Their return values are consistent with this methods. Arguments: {SET_NAME_DESCRIPTION} indexes (list): a list of specified indexes of batched data. {_GET_BATCH_EXAMPLE} ''' if set_name not in self.fields: raise ValueError("No set named %s." % set_name) res: Dict[str, Any] = {} for field_name, field_obj in self.fields[set_name].items(): res.update(field_obj.get_batch(field_name, self.data[set_name][field_name], indexes)) #pylint: disable=protected-access return res IGNORE_LEFT_SAMPLES = "ignore_left_samples (bool): If the number of the samples is not divisible by ``batch_size``, " \ "ignore the left samples less than ``batch_size`` " \ "Setting it to ``True`` make that every batch will have the same number of samples. " \ "Default: ``False``." def get_next_batch(self, set_name, ignore_left_samples=False) -> Optional[Dict[str, Any]]: '''Get next batch. It can be called only after Initializing batches (:func:`restart`). Return a dict like :func:`get_batch`, or None if the epoch is end. Arguments: {SET_NAME_DESCRIPTION} {IGNORE_LEFT_SAMPLES} Examples: >>> dataloader.restart("train") >>> while True: >>> data = dataloader.get_next_batch("train") >>> if data: >>> break >>> print(data) ''' if set_name not in self.fields: raise ValueError("No set named %s." % set_name) batch_size = self.batch_size[set_name] if batch_size is None: raise RuntimeError( \ "Please run restart before calling this function.") batch_id = self.batch_id[set_name] start, end = batch_id * \ batch_size, (batch_id + 1) * batch_size if start >= len(self.index[set_name]): return None if ignore_left_samples and end > len(self.index[set_name]): return None index = self.index[set_name][start:end] res = self.get_batch(set_name, index) self.batch_id[set_name] += 1 return res def get_batches(self, set_name, batch_size=None, shuffle=True, ignore_left_samples=False) -> Iterable[Dict[str, Any]]: '''An iterable generator over batches. It first call :func:`restart`, and then :func:`get_next_batch` until no more data is available. Returns an iterable generator where each element is like :func:`get_batch`. 
Arguments: {SET_NAME_DESCRIPTION} batch_size (int, optional): default: ``None``. Use ``batch_size`` by default. shuffle (bool): whether to shuffle the data. Default: ``True``. {IGNORE_LEFT_SAMPLES} ''' self.restart(set_name, batch_size, shuffle) while True: res = self.get_next_batch(set_name, ignore_left_samples) if res is None: break yield res def get_all_batch(self, set_name) -> Dict[str, List[Any]]: r'''Concatenate all batches to a single dict, where padding will not be applied. Returns a dict like :func:`get_batch` with all valid ``indexes``, but all the sentences are not padded and their type will be converted to list. Exactly, this function called :func:`get_batch` where ``len(indexes)==1`` multiple times and concatenate all the values in the returned dicts. Arguments: {SET_NAME_DESCRIPTION} ''' res: Dict[str, List[Any]] = {} for idx in self.index[set_name]: batch = self.get_batch(set_name, [idx]) for attr, val in batch.items(): if attr not in res: res[attr] = [] if not isinstance(val, (list, np.ndarray)): val = [val] res[attr].extend(val) return res # copy some functions from vocab _VOCAB_MORE_DOCSTRING = '''It calls the identical method of the :class:`Vocab` instance ``vocab``,\ from :meth:`.get_default_vocab()`.''' frequent_vocab_size = copy_property(get_default_vocab, Vocab, "frequent_vocab_size") all_vocab_size = copy_property(get_default_vocab, Vocab, "all_vocab_size") frequent_vocab_list = copy_property(get_default_vocab, Vocab, "frequent_vocab_list") all_vocab_list = copy_property(get_default_vocab, Vocab, "all_vocab_list") get_special_tokens_mapping = copy_func(get_default_vocab, Vocab, "get_special_tokens_mapping") get_special_tokens_id = copy_func(get_default_vocab, Vocab, "get_special_tokens_id") pad_id = copy_property(get_default_vocab, Vocab, "pad_id") unk_id = copy_property(get_default_vocab, Vocab, "unk_id") go_id = copy_property(get_default_vocab, Vocab, "go_id") eos_id = copy_property(get_default_vocab, Vocab, "eos_id") _SENTENCE_MORE_DOCSTRING = '''It calls the identical method of the :class:`Sentence` instance ``sentence``,\ from :meth:`.get_default_field()`.''' _SESSION_MORE_DOCSTRING = '''It calls the identical method of the :class:`Session` instance ``session``,\ from :meth:`.get_default_field()`.''' tokenize = copy_func(get_default_field, Sentence, "tokenize") tokenize_sentences = copy_func(get_default_field, Sentence, "tokenize_sentences") convert_tokens_to_ids = copy_func(get_default_field, Sentence, "convert_tokens_to_ids") convert_ids_to_tokens = copy_func(get_default_field, Sentence, "convert_ids_to_tokens") convert_ids_to_sentence = copy_func(get_default_field, Sentence, "convert_ids_to_sentence") convert_sentence_to_ids = copy_func(get_default_field, Sentence, "convert_sentence_to_ids") add_special_to_ids = copy_func(get_default_field, Sentence, "add_special_to_ids") remove_special_in_ids = copy_func(get_default_field, Sentence, "remove_special_in_ids") process_sentences = copy_func(get_default_field, Sentence, "process_sentences") trim_in_ids = copy_func(get_default_field, Sentence, "trim_in_ids")
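# A minimal usage sketch of the batching API documented above. The resource
# path and field specification are assumptions for illustration only; any
# directory containing train/dev/test .txt files in the expected format
# would do.
if __name__ == "__main__":
    loader = LanguageProcessing.simple_create(
        "./my_dataset",                      # hypothetical local data directory
        [("sent", "SentenceDefault")],       # one Sentence field named "sent"
    )
    loader.set_default_field("train", "sent")

    # One pass over the training set, as described in get_batches()/get_next_batch().
    for batch in loader.get_batches("train", batch_size=32, shuffle=True):
        print(list(batch.keys()))
        break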
plenum/test/exceptions.py
andkononykhin/plenum
148
11128094
class NotFullyConnected(Exception):
    pass


class TestException(Exception):
    pass
bip/base/bipida.py
paulfariello-syn/bip
145
11128097
import ida_kernwin


class BipIda(object):
    """
        Class for regrouping interfaces with IDA itself. This can include
        configurations and things specific to the IDA API. Currently this
        contains only static methods.
    """

    @staticmethod
    def exec_sync(func, *args, **kwargs):
        """
            Wrap around the execute_sync API to perform a call on the
            function ``func`` in the main thread. If a function is not
            marked as THREAD_SAFE in the headers, then it can only be
            called from the main thread of IDA.

            .. todo:: unit test

            :param func: The function to call.
            :type func: Python Callable
            :param args: Arguments to ``func``
            :param kwargs: Keyword arguments to ``func``
            :param MFF_FLAG: Flag describing the operation on the database.
                Default ``MFF_READ``. Can be ``MFF_FAST``, ``MFF_READ``,
                ``MFF_WRITE`` or ``MFF_NOWAIT`` (from ida_kernwin).
            :type MFF_FLAG: int
            :return: The return of ``func``
        """
        MFF_FLAG = kwargs.get("MFF_FLAG", ida_kernwin.MFF_READ)
        ret = {"ret": None}

        def handle():
            ret["ret"] = func(*args, **kwargs)
            return 1

        ida_kernwin.execute_sync(handle, MFF_FLAG)
        return ret["ret"]
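# A minimal usage sketch of exec_sync, assuming it runs inside IDA with Bip
# loaded. The callable below is a stand-in for any API that must execute on
# IDA's main thread; exec_sync defaults to MFF_READ for read-only work.
def _example_exec_sync_usage():
    def _current_screen_ea():
        return ida_kernwin.get_screen_ea()

    # Safe to call from a worker thread; the read happens on the main thread.
    return BipIda.exec_sync(_current_screen_ea)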
gluoncv/data/recordio/__init__.py
Kh4L/gluon-cv
5,447
11128198
<reponame>Kh4L/gluon-cv
"""Datasets from RecordIO files."""
unitest/test_memory_cache.py
reyoung/PatrickStar
494
11128229
# BSD 3-Clause License # # Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of the psutil authors nor the names of its contributors # may be used to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON # ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import unittest import torch from patrickstar.core.memory_cache import MemoryCache from patrickstar.core.memtracer import RuntimeMemTracer class TestMemoryCache(unittest.TestCase): def setUp(self): self.default_chunk_size = 40 def test_case1(self): self.compute_device = ( torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu") ) memtracer = RuntimeMemTracer() memory_cache = MemoryCache(2, memtracer) payload1 = memory_cache.pop_or_allocate( self.compute_device, 10, torch.float, False ) payload1_addr = payload1.data_ptr() memory_cache.push(payload1) payload2 = memory_cache.pop_or_allocate( self.compute_device, 10, torch.float, False ) self.assertTrue(payload1_addr == payload2.data_ptr()) payload3 = memory_cache.pop_or_allocate( self.compute_device, 10, torch.float, False ) self.assertTrue(payload1_addr != payload3.data_ptr()) print("payload3 ", payload3.data_ptr()) payload2_addr = payload2.data_ptr() memory_cache.push(payload2) memory_cache.push(payload3) payload4 = memory_cache.pop_or_allocate( self.compute_device, 10, torch.float, False, ) self.assertTrue(payload2_addr == payload4.data_ptr()) if __name__ == "__main__": unittest.main()
jsonrpcserver/utils.py
bcb/jsonrpcserver
144
11128250
from functools import reduce
from typing import Any, Callable, List

identity = lambda x: x


def compose(*fs: Callable[..., Any]) -> Callable[..., Any]:
    def compose2(f: Callable[..., Any], g: Callable[..., Any]) -> Callable[..., Any]:
        return lambda *a, **kw: f(g(*a, **kw))

    return reduce(compose2, fs)


def make_list(x: Any) -> List[Any]:
    return [x] if not isinstance(x, list) else x
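# A small illustrative check of the helpers above; note that compose applies
# its functions right-to-left, mirroring the reduce over compose2.
if __name__ == "__main__":
    inc = lambda x: x + 1
    double = lambda x: x * 2

    assert compose(double, inc)(3) == 8          # double(inc(3))
    assert identity(41) == 41
    assert make_list("a") == ["a"]
    assert make_list(["a", "b"]) == ["a", "b"]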
logistic-regression/mnist_dataset.py
eliben/deep-learning-samples
183
11128340
<filename>logistic-regression/mnist_dataset.py<gh_stars>100-1000 # Helper code for downloading, unpickling and displaying MNIST data. # # <NAME> (http://eli.thegreenplace.net) # This code is in the public domain from __future__ import print_function import cPickle as pickle import gzip import os from shutil import copyfileobj from urllib2 import urlopen from urlparse import urljoin import matplotlib.pyplot as plt import numpy as np def maybe_download(base_url, filename, expected_size, force=False): """Download a file if not present, and make sure it's the right size.""" if force or not os.path.exists(filename): print('Attempting to download:', filename) in_stream = urlopen(urljoin(base_url, filename)) with open(filename, 'wb') as out_file: copyfileobj(in_stream, out_file) print('Download Complete!') statinfo = os.stat(filename) if statinfo.st_size == expected_size: print('Found and verified', filename) return True else: print('Unable to verify size: {0} vs. expected {1}'.format( statinfo.st_size, expected_size)) return False def load_pickle_from_gz(filename): """Load a pickle from a gzip archive.""" with gzip.open(filename, 'rb') as f: return pickle.loads(f.read()) def get_mnist_data(): """Get data sets for MNIST. If needed, downloads the data as a pickled .gz archive; Taken from my mirror of the archive at http://deeplearning.net/tutorial/gettingstarted.html. The pickle contains 3 sets in a tuple: training, validation and test data sets. Each data set is a pair of numpy arrays: data (N x 784) and numeric labels (N,) where N is the set size. """ baseurl = 'http://thegreenplace.net/files/' filename = 'mnist.pkl.gz' if maybe_download(baseurl, filename, expected_size=16168813): return load_pickle_from_gz(filename) else: return None def display_mnist_image(x, y=None): """Displays a single mnist image with a label. x: (784,) image vector, as stored in the mnist pickle. y: optional numeric label """ xmat = x.reshape(28, 28) plt.imshow(xmat, cmap='gray') if y is not None: plt.title('label={0}'.format(y)) plt.show() def display_multiple_images(xs): """Displays multiple images side-by-side in subplots.""" fig = plt.figure() fig.set_tight_layout(True) for i, x in enumerate(xs): ax = fig.add_subplot(1, len(xs), i + 1) ax.imshow(x.reshape(28, 28), cmap='gray') plt.show() def convert_y_to_binary(y, correct_digit): """Converts a vector y taken from MNIST data to binary "is it this digit". y: array of digits. correct_digit: the digit we expect to be "correct" Returns array of +1 or -1; +1 where the original y had the "correct" digit, and -1 otherwise. The returned array is always a column vector. """ return np.where(y == correct_digit, np.ones_like(y), -1 * np.ones_like(y)).reshape(y.size, 1) if __name__ == '__main__': train, valid, test = get_mnist_data() print('Train shapes:', train[0].shape, train[1].shape) print('Valid shapes:', valid[0].shape, valid[1].shape) print('Test shapes:', test[0].shape, test[1].shape) #display_mnist_image(train[0][20], train[1][20]) display_multiple_images((train[0][9974], train[0][9734], train[0][9161], train[0][8788]))
data_processing/updated_avg_files.py
astraetech/Options_Data_Science
292
11128362
""" Say you Initially This script takes the Options_averages_calls.db & Options_averages_puts.db files created with contracts_avg_volume.py combines it with the """ import sqlite3 import os import pandas as pd import time from sqlalchemy import create_engine from pandas.io.sql import DatabaseError os.system('afplay /System/Library/Sounds/Sosumi.aiff') def get_time_now(): curr_time = time.localtime() curr_clock = time.strftime("%H:%M:%S", curr_time) curr_m = time.strftime('%m') curr_y_d = time.strftime('%d%Y') int_curr_clock = int(f'{curr_clock[:2]}{curr_clock[3:5]}') return int_curr_clock, curr_m, curr_y_d t, mon, day = get_time_now() def add_rows(clean_data, table_name, calls_puts, d): temp_d = d # d = int(d) - 10000 # for doing averages of yesterday d = int(d) if temp_d[0] == '0': engine = create_engine(f'sqlite:///Options_averages_{calls_puts}_0{d}.db', echo=False) else: engine = create_engine(f'sqlite:///Options_averages_{calls_puts}_{d}.db', echo=False) clean_data.to_sql(table_name, con=engine, if_exists='append', index_label='index') return 0 def update_averages(data_file, call_file, put_file): con = sqlite3.connect(f'AvgData/{data_file}') # next mining day file con_c = sqlite3.connect(f'{call_file}') # last average file con_p = sqlite3.connect(f'{put_file}') # last average file cursor = con.cursor() cursor.execute("SELECT name FROM sqlite_master WHERE type='table'") for tab in cursor.fetchall(): Tab = str(tab[0]) temp_date = (mon+day)[:4] if Tab[0][0] == 'c': calls = pd.read_sql_query( f'SELECT symbol, totalVolume, quoteTimeInLong FROM \'{Tab}\' WHERE (totalVolume > 10) AND (symbol LIKE \"%!_07%\" ESCAPE \"!\" OR symbol LIKE \"%!_08%\" ESCAPE \"!\") ', con) symbol_count = 0 for i, r in calls.iterrows(): temp_sym = r['symbol'] temp_stock = temp_sym.split('_')[0] next_date = temp_sym.split('_')[1][:4] if next_date < (mon+day)[:4]: continue if next_date >= temp_date: temp_date = next_date try: temp_calls = pd.read_sql_query(f'SELECT * FROM \'{Tab + temp_date}\' ', con_c) if temp_sym in temp_calls.values: temp_row = temp_calls.loc[temp_calls['symbol'] == temp_sym].copy() temp_row.drop(['index'], axis='columns', inplace=True) temp_row.index = [symbol_count] temp_avg = temp_row['avgVolume'] * temp_row['daysComputed'] temp_days = temp_row['daysComputed'] + 1 temp_avg = (temp_avg + r['totalVolume']) // temp_days temp_row.at[symbol_count, 'avgVolume'] = temp_avg temp_row.at[symbol_count, 'daysComputed'] = temp_days symbol_count = symbol_count + 1 add_rows(temp_row, f'c{temp_stock}{next_date}', 'calls', f'{mon}{day}') else: new_row = {'symbol': [temp_sym], 'avgVolume': [r['totalVolume']], 'daysComputed': [1]} working_new = pd.DataFrame(data=new_row) working_new.index = [symbol_count] add_rows(working_new, f'c{temp_stock}{next_date}', 'calls', f'{mon}{day}') symbol_count = symbol_count + 1 except DatabaseError as e: if e.args[0].startswith('Execution failed on sql'): new_row = {'symbol': [temp_sym], 'avgVolume': [r['totalVolume']], 'daysComputed': [1]} working_new = pd.DataFrame(data=new_row) working_new.index = [symbol_count] add_rows(working_new, f'c{temp_stock}{next_date}', 'calls', f'{mon}{day}') symbol_count = symbol_count + 1 else: pass else: pass else: puts = pd.read_sql_query( f'SELECT symbol, totalVolume, quoteTimeInLong FROM \'{Tab}\' WHERE (totalVolume > 10) AND (symbol LIKE \"%!_07%\" ESCAPE \"!\" OR symbol LIKE \"%!_08%\" ESCAPE \"!\") ', con) symbol_count = 0 for i, r in puts.iterrows(): temp_sym = r['symbol'] temp_stock = temp_sym.split('_')[0] next_date = 
temp_sym.split('_')[1][:4] if next_date < (mon + day)[:4]: continue if next_date >= temp_date: temp_date = next_date try: temp_puts = pd.read_sql_query(f'SELECT * FROM \'{Tab + temp_date}\' ', con_p) if temp_sym in temp_puts.values: temp_row = temp_puts.loc[temp_puts['symbol'] == temp_sym].copy() temp_row.drop(['index'], axis='columns', inplace=True) temp_row.index = [symbol_count] temp_avg = temp_row['avgVolume'] * temp_row['daysComputed'] temp_days = temp_row['daysComputed'] + 1 temp_avg = (temp_avg + r['totalVolume']) // temp_days temp_row.at[symbol_count, 'avgVolume'] = temp_avg temp_row.at[symbol_count, 'daysComputed'] = temp_days symbol_count = symbol_count + 1 add_rows(temp_row, f'p{temp_stock}{next_date}', 'puts', f'{mon}{day}') else: new_row = {'symbol': [temp_sym], 'avgVolume': [r['totalVolume']], 'daysComputed': [1]} working_new = pd.DataFrame(data=new_row) working_new.index = [symbol_count] add_rows(working_new, f'p{temp_stock}{next_date}', 'puts', f'{mon}{day}') symbol_count = symbol_count + 1 except DatabaseError as e: if e.args[0].startswith('Execution failed on sql'): new_row = {'symbol': [temp_sym], 'avgVolume': [r['totalVolume']], 'daysComputed': [1]} working_new = pd.DataFrame(data=new_row) working_new.index = [symbol_count] add_rows(working_new, f'p{temp_stock}{next_date}', 'puts', f'{mon}{day}') symbol_count = symbol_count + 1 else: pass else: pass con_c.close() con_p.close() con.close() return 0 # update_averages(Data file of mined data from previous day ) update_averages('Options_jul282021.db', 'Options_averages_calls_07272021.db', 'Options_averages_puts_07272021.db') os.system('say "Finished processing the average volume files."') os.system('afplay /System/Library/Sounds/Sosumi.aiff')
miscellanies/torch/checkpoint.py
zhangzhengde0225/SwinTrack
143
11128363
import torch
import os
import shutil

from miscellanies.torch.distributed import is_main_process


def _get_training_state_file_path(model_state_file_path: str):
    return os.path.join(os.path.dirname(model_state_file_path),
                        os.path.splitext(os.path.basename(model_state_file_path))[0] + '-training.pth')


def _safe_rename(model_state_file_path, training_state_file_path, overwrite_existing=True):
    if overwrite_existing and os.path.exists(model_state_file_path):
        os.remove(model_state_file_path)
    if overwrite_existing and os.path.exists(training_state_file_path):
        os.remove(training_state_file_path)
    os.rename(model_state_file_path + '.tmp', model_state_file_path)
    os.rename(training_state_file_path + '.tmp', training_state_file_path)


def _fail_safe_save(path, model_state_dict, training_state_dict, overwrite_existing=True):
    training_state_file_path = _get_training_state_file_path(path)
    torch.save(model_state_dict, path + '.tmp')
    torch.save(training_state_dict, training_state_file_path + '.tmp')
    _safe_rename(path, training_state_file_path, overwrite_existing)


def _fail_safe_copy(src_path, dst_path, overwrite_existing=True):
    src_training_state_file_path = _get_training_state_file_path(src_path)
    dst_training_state_file_path = _get_training_state_file_path(dst_path)
    shutil.copy(src_path, dst_path + '.tmp')
    shutil.copy(src_training_state_file_path, dst_training_state_file_path + '.tmp')
    _safe_rename(dst_path, dst_training_state_file_path, overwrite_existing)


def dump_checkpoint(model_state_dict, objects_state_dict, epoch, output_path):
    if not is_main_process():
        return
    checkpoint_path = os.path.join(output_path, 'checkpoint.pth')
    _fail_safe_save(checkpoint_path, model_state_dict, objects_state_dict)
    backup_checkpoint_path = os.path.join(output_path, f'checkpoint{epoch:04}.pth')
    _fail_safe_copy(checkpoint_path, backup_checkpoint_path)


def load_checkpoint(checkpoint_path: str):
    training_state_file_path = _get_training_state_file_path(checkpoint_path)
    model_state_dict = torch.load(checkpoint_path, map_location='cpu')
    training_state_dict = torch.load(training_state_file_path, map_location='cpu')
    return model_state_dict, training_state_dict
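# A hedged sketch of a save/resume round trip with the helpers above. The
# model, optimizer and output directory are placeholders, and the sketch
# assumes a single-process run where is_main_process() returns True and
# ./checkpoints already exists.
def _example_checkpoint_round_trip():
    import torch.nn as nn
    import torch.optim as optim

    model = nn.Linear(4, 2)                                  # placeholder model
    optimizer = optim.SGD(model.parameters(), lr=0.1)
    output_path = './checkpoints'                            # assumed to exist

    # Writes checkpoint.pth plus a per-epoch backup (checkpoint0003.pth).
    dump_checkpoint(model.state_dict(),
                    {'optimizer': optimizer.state_dict(), 'epoch': 3},
                    3, output_path)

    # Later: restore both the model weights and the training state.
    model_state, training_state = load_checkpoint(os.path.join(output_path, 'checkpoint.pth'))
    model.load_state_dict(model_state)
    optimizer.load_state_dict(training_state['optimizer'])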
examples/integrations/opfython/supervised_opf_feature_selection.py
anukaal/opytimizer
528
11128386
<filename>examples/integrations/opfython/supervised_opf_feature_selection.py import opfython.math.general as g import opfython.stream.splitter as s from opfython.models.supervised import SupervisedOPF from sklearn.datasets import load_digits import opytimizer.math.random as r from opytimizer import Opytimizer from opytimizer.core import Function from opytimizer.optimizers.boolean import BPSO from opytimizer.spaces import BooleanSpace # Loads digits dataset digits = load_digits() # Gathers samples and targets X = digits.data Y = digits.target # Adding 1 to labels, i.e., OPF should have labels from 1+ Y += 1 # Splits data into training and testing sets X_train, X_val, Y_train, Y_val = s.split( X, Y, percentage=0.5, random_state=1) def supervised_opf_feature_selection(opytimizer): # Gathers features features = opytimizer[:, 0].astype(bool) # Remaking training and validation subgraphs with selected features X_train_selected = X_train[:, features] X_val_selected = X_val[:, features] # Creates a SupervisedOPF instance opf = SupervisedOPF(distance='log_squared_euclidean', pre_computed_distance=None) # Fits training data into the classifier opf.fit(X_train_selected, Y_train) # Predicts new data preds = opf.predict(X_val_selected) # Calculates accuracy acc = g.opf_accuracy(Y_val, preds) return 1 - acc # Number of agents and decision variables n_agents = 5 n_variables = 64 # Parameters for the optimizer params = { 'c1': r.generate_binary_random_number(size=(n_variables, 1)), 'c2': r.generate_binary_random_number(size=(n_variables, 1)) } # Creates the space, optimizer and function space = BooleanSpace(n_agents, n_variables) optimizer = BPSO() function = Function(supervised_opf_feature_selection) # Bundles every piece into Opytimizer class opt = Opytimizer(space, optimizer, function) # Runs the optimization task opt.start(n_iterations=3)
modules/templates/locations/PK/config.py
whanderley/eden
205
11128403
# -*- coding: utf-8 -*- from gluon import current def config(settings): """ Template settings for Pakistan - designed to be used in a Cascade with an application template """ #T = current.T # Pre-Populate settings.base.prepopulate.append("locations/PK") # Uncomment to restrict to specific country/countries settings.gis.countries.append("PK") # Disable the Postcode selector in the LocationSelector settings.gis.postcode_selector = False # L10n (Localization) settings settings.L10n.languages["ur"] = "Urdu" settings.L10n.languages["pa"] = "Punjabi" settings.L10n.languages["ps"] = "Pashto" settings.L10n.languages["sd"] = "Sindhi" settings.L10n.languages["bal"] = "Balochi" # Default Language (put this in custom template if-required) #settings.L10n.default_language = "ur" # Default timezone for users settings.L10n.timezone = "Asia/Karachi" # Default Country Code for telephone numbers settings.L10n.default_country_code = 92 settings.fin.currencies["PKR"] = "Pakistani Rupees" settings.fin.currency_default = "PKR" # END =========================================================================
tests/strategies/test_blshsl.py
Dexius/12test12
103
11128411
<reponame>Dexius/12test12
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_blshsl.py
----------------------------------

This will test the blshsl strategy, which basically means buy low, sell high,
stop loss.
"""


def test_monoton_raising():
    """Monotonically rising chart. The state is still in the initial phase, therefore no signal is emitted."""
    from cointrader.indicators import followtrend
    chart1 = [(0, 0), (0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6), (0, 7), (0, 8), (0, 9)]
    signal = followtrend(chart1)
    assert signal.value == 0
pixelssl/nn/module/gaussian_blur.py
charlesCXK/PixelSSL
223
11128418
<reponame>charlesCXK/PixelSSL import math import numpy as np import scipy.ndimage import torch import torch.nn as nn from pixelssl.utils import logger class GaussianBlurLayer(nn.Module): """ Add Gaussian Blur to a 4D tensor This layer takes a 4D tensor of {N, C, H, W} as input. The Gaussian blur will be performed in given channel number (C) splitly. """ def __init__(self, channels, kernel_size): """ Arguments: channels (int): Channel for input tensor kernel_size (int): Size of the kernel used in blurring """ super(GaussianBlurLayer, self).__init__() self.channels = channels self.kernel_size = kernel_size assert self.kernel_size % 2 != 0 self.op = nn.Sequential( nn.ReflectionPad2d(math.floor(self.kernel_size / 2)), nn.Conv2d(channels, channels, self.kernel_size, stride=1, padding=0, bias=None, groups=channels) ) self._init_kernel() def forward(self, x): """ Arguments: x (torch.Tensor): input 4D tensor Returns: torch.Tensor: Blurred version of the input """ if not len(list(x.shape)) == 4: logger.log_err('\'GaussianBlurLayer\' requires a 4D tensor as input\n') elif not x.shape[1] == self.channels: logger.log_err('In \'GaussianBlurLayer\', the required channel ({0}) is' 'not the same as input ({1})\n'.format(self.channels, x.shape[1])) return self.op(x) def _init_kernel(self): sigma = 0.3 * ((self.kernel_size - 1) * 0.5 - 1) + 0.8 n = np.zeros((self.kernel_size, self.kernel_size)) i = math.floor(self.kernel_size / 2) n[i, i] = 1 kernel = scipy.ndimage.gaussian_filter(n, sigma) for name, param in self.named_parameters(): param.data.copy_(torch.from_numpy(kernel))
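# A minimal usage sketch of the layer above: blur a random 4D batch whose
# channel count matches the `channels` argument.
if __name__ == "__main__":
    blur = GaussianBlurLayer(channels=3, kernel_size=5)
    x = torch.rand(2, 3, 64, 64)          # N x C x H x W
    with torch.no_grad():
        y = blur(x)
    print(y.shape)                        # torch.Size([2, 3, 64, 64])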
tests/test_efs/junk_drawer.py
oakbramble/moto
5,460
11128419
def has_status_code(response, code):
    return response["ResponseMetadata"]["HTTPStatusCode"] == code
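# Illustrative only: the dict below mimics the response metadata shape that
# boto3 clients return, so the helper can be exercised without moto itself.
if __name__ == "__main__":
    fake_response = {"ResponseMetadata": {"HTTPStatusCode": 201}}
    assert has_status_code(fake_response, 201)
    assert not has_status_code(fake_response, 404)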
sdk/python/kubeflow/pytorchjob/models/v1_py_torch_job_spec.py
happy2048/pytorch-operator
312
11128458
<reponame>happy2048/pytorch-operator # Copyright 2019 kubeflow.org. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # coding: utf-8 """ pytorch Python SDK for PyTorch-Operator # noqa: E501 OpenAPI spec version: v0.1 Generated by: https://github.com/swagger-api/swagger-codegen.git """ import pprint import re # noqa: F401 import six from kubeflow.pytorchjob.models.v1_replica_spec import V1ReplicaSpec # noqa: F401,E501 class V1PyTorchJobSpec(object): """NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'active_deadline_seconds': 'int', 'backoff_limit': 'int', 'clean_pod_policy': 'str', 'pytorch_replica_specs': 'dict(str, V1ReplicaSpec)', 'ttl_seconds_after_finished': 'int' } attribute_map = { 'active_deadline_seconds': 'activeDeadlineSeconds', 'backoff_limit': 'backoffLimit', 'clean_pod_policy': 'cleanPodPolicy', 'pytorch_replica_specs': 'pytorchReplicaSpecs', 'ttl_seconds_after_finished': 'ttlSecondsAfterFinished' } def __init__(self, active_deadline_seconds=None, backoff_limit=None, clean_pod_policy=None, pytorch_replica_specs=None, ttl_seconds_after_finished=None): # noqa: E501 """V1PyTorchJobSpec - a model defined in Swagger""" # noqa: E501 self._active_deadline_seconds = None self._backoff_limit = None self._clean_pod_policy = None self._pytorch_replica_specs = None self._ttl_seconds_after_finished = None self.discriminator = None if active_deadline_seconds is not None: self.active_deadline_seconds = active_deadline_seconds if backoff_limit is not None: self.backoff_limit = backoff_limit if clean_pod_policy is not None: self.clean_pod_policy = clean_pod_policy self.pytorch_replica_specs = pytorch_replica_specs if ttl_seconds_after_finished is not None: self.ttl_seconds_after_finished = ttl_seconds_after_finished @property def active_deadline_seconds(self): """Gets the active_deadline_seconds of this V1PyTorchJobSpec. # noqa: E501 Specifies the duration (in seconds) since startTime during which the job can remain active before it is terminated. Must be a positive integer. This setting applies only to pods where restartPolicy is OnFailure or Always. # noqa: E501 :return: The active_deadline_seconds of this V1PyTorchJobSpec. # noqa: E501 :rtype: int """ return self._active_deadline_seconds @active_deadline_seconds.setter def active_deadline_seconds(self, active_deadline_seconds): """Sets the active_deadline_seconds of this V1PyTorchJobSpec. Specifies the duration (in seconds) since startTime during which the job can remain active before it is terminated. Must be a positive integer. This setting applies only to pods where restartPolicy is OnFailure or Always. # noqa: E501 :param active_deadline_seconds: The active_deadline_seconds of this V1PyTorchJobSpec. 
# noqa: E501 :type: int """ self._active_deadline_seconds = active_deadline_seconds @property def backoff_limit(self): """Gets the backoff_limit of this V1PyTorchJobSpec. # noqa: E501 Number of retries before marking this job as failed. # noqa: E501 :return: The backoff_limit of this V1PyTorchJobSpec. # noqa: E501 :rtype: int """ return self._backoff_limit @backoff_limit.setter def backoff_limit(self, backoff_limit): """Sets the backoff_limit of this V1PyTorchJobSpec. Number of retries before marking this job as failed. # noqa: E501 :param backoff_limit: The backoff_limit of this V1PyTorchJobSpec. # noqa: E501 :type: int """ self._backoff_limit = backoff_limit @property def clean_pod_policy(self): """Gets the clean_pod_policy of this V1PyTorchJobSpec. # noqa: E501 Defines the policy for cleaning up pods after the PyTorchJob completes. Defaults to None. # noqa: E501 :return: The clean_pod_policy of this V1PyTorchJobSpec. # noqa: E501 :rtype: str """ return self._clean_pod_policy @clean_pod_policy.setter def clean_pod_policy(self, clean_pod_policy): """Sets the clean_pod_policy of this V1PyTorchJobSpec. Defines the policy for cleaning up pods after the PyTorchJob completes. Defaults to None. # noqa: E501 :param clean_pod_policy: The clean_pod_policy of this V1PyTorchJobSpec. # noqa: E501 :type: str """ self._clean_pod_policy = clean_pod_policy @property def pytorch_replica_specs(self): """Gets the pytorch_replica_specs of this V1PyTorchJobSpec. # noqa: E501 A map of PyTorchReplicaType (type) to ReplicaSpec (value). Specifies the PyTorch cluster configuration. For example, { \"Master\": PyTorchReplicaSpec, \"Worker\": PyTorchReplicaSpec, } # noqa: E501 :return: The pytorch_replica_specs of this V1PyTorchJobSpec. # noqa: E501 :rtype: dict(str, V1ReplicaSpec) """ return self._pytorch_replica_specs @pytorch_replica_specs.setter def pytorch_replica_specs(self, pytorch_replica_specs): """Sets the pytorch_replica_specs of this V1PyTorchJobSpec. A map of PyTorchReplicaType (type) to ReplicaSpec (value). Specifies the PyTorch cluster configuration. For example, { \"Master\": PyTorchReplicaSpec, \"Worker\": PyTorchReplicaSpec, } # noqa: E501 :param pytorch_replica_specs: The pytorch_replica_specs of this V1PyTorchJobSpec. # noqa: E501 :type: dict(str, V1ReplicaSpec) """ if pytorch_replica_specs is None: raise ValueError("Invalid value for `pytorch_replica_specs`, must not be `None`") # noqa: E501 self._pytorch_replica_specs = pytorch_replica_specs @property def ttl_seconds_after_finished(self): """Gets the ttl_seconds_after_finished of this V1PyTorchJobSpec. # noqa: E501 Defines the TTL for cleaning up finished PyTorchJobs (temporary before Kubernetes adds the cleanup controller). It may take extra ReconcilePeriod seconds for the cleanup, since reconcile gets called periodically. Defaults to infinite. # noqa: E501 :return: The ttl_seconds_after_finished of this V1PyTorchJobSpec. # noqa: E501 :rtype: int """ return self._ttl_seconds_after_finished @ttl_seconds_after_finished.setter def ttl_seconds_after_finished(self, ttl_seconds_after_finished): """Sets the ttl_seconds_after_finished of this V1PyTorchJobSpec. Defines the TTL for cleaning up finished PyTorchJobs (temporary before Kubernetes adds the cleanup controller). It may take extra ReconcilePeriod seconds for the cleanup, since reconcile gets called periodically. Defaults to infinite. # noqa: E501 :param ttl_seconds_after_finished: The ttl_seconds_after_finished of this V1PyTorchJobSpec. 
# noqa: E501 :type: int """ self._ttl_seconds_after_finished = ttl_seconds_after_finished def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(V1PyTorchJobSpec, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, V1PyTorchJobSpec): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other
organize/apps.py
vanessa/djangogirls
446
11128484
<filename>organize/apps.py
from django.apps import AppConfig


class OrganizeConfig(AppConfig):
    name = 'organize'
examples/irevnet/models/model_utils.py
xuanyuzhou98/higher
392
11128500
""" Code for "i-RevNet: Deep Invertible Networks" https://openreview.net/pdf?id=HJsjkMb0Z ICLR 2018 """ import torch import torch.nn as nn from torch.nn import Parameter def split(x): n = int(x.size()[1]/2) x1 = x[:, :n, :, :].contiguous() x2 = x[:, n:, :, :].contiguous() return x1, x2 def merge(x1, x2): return torch.cat((x1, x2), 1) class injective_pad(nn.Module): def __init__(self, pad_size): super(injective_pad, self).__init__() self.pad_size = pad_size self.pad = nn.ZeroPad2d((0, 0, 0, pad_size)) def forward(self, x): x = x.permute(0, 2, 1, 3) x = self.pad(x) return x.permute(0, 2, 1, 3) def inverse(self, x): return x[:, :x.size(1) - self.pad_size, :, :] class psi(nn.Module): def __init__(self, block_size): super(psi, self).__init__() self.block_size = block_size self.block_size_sq = block_size*block_size def inverse(self, input): bl, bl_sq = self.block_size, self.block_size_sq bs, new_d, h, w = input.shape[0], input.shape[1] // bl_sq, input.shape[2], input.shape[3] return input.reshape(bs, bl, bl, new_d, h, w).permute(0, 3, 4, 1, 5, 2).reshape(bs, new_d, h * bl, w * bl) def forward(self, input): bl, bl_sq = self.block_size, self.block_size_sq bs, d, new_h, new_w = input.shape[0], input.shape[1], input.shape[2] // bl, input.shape[3] // bl return input.reshape(bs, d, new_h, bl, new_w, bl).permute(0, 3, 5, 1, 2, 4).reshape(bs, d * bl_sq, new_h, new_w) class ListModule(object): def __init__(self, module, prefix, *args): self.module = module self.prefix = prefix self.num_module = 0 for new_module in args: self.append(new_module) def append(self, new_module): if not isinstance(new_module, nn.Module): raise ValueError('Not a Module') else: self.module.add_module(self.prefix + str(self.num_module), new_module) self.num_module += 1 def __len__(self): return self.num_module def __getitem__(self, i): if i < 0 or i >= self.num_module: raise IndexError('Out of bound') return getattr(self.module, self.prefix + str(i)) def get_all_params(var, all_params): if isinstance(var, Parameter): all_params[id(var)] = var.nelement() elif hasattr(var, "creator") and var.creator is not None: if var.creator.previous_functions is not None: for j in var.creator.previous_functions: get_all_params(j[0], all_params) elif hasattr(var, "previous_functions"): for j in var.previous_functions: get_all_params(j[0], all_params)
tools/GraphiteServer.py
miohtama/RedisTimeSeries
643
11128565
<filename>tools/GraphiteServer.py #!/usr/bin/env python from __future__ import print_function import re import argparse import redis from gevent.server import StreamServer REDIS_POOL = None GRAPHITE_PROTO_RE = re.compile(r"(.*?)\s+([\d.]+)\s+(\d+)") def process_connection(socket, _): """ Per-Connection handler, read all lines and send to redis """ # using a makefile because we want to use readline() rfileobj = socket.makefile(mode='rb') redis_client = redis.Redis(connection_pool=REDIS_POOL) while True: line = rfileobj.readline() if not line: # client disconnect break data = GRAPHITE_PROTO_RE.findall(line) if data: # the line is in graphite format try: path, value, timestamp = data[0] value = float(value) timestamp = int(timestamp) except Exception as ex: print("could parse an element %s" % ex) break try: redis_client.execute_command("ts.add", path, timestamp, value) except redis.ResponseError as ex: # small hack, for performance reasons its better to first try to add an metric # instead of checking per metric if it exists or not if 'the key does not exists' in ex.message: redis_client.execute_command("ts.create", path, MAX_RETENTION, SAMPLES_PER_CHUNK) redis_client.execute_command("ts.add", path, timestamp, value) else: raise else: print("line is not in graphite format: %s" % line) break rfileobj.close() def main(): global REDIS_POOL, MAX_RETENTION, SAMPLES_PER_CHUNK parser = argparse.ArgumentParser() parser.add_argument("--host", help="server address to listen to", default="127.0.0.1") parser.add_argument("--port", help="port number to listen to", default=2003, type=int) parser.add_argument("--redis-server", help="redis server address") parser.add_argument("--redis-port", help="redis server port", default=6379, type=int) parser.add_argument("--max-retention", help="default retention time (in seconds)", default=3600, type=int) parser.add_argument("--samples-per-chunk", help="default samples per memory chunk", default=360, type=int) args = parser.parse_args() MAX_RETENTION = args.max_retention SAMPLES_PER_CHUNK = args.samples_per_chunk REDIS_POOL = redis.ConnectionPool(host=args.redis_server, port=args.redis_port) server = StreamServer((args.host, args.port), process_connection) print('Starting Graphite server on %s:%s' % (args.host, args.port)) server.serve_forever() if __name__ == '__main__': main()
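# A hedged sketch of pushing one sample to the listener above over the plain
# Graphite line protocol it parses ("<path> <value> <timestamp>"). Host and
# port match the script's defaults; the metric name is a placeholder.
import socket
import time


def send_sample(path="system.cpu.load", value=0.42, host="127.0.0.1", port=2003):
    sock = socket.create_connection((host, port))
    try:
        line = "%s %f %d\n" % (path, value, int(time.time()))
        sock.sendall(line.encode("ascii"))
    finally:
        sock.close()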
docs/plots/ex_plot_descent_process.py
guillep/DPPy
176
11128583
from dppy.exotic_dpps import DescentProcess

dp = DescentProcess()

size = 100
dp.sample(size)

dp.plot(vs_bernoullis=True)
src/fitly/layouts.py
ethanopp/fitly
107
11128587
<reponame>ethanopp/fitly<gh_stars>100-1000 """Contains layouts suitable for being the value of the 'layout' attribute of Dash app instances. """ from flask import current_app as server import dash_core_components as dcc import dash_html_components as html import dash_bootstrap_components as dbc from .components import make_header, make_sidebar def main_layout_header(): """Dash layout with a top-header""" return html.Div( [ make_header(), dbc.Container( dbc.Row(dbc.Col(id=server.config["CONTENT_CONTAINER_ID"])), fluid=True ), dcc.Location(id=server.config["LOCATION_COMPONENT_ID"], refresh=False), dbc.Toast( id="db-refresh-toast", header="Fit.ly", is_open=False, dismissable=False, icon="danger", # top: 66 positions the toast below the navbar style={"position": "fixed", "top": 66, "right": 10, "width": 350}, children=[ dbc.Row(className='align-items-center text-center', children=[ dbc.Col(className='col-2', children=[dbc.Spinner(size='md', color="danger")]), dbc.Col(className='col-8 text-center', children=['Database Refresh in Progress']) ]) ], ), dcc.Interval(id='db-refresh-toast-interval', interval=3 * 1000, n_intervals=0), ] ) def main_layout_sidebar(): """Dash layout with a sidebar""" return html.Div( [ dbc.Container( fluid=True, children=dbc.Row( [ dbc.Col( make_sidebar(className="px-2"), width=2, className="px-0" ), dbc.Col(id=server.config["CONTENT_CONTAINER_ID"], width=10), ] ), ), dcc.Location(id=server.config["LOCATION_COMPONENT_ID"], refresh=False), ] )
basketball_reference_web_scraper/output/service.py
tttgm/basketball_reference_web_scraper
325
11128617
<filename>basketball_reference_web_scraper/output/service.py
from basketball_reference_web_scraper.data import OutputType


class OutputService:
    def __init__(self, json_writer, csv_writer):
        self.json_writer = json_writer
        self.csv_writer = csv_writer
        self.output_type_writers = {
            OutputType.JSON: self.json_writer,
            OutputType.CSV: self.csv_writer,
        }

    def output(self, data, options):
        if options.output_type is None:
            return data

        writer = self.output_type_writers.get(options.output_type)

        if writer is None:
            raise ValueError("Unknown output type: {output_type}".format(output_type=options.output_type))

        return writer.write(data=data, options=options)
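# A hedged wiring sketch for the service above; the writer class and options
# object are stand-ins, not the package's real JSON/CSV writers.
if __name__ == "__main__":
    from collections import namedtuple

    class EchoWriter:
        def write(self, data, options):
            return "wrote {0} rows as {1}".format(len(data), options.output_type)

    Options = namedtuple("Options", "output_type")
    service = OutputService(json_writer=EchoWriter(), csv_writer=EchoWriter())
    rows = [{"name": "Example Player", "points": 10}]

    print(service.output(rows, Options(OutputType.JSON)))   # routed to the JSON writer
    print(service.output(rows, Options(None)))              # no output type: data returned as-is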
YaraGuardian/API/urls.py
gcnoopy/YaraGuardian
178
11128634
from django.conf.urls import include, url
from rest_framework.authtoken import views

import YaraGuardian.API.account
import YaraGuardian.API.groups
import YaraGuardian.API.rules

urlpatterns = [
    # url(r'^', include('rest_framework_docs.urls')),
    url(r'^token-auth/', views.obtain_auth_token),
    url(r'^account/', include(YaraGuardian.API.account)),
    url(r'^groups/', include(YaraGuardian.API.groups)),
    url(r'^rules/', include(YaraGuardian.API.rules)),
]
gnocchi/indexer/alembic/versions/04eba72e4f90_rename_ck_started_before_ended.py
Dmitry-Eremeev/gnocchi
299
11128725
<reponame>Dmitry-Eremeev/gnocchi<gh_stars>100-1000 # Copyright 2019 The Gnocchi Developers # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """rename ck_started_before_ended Revision ID: 04eba72e4f90 Revises: <PASSWORD> Create Date: 2019-10-01 11:19:38.865522 """ from alembic import op from sqlalchemy.engine.reflection import Inspector # revision identifiers, used by Alembic. revision = '04eba72e4f90' down_revision = '<PASSWORD>' branch_labels = None depends_on = None def upgrade(): bind = op.get_bind() inspector = Inspector.from_engine(bind) for table in ("resource", "resource_history"): existing_cks = [ c['name'] for c in inspector.get_check_constraints(table) ] if "ck_started_before_ended" in existing_cks: # Drop non-uniquely named check constraints # for consistency across DB types. op.drop_constraint("ck_started_before_ended", table, type_="check") new_ck_name = "ck_{}_started_before_ended".format(table) if new_ck_name not in existing_cks: # Re-create check constraint with unique name # if needed op.create_check_constraint(new_ck_name, table, "started_at <= ended_at")
benchmarks/compute/fib.py
Schweinepriester/oil
2,209
11128741
<reponame>Schweinepriester/oil
#!/usr/bin/env python2
"""
fib.py
"""
from __future__ import print_function

import sys


def main(argv):
    try:
        iters = int(argv[1])
    except IndexError:
        iters = 5

    try:
        n = int(argv[2])
    except IndexError:
        n = 10

    i = 0
    while i < iters:
        j = 0
        a = 1
        b = 1
        while j < n:
            a, b = b, a+b
            j += 1
        print(b)
        i += 1


if __name__ == '__main__':
    try:
        main(sys.argv)
    except RuntimeError as e:
        print('FATAL: %s' % e, file=sys.stderr)
        sys.exit(1)
.ycm_extra_conf.py
meyersbs/vip3r
124
11128811
<reponame>meyersbs/vip3r<gh_stars>100-1000 import os import ycm_core from clang_helpers import PrepareClangFlags compilation_database_folder = '' flags = [ '-Wall', '-Wextra', '-Werror', '-Wno-long-long', '-Wno-variadic-macros', '-fexceptions', '-DNDEBUG', '-DUSE_CLANG_COMPLETER', '-D_HAVE_NOTIFY', '-DPONYMIX_VERSION="1"', '-std=c++14', '-x', 'c++', '-I/usr/include/glib-2.0', '-I/usr/lib/glib-2.0/include', ] if compilation_database_folder: database = ycm_core.CompilationDatabase( compilation_database_folder ) else: database = None def DirectoryOfThisScript(): return os.path.dirname( os.path.abspath( __file__ ) ) def MakeRelativePathsInFlagsAbsolute( flags, working_directory ): if not working_directory: return flags new_flags = [] make_next_absolute = False path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ] for flag in flags: new_flag = flag if make_next_absolute: make_next_absolute = False if not flag.startswith( '/' ): new_flag = os.path.join( working_directory, flag ) for path_flag in path_flags: if flag == path_flag: make_next_absolute = True break if flag.startswith( path_flag ): path = flag[ len( path_flag ): ] new_flag = path_flag + os.path.join( working_directory, path ) break if new_flag: new_flags.append( new_flag ) return new_flags def FlagsForFile( filename ): if database: # Bear in mind that compilation_info.compiler_flags_ does NOT return a # python list, but a "list-like" StringVec object compilation_info = database.GetCompilationInfoForFile( filename ) final_flags = PrepareClangFlags( MakeRelativePathsInFlagsAbsolute( compilation_info.compiler_flags_, compilation_info.compiler_working_dir_ ), filename ) # NOTE: This is just for YouCompleteMe; it's highly likely that your project # does NOT need to remove the stdlib flag. DO NOT USE THIS IN YOUR # ycm_extra_conf IF YOU'RE NOT 100% YOU NEED IT. try: final_flags.remove( '-stdlib=libc++' ) except ValueError: pass else: relative_to = DirectoryOfThisScript() final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to ) return { 'flags': final_flags, 'do_cache': True }
pytorch_wrapper/evaluators.py
skatsaounis/pytorch-wrapper
111
11128864
import numpy as np from abc import ABC, abstractmethod from sklearn import metrics from . import functional as pwF class AbstractEvaluatorResults(ABC): """ Objects of derives classes encapsulate results of an evaluation metric. """ @abstractmethod def is_better_than(self, other_results_object): """ Compares these results with the results of another object. :param other_results_object: Object of the same class. """ pass @abstractmethod def compare_to(self, other_results_object): """ Compares these results with the results of another object. :param other_results_object: Object of the same class. """ pass @abstractmethod def __str__(self): pass def __repr__(self): return self.__str__() class GenericEvaluatorResults(AbstractEvaluatorResults): """ Generic evaluator results. """ def __init__(self, score, label='score', score_format='%f', is_max_better=True): """ :param score: Numeric value that represents the score. :param label: String used in the str representation. :param score_format: Format String used in the str representation. :param is_max_better: Flag that signifies if larger means better. """ super(GenericEvaluatorResults, self).__init__() self._score = score self._label = label self._score_format = score_format self._is_max_better = is_max_better @property def score(self): return self._score @property def is_max_better(self): return self._is_max_better def is_better_than(self, other_results_object): if other_results_object is None: return True if self._is_max_better: return self.compare_to(other_results_object) > 0 else: return self.compare_to(other_results_object) < 0 def compare_to(self, other_results_object): return self._score - other_results_object.score def __str__(self): return (self._label + ': ' + self._score_format) % self._score class AbstractEvaluator(ABC): """ Objects of derived classes are used to evaluate a model on a dataset using a specific metric. """ def __init__(self): self.reset() @abstractmethod def reset(self): """ (Re)initializes the object. Called at the beginning of the evaluation step. """ pass @abstractmethod def step(self, output, batch, last_activation=None): """ Gathers information needed for performance measurement about a single batch. Called after each batch in the evaluation step. :param output: Output of the model. :param batch: Dict that contains all information needed for a single batch by the evaluator. :param last_activation: The last activation of the model. Some losses work with logits and as such the last activation might not be performed inside the model's forward method. """ pass @abstractmethod def calculate(self): """ Called after all batches have been processed. Calculates the metric. :return: AbstractEvaluatorResults object. """ pass def calculate_at_once(self, output, dataset, last_activation=None): """ Calculates the metric at once for the whole dataset. :param output: Output of the model. :param dataset: Dict that contains all information needed for a dataset by the evaluator. :param last_activation: The last activation of the model. Some losses work with logits and as such the last activation might not be performed inside the model's forward method. :return: AbstractEvaluatorResults object. """ self.reset() self.step(output, dataset, last_activation) return self.calculate() class GenericPointWiseLossEvaluator(AbstractEvaluator): """ Adapter that uses an object of a class derived from AbstractLossWrapper to calculate the loss during evaluation. 
""" def __init__(self, loss_wrapper, label='loss', score_format='%f', batch_target_key='target'): """ :param loss_wrapper: AbstractLossWrapper object that calculates the loss. :param label: Str used as label during printing of the loss. :param score_format: Format used for str representation of the loss. :param batch_target_key: Key where the dict (batch) contains the target values. """ super(GenericPointWiseLossEvaluator, self).__init__() self._loss_wrapper = loss_wrapper self._label = label self._score_format = score_format self._batch_target_key = batch_target_key self.reset() def reset(self): self._loss = 0 self._examples_nb = 0 def step(self, output, batch, last_activation=None): current_loss = self._loss_wrapper.calculate_loss(output, batch, None, last_activation).item() self._loss += current_loss * batch[self._batch_target_key].shape[0] self._examples_nb += batch[self._batch_target_key].shape[0] def calculate(self): return GenericEvaluatorResults( self._loss / self._examples_nb, self._label, self._score_format, is_max_better=False ) class AccuracyEvaluator(AbstractEvaluator): """ Accuracy evaluator. """ def __init__(self, threshold=0.5, model_output_key=None, batch_target_key='target'): """ :param threshold: Threshold above which an example is considered positive. :param model_output_key: Key where the dict returned by the model contains the actual predictions. Leave None if the model returns only the predictions. :param batch_target_key: Key where the dict (batch) contains the target values. """ super(AccuracyEvaluator, self).__init__() self._threshold = threshold self._model_output_key = model_output_key self._batch_target_key = batch_target_key self.reset() def reset(self): self._outputs = [] self._targets = [] def step(self, output, batch, last_activation=None): if self._model_output_key is not None: output = output[self._model_output_key] if last_activation is not None: output = last_activation(output) self._outputs.extend(output.tolist()) self._targets.extend(batch[self._batch_target_key].tolist()) def calculate(self): predictions = np.array(self._outputs) > self._threshold targets = np.array(self._targets) > self._threshold correct = (predictions == targets).sum() return GenericEvaluatorResults( 100.0 * correct / predictions.size, 'acc', '%5.2f%%', is_max_better=True ) class MultiClassAccuracyEvaluator(AbstractEvaluator): """ Multi-Class Accuracy evaluator. """ def __init__(self, model_output_key=None, batch_target_key='target'): """ :param model_output_key: Key where the dict returned by the model contains the actual predictions. Leave None if the model returns only the predictions. :param batch_target_key: Key where the dict (batch) contains the target values. """ super(MultiClassAccuracyEvaluator, self).__init__() self._model_output_key = model_output_key self._batch_target_key = batch_target_key self.reset() def reset(self): self._outputs = [] self._targets = [] def step(self, output, batch, last_activation=None): if self._model_output_key is not None: output = output[self._model_output_key] self._outputs.extend(output.tolist()) self._targets.extend(batch[self._batch_target_key].tolist()) def calculate(self): predictions = np.array(self._outputs).argmax(axis=-1) correct = (predictions == self._targets).sum() return GenericEvaluatorResults( 100.0 * correct / predictions.shape[0], 'acc', '%5.2f%%', is_max_better=True ) class AUROCEvaluator(AbstractEvaluator): """ AUROC evaluator. 
""" def __init__(self, model_output_key=None, batch_target_key='target', average='macro', target_threshold=0.5): """ :param model_output_key: Key where the dict returned by the model contains the actual predictions. Leave None if the model returns only the predictions. :param batch_target_key: Key where the dict (batch) contains the target values. :param average: Type ['macro' or 'micro'] of averaging performed on the results in case of multi-label task. """ super(AUROCEvaluator, self).__init__() self._model_output_key = model_output_key self._batch_target_key = batch_target_key self._average = average self._target_threshold = target_threshold self.reset() def reset(self): self._outputs = [] self._targets = [] def step(self, output, batch, last_activation=None): if self._model_output_key is not None: output = output[self._model_output_key] if last_activation is not None: output = last_activation(output) self._outputs.extend(output.tolist()) self._targets.extend(batch[self._batch_target_key].tolist()) def calculate(self): return GenericEvaluatorResults(metrics.roc_auc_score( y_score=np.array(self._outputs, dtype='float32'), y_true=np.array(self._targets) > self._target_threshold, average=self._average ), 'auroc', '%5.4f', is_max_better=True) class PrecisionEvaluator(AbstractEvaluator): """ Precision evaluator. """ def __init__(self, threshold=0.5, model_output_key=None, batch_target_key='target', average='binary'): """ :param threshold: Threshold above which an example is considered positive. :param model_output_key: Key where the dict returned by the model contains the actual predictions. Leave None if the model returns only the predictions. :param batch_target_key: Key where the dict (batch) contains the target values. :param average: Type ['binary', 'macro' or 'micro'] of averaging performed on the results. """ super(PrecisionEvaluator, self).__init__() self._threshold = threshold self._model_output_key = model_output_key self._batch_target_key = batch_target_key self._average = average self.reset() def reset(self): self._outputs = [] self._targets = [] def step(self, output, batch, last_activation=None): if self._model_output_key is not None: output = output[self._model_output_key] if last_activation is not None: output = last_activation(output) self._outputs.extend(output.tolist()) self._targets.extend(batch[self._batch_target_key].tolist()) def calculate(self): return GenericEvaluatorResults(metrics.precision_score( y_pred=np.array(self._outputs) > self._threshold, y_true=np.array(self._targets) > self._threshold, average=self._average ), self._average + '-precision', '%5.4f', is_max_better=True) class MultiClassPrecisionEvaluator(AbstractEvaluator): """ Multi-Class Precision evaluator. """ def __init__(self, model_output_key=None, batch_target_key='target', average='macro'): """ :param model_output_key: Key where the dict returned by the model contains the actual predictions. Leave None if the model returns only the predictions. :param batch_target_key: Key where the dict (batch) contains the target values. :param average: Type ['macro' or 'micro'] of averaging performed on the results. 
""" super(MultiClassPrecisionEvaluator, self).__init__() self._model_output_key = model_output_key self._batch_target_key = batch_target_key self._average = average self.reset() def reset(self): self._outputs = [] self._targets = [] def step(self, output, batch, last_activation=None): if self._model_output_key is not None: output = output[self._model_output_key] if last_activation is not None: output = last_activation(output) self._outputs.extend(output.tolist()) self._targets.extend(batch[self._batch_target_key].tolist()) def calculate(self): return GenericEvaluatorResults(metrics.precision_score( y_pred=np.array(self._outputs).argmax(axis=-1), y_true=np.array(self._targets), average=self._average ), self._average + '-precision', '%5.4f', is_max_better=True) class RecallEvaluator(AbstractEvaluator): """ Recall evaluator. """ def __init__(self, threshold=0.5, model_output_key=None, batch_target_key='target', average='binary'): """ :param threshold: Threshold above which an example is considered positive. :param model_output_key: Key where the dict returned by the model contains the actual predictions. Leave None if the model returns only the predictions. :param batch_target_key: Key where the dict (batch) contains the target values. :param average: Type ['binary', 'macro' or 'micro'] of averaging performed on the results. """ super(RecallEvaluator, self).__init__() self._threshold = threshold self._model_output_key = model_output_key self._batch_target_key = batch_target_key self._average = average self.reset() def reset(self): self._outputs = [] self._targets = [] def step(self, output, batch, last_activation=None): if self._model_output_key is not None: output = output[self._model_output_key] if last_activation is not None: output = last_activation(output) self._outputs.extend(output.tolist()) self._targets.extend(batch[self._batch_target_key].tolist()) def calculate(self): return GenericEvaluatorResults(metrics.recall_score( y_pred=np.array(self._outputs) > self._threshold, y_true=np.array(self._targets) > self._threshold, average=self._average ), self._average + '-recall', '%5.4f', is_max_better=True) class MultiClassRecallEvaluator(AbstractEvaluator): """ Multi-Class Recall evaluator. """ def __init__(self, model_output_key=None, batch_target_key='target', average='macro'): """ :param model_output_key: Key where the dict returned by the model contains the actual predictions. Leave None if the model returns only the predictions. :param batch_target_key: Key where the dict (batch) contains the target values. :param average: Type ['macro' or 'micro'] of averaging performed on the results. """ super(MultiClassRecallEvaluator, self).__init__() self._model_output_key = model_output_key self._batch_target_key = batch_target_key self._average = average self.reset() def reset(self): self._outputs = [] self._targets = [] def step(self, output, batch, last_activation=None): if self._model_output_key is not None: output = output[self._model_output_key] if last_activation is not None: output = last_activation(output) self._outputs.extend(output.tolist()) self._targets.extend(batch[self._batch_target_key].tolist()) def calculate(self): return GenericEvaluatorResults(metrics.recall_score( y_pred=np.array(self._outputs).argmax(axis=-1), y_true=np.array(self._targets), average=self._average ), self._average + '-recall', '%5.4f', is_max_better=True) class F1Evaluator(AbstractEvaluator): """ F1 evaluator. 
""" def __init__(self, threshold=0.5, model_output_key=None, batch_target_key='target', average='binary'): """ :param threshold: Threshold above which an example is considered positive. :param model_output_key: Key where the dict returned by the model contains the actual predictions. Leave None if the model returns only the predictions. :param batch_target_key: Key where the dict (batch) contains the target values. :param average: Type ['binary', 'macro' or 'micro'] of averaging performed on the results. """ super(F1Evaluator, self).__init__() self._threshold = threshold self._model_output_key = model_output_key self._batch_target_key = batch_target_key self._average = average self.reset() def reset(self): self._outputs = [] self._targets = [] def step(self, output, batch, last_activation=None): if self._model_output_key is not None: output = output[self._model_output_key] if last_activation is not None: output = last_activation(output) self._outputs.extend(output.tolist()) self._targets.extend(batch[self._batch_target_key].tolist()) def calculate(self): return GenericEvaluatorResults(metrics.f1_score( y_pred=np.array(self._outputs) > self._threshold, y_true=np.array(self._targets) > self._threshold, average=self._average ), self._average + '-f1', '%5.4f', is_max_better=True) class MultiClassF1Evaluator(AbstractEvaluator): """ Multi-Class F1 evaluator. """ def __init__(self, model_output_key=None, batch_target_key='target', average='macro'): """ :param model_output_key: Key where the dict returned by the model contains the actual predictions. Leave None if the model returns only the predictions. :param batch_target_key: Key where the dict (batch) contains the target values. :param average: Type ['macro' or 'micro'] of averaging performed on the results. """ super(MultiClassF1Evaluator, self).__init__() self._model_output_key = model_output_key self._batch_target_key = batch_target_key self._average = average self.reset() def reset(self): self._outputs = [] self._targets = [] def step(self, output, batch, last_activation=None): if self._model_output_key is not None: output = output[self._model_output_key] if last_activation is not None: output = last_activation(output) self._outputs.extend(output.tolist()) self._targets.extend(batch[self._batch_target_key].tolist()) def calculate(self): return GenericEvaluatorResults(metrics.f1_score( y_pred=np.array(self._outputs).argmax(axis=-1), y_true=np.array(self._targets), average=self._average ), self._average + '-f1', '%5.4f', is_max_better=True) class TokenLabelingEvaluatorWrapper(AbstractEvaluator): """ Adapter that wraps an evaluator. It is used in token labeling tasks in order to flat the output and target while discarding invalid values due to padding. """ def __init__(self, evaluator, batch_input_sequence_length_idx, batch_input_key='input', model_output_key=None, batch_target_key='target', end_padded=True): """ :param evaluator: The evaluator. :param batch_input_sequence_length_idx: The index of the input list where the lengths of the sequences can be found. :param batch_input_key: Key of the Dicts returned by the Dataloader objects that corresponds to the input of the model. :param model_output_key: Key where the dict returned by the model contains the actual predictions. Leave None if the model returns only the predictions. :param batch_target_key: Key where the dict (batch) contains the target values. :param end_padded: Whether the sequences are end-padded. 
""" self._evaluator = evaluator super(TokenLabelingEvaluatorWrapper, self).__init__() self._batch_input_sequence_length_idx = batch_input_sequence_length_idx self._batch_input_key = batch_input_key self._model_output_key = model_output_key self._batch_target_key = batch_target_key self._end_padded = end_padded self.reset() def reset(self): self._evaluator.reset() def step(self, output, batch, last_activation=None): if self._model_output_key is not None: output = output[self._model_output_key] mask = pwF.create_mask_from_length( batch[self._batch_input_key][self._batch_input_sequence_length_idx].to(output.device), output.shape[1], self._end_padded ).view(-1) new_output = output.view(output.shape[0] * output.shape[1], -1).squeeze(-1) batch_targets = batch[self._batch_target_key] batch_targets = batch_targets.view(batch_targets.shape[0] * batch_targets.shape[1], -1).squeeze(-1) new_output = new_output[mask] batch_targets = batch_targets[mask] new_batch = {k: batch[k] for k in batch if k != self._batch_target_key} new_batch[self._batch_target_key] = batch_targets self._evaluator.step(new_output, new_batch, last_activation) def calculate(self): return self._evaluator.calculate()
modules/usm.py
tansyab1/LightNetPlus
240
11128921
<gh_stars>100-1000 import math import torch import numpy as np import torch.nn as nn import torch.nn.functional as F from modules.inplace_abn.iabn import ABN def gauss_kernel(kernel_size, sigma): # Create a x, y coordinate grid of shape (kernel_size, kernel_size, 2) x_cord = torch.arange(kernel_size) x_grid = x_cord.repeat(kernel_size).view(kernel_size, kernel_size) y_grid = x_grid.t() xy_grid = torch.stack([x_grid, y_grid], dim=-1) mean = (kernel_size - 1) / 2. variance = sigma ** 2 # Calculate the 2-dimensional gaussian kernel which is # the product of two gaussian distributions for two different # variables (in this case called x and y) gaussian_kernel = (1. / (2. * math.pi * variance)) * \ torch.exp(-torch.sum((xy_grid.float() - mean) ** 2, dim=-1) / (2. * variance)) # Make sure sum of values in gaussian kernel equals 1. gaussian_kernel = gaussian_kernel / torch.sum(gaussian_kernel) # Reshape to 2d depth-wise convolution weight gaussian_kernel = gaussian_kernel.view(1, 1, kernel_size, kernel_size) # [1, 1 , H, W] return gaussian_kernel # class UnsharpMask(nn.Module): # def __init__(self, channels, padding=3, amount=1.0, threshold=0, norm_act=ABN): # super(UnsharpMask, self).__init__() # self.channels = channels # self.padding = padding # # self.amount = amount # self.threshold = threshold # # self.norm_act = norm_act(channels) # # def forward(self, x, gauss_filter): # x = self.norm_act(x) # res = x.clone() # # gauss_filter = gauss_filter.repeat(self.channels, 1, 1, 1) # blurred = F.conv2d(input=x, weight=gauss_filter, stride=1, padding=self.padding, groups=x.size(1), bias=None) # # sharpened = res * (self.amount + 1.0) - blurred * self.amount # # if self.threshold > 0: # sharpened = torch.where(torch.abs(res - blurred) < self.threshold, sharpened, res) # # return sharpened # , res - blurred class UnsharpMaskV2(nn.Module): def __init__(self, channel, kernel_size=7, padding=3, amount=1.0, threshold=0, norm_act=ABN): super(UnsharpMaskV2, self).__init__() self.kernel_size = kernel_size self.padding = padding self.amount = amount self.threshold = threshold self.norm_act = norm_act(channel) def forward(self, x): x = self.norm_act(x) res = x.clone() blurred = F.avg_pool2d(input=x, kernel_size=self.kernel_size, stride=1, padding=self.padding, ceil_mode=False, count_include_pad=True) sharpened = res * (self.amount + 1.0) - blurred * self.amount if self.threshold > 0: sharpened = torch.where(torch.abs(res - blurred) < self.threshold, sharpened, res) return sharpened # , res - blurred class GaussianBlur(nn.Module): def __init__(self, channels, kernel_size=11, padding=5, sigma=1.6): super(GaussianBlur, self).__init__() self.kernel_size = kernel_size self.channels = channels self.padding = padding self.sigma = sigma weights = self.calculate_weights() self.register_buffer('gaussian_filter', weights) def calculate_weights(self): # Create a x, y coordinate grid of shape (kernel_size, kernel_size, 2) x_cord = torch.arange(self.kernel_size) x_grid = x_cord.repeat(self.kernel_size).view(self.kernel_size, self.kernel_size) y_grid = x_grid.t() xy_grid = torch.stack([x_grid, y_grid], dim=-1) mean = (self.kernel_size - 1) / 2. variance = self.sigma ** 2 # Calculate the 2-dimensional gaussian kernel which is # the product of two gaussian distributions for two different # variables (in this case called x and y) gaussian_kernel = (1. / (2. * math.pi * variance)) * \ torch.exp(-torch.sum((xy_grid.float() - mean) ** 2, dim=-1) / (2. * variance)) # Make sure sum of values in gaussian kernel equals 1. 
gaussian_kernel = gaussian_kernel / torch.sum(gaussian_kernel) # Reshape to 2d depthwise convolutional weight gaussian_kernel = gaussian_kernel.view(1, 1, self.kernel_size, self.kernel_size) gaussian_kernel = gaussian_kernel.repeat(self.channels, 1, 1, 1) return gaussian_kernel def forward(self, x): return F.conv2d(input=x, weight=self.gaussian_filter, stride=1, padding=self.padding, groups=x.size(1), bias=None) class UnsharpMask(nn.Module): def __init__(self, channels, kernel_size=11, padding=5, sigma=1.0, amount=1.0, threshold=0, norm_act=ABN): super(UnsharpMask, self).__init__() self.amount = amount self.threshold = threshold self.norm_act = norm_act(channels) self.gauss_blur = GaussianBlur(channels=channels, kernel_size=kernel_size, padding=padding, sigma=sigma) def forward(self, x): x = self.norm_act(x) res = x.clone() blurred = self.gauss_blur(x) sharpened = res * (self.amount + 1.0) - blurred * self.amount if self.threshold > 0: sharpened = torch.where(torch.abs(res - blurred) < self.threshold, sharpened, res) return sharpened if __name__ == "__main__": import imageio import matplotlib import matplotlib.pyplot as plt image = imageio.imread("/home/liuhuijun/PycharmProjects/LightNet++/deploy/cityscapes/examples/images/munster_000168_000019_leftImg8bit.png") # image = np.array(image[:, :, ::-1], dtype=np.uint8) img_copy = image.copy() image = image.transpose(2, 0, 1) # From HWC to CHW (For PyTorch we use N*C*H*W tensor) image = torch.from_numpy(image).float() image = torch.unsqueeze(image, dim=0).cuda() # [N, C, H, W] usm = UnsharpMask(channels=3, kernel_size=11, padding=5, sigma=1.6).cuda() usm.eval() with torch.no_grad(): dummy_out = usm(image) dummy_out = np.squeeze(dummy_out.cpu().numpy()).transpose(1, 2, 0).astype(np.uint8) # mask = np.squeeze(mask.cpu().numpy()).transpose(1, 2, 0).astype(np.uint8) # blur = np.squeeze(blur.cpu().numpy()).transpose(1, 2, 0).astype(np.uint8) fig, axs = plt.subplots(ncols=3, figsize=(13.5, 6)) axs[0].imshow(img_copy) axs[0].get_xaxis().set_visible(False) axs[0].get_yaxis().set_visible(False) axs[0].set_title("Org Image") axs[1].imshow(dummy_out) axs[1].get_xaxis().set_visible(False) axs[1].get_yaxis().set_visible(False) axs[1].set_title("Sharpened Image") # axs[2].imshow(mask, cmap="gray") # axs[2].get_xaxis().set_visible(False) # axs[2].get_yaxis().set_visible(False) # axs[2].set_title("Mask Image") plt.tight_layout() plt.show()
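# -----------------------------------------------------------------------------
# Quick shape-check sketch (illustrative, commented out so the existing demo in
# __main__ above stays the only executable entry point).  GaussianBlur is the
# only module here that does not go through the ABN normalisation layer, so it
# can be smoke-tested on random data without the inplace-ABN dependency:
#
#   blur = GaussianBlur(channels=3, kernel_size=11, padding=5, sigma=1.6)
#   x = torch.randn(2, 3, 64, 64)
#   y = blur(x)
#   assert y.shape == x.shape  # depth-wise blur preserves the input shape
# -----------------------------------------------------------------------------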
tests/lamvery/env_test.py
rdtr/lamvery
101
11128991
# -*- coding: utf-8 -*- import os from unittest import TestCase from nose.tools import eq_ import lamvery.env ENV_JSON = ''' { "foo": "bar", "baz": "qux" } ''' class FunctionsTestCase(TestCase): def setUp(self): open(lamvery.env.ENV_FILE_NAME, 'w').write(ENV_JSON) if 'foo' in os.environ: del os.environ['foo'] def tearDown(self): os.remove(lamvery.env.ENV_FILE_NAME) def test_load(self): lamvery.env.load() eq_(os.environ.get('foo'), 'bar') def test_load_invalid(self): open(lamvery.env.ENV_FILE_NAME, 'w').write('foo') lamvery.env.load() eq_(os.environ.get('foo'), None)
src/timeago/locales/ca.py
nmb10/timeago
220
11129019
<reponame>nmb10/timeago<gh_stars>100-1000 #!/usr/bin/env python # -*- coding: utf-8 -*- ''' Created on 2017-8-30 @author: generated by @lolobosse script ''' LOCALE = [ ["fa un moment", "d'aquí un moment"], ["fa %s segons", "d'aquí %s segons"], ["fa 1 minut", "d'aquí 1 minut"], ["fa %s minuts", "d'aquí %s minuts"], ["fa 1 hora", "d'aquí 1 hora"], ["fa %s hores", "d'aquí %s hores"], ["fa 1 dia", "d'aquí 1 dia"], ["fa %s dies", "d'aquí %s dies"], ["fa 1 setmana", "d'aquí 1 setmana"], ["fa %s setmanes", "d'aquí %s setmanes"], ["fa 1 mes", "d'aquí 1 mes"], ["fa %s mesos", "d'aquí %s mesos"], ["fa 1 any", "d'aquí 1 any"], ["fa %s anys", "d'aquí %s anys"] ]
catboost/tools/limited_precision_numpy_diff/main.py
jochenater/catboost
6,989
11129078
<reponame>jochenater/catboost<filename>catboost/tools/limited_precision_numpy_diff/main.py
import argparse
import numpy as np


def parse_args():
    parser = argparse.ArgumentParser()
    # rtol/atol are parsed as floats so that values passed on the command line
    # are usable directly in np.allclose.
    parser.add_argument('--rtol', type=float, default=1.e-5)
    parser.add_argument('--atol', type=float, default=1.e-8)
    parser.add_argument('npyArrayPath1')
    parser.add_argument('npyArrayPath2')
    return parser.parse_args()


def main():
    args = parse_args()
    array1 = np.load(args.npyArrayPath1)
    array2 = np.load(args.npyArrayPath2)
    assert np.allclose(array1, array2, args.rtol, args.atol)


if __name__ == "__main__":
    main()
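# -----------------------------------------------------------------------------
# Illustrative check of the tool's behaviour (file names below are made up):
#
#   import numpy as np
#   np.save('a.npy', np.array([1.0, 2.0, 3.0]))
#   np.save('b.npy', np.array([1.0, 2.0, 3.0 + 1e-7]))
#
# Then, from the shell:
#   python main.py a.npy b.npy                        # passes: diff within default tolerances
#   python main.py --rtol 1e-12 --atol 0 a.npy b.npy  # fails with AssertionError
# -----------------------------------------------------------------------------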
medspacy/common/__init__.py
lusterck/medspacy
197
11129081
<reponame>lusterck/medspacy from .base_rule import BaseRule
package/tests/test_integrations/test_pypi.py
deepyaman/kedro-viz
125
11129109
import pytest from semver import VersionInfo from kedro_viz.integrations.pypi import get_latest_version, is_running_outdated_version def test_get_latest_version(mocker, mock_http_response): mock_version = "1.0.0" requests_get = mocker.patch("requests.get") requests_get.return_value = mock_http_response( data={"info": {"version": mock_version}} ) assert get_latest_version() == VersionInfo.parse(mock_version) @pytest.mark.parametrize( "installed_version, latest_version, is_outdated", [("1.0.0", "1.2.3", True), ("1.2.3", "1.0.0", False), ("1.0.0", None, False)], ) def test_is_running_outdated_version(installed_version, latest_version, is_outdated): installed_version = VersionInfo.parse(installed_version) if latest_version is not None: latest_version = VersionInfo.parse(latest_version) assert is_running_outdated_version(installed_version, latest_version) == is_outdated
depot/fields/interfaces.py
tcmike/depot
128
11129156
<filename>depot/fields/interfaces.py from abc import ABCMeta, abstractmethod from depot._compat import with_metaclass from depot.manager import DepotManager class FileFilter(with_metaclass(ABCMeta, object)): """Interface that must be implemented by file filters. File filters get executed whenever a file is stored on the database using one of the supported fields. Can be used to add additional data to the stored file or change it. When file filters are run the file has already been stored. """ @abstractmethod def on_save(self, uploaded_file): # pragma: no cover """Filters are required to provide their own implementation""" return class DepotFileInfo(with_metaclass(ABCMeta, dict)): """Keeps information on a content related to a specific depot. By itself the DepotFileInfo does nothing, it is required to implement a :meth:`process_content` method that actually saves inside the file info the information related to the content. The only information which is saved by default is the depot name itself. It is a specialized dictionary that provides also attribute style access, the dictionary parent permits easy encoding/decoding to most marshalling systems. """ def __init__(self, content, depot_name=None): super(DepotFileInfo, self).__init__() self._thaw() if isinstance(content, dict): object.__setattr__(self, 'original_content', None) self.update(content) else: object.__setattr__(self, 'original_content', content) if depot_name is None: depot_name = DepotManager.get_default() depot_name = DepotManager.resolve_alias(depot_name) if not depot_name: raise ValueError('Storage has not been found in DEPOT') self['depot_name'] = depot_name self['files'] = [] self.process_content(content) self._freeze() @abstractmethod def process_content(self, content, filename=None, content_type=None): # pragma: no cover """Process content in the given depot. This is implemented by subclasses to provide some kind of behaviour on the content in the related Depot. The default implementation is provided by :class:`depot.fields.upload.UploadedFile` which stores the content into the depot. """ return def __getitem__(self, key): return dict.__getitem__(self, key) def __getattr__(self, name): try: return self[name] except KeyError: raise AttributeError(name) def __setitem__(self, key, value): if object.__getattribute__(self, '_frozen'): raise TypeError('Already saved files are immutable') return dict.__setitem__(self, key, value) __setattr__ = __setitem__ def __delattr__(self, name): if object.__getattribute__(self, '_frozen'): raise TypeError('Already saved files are immutable') try: del self[name] except KeyError: raise AttributeError(name) def __delitem__(self, key): if object.__getattribute__(self, '_frozen'): raise TypeError('Already saved files are immutable') dict.__delitem__(self, key) def _apply_filters(self, filters): if self.original_content is not None: self._thaw() for filt in filters: filt.on_save(self) self._freeze() def _freeze(self): object.__setattr__(self, '_frozen', True) def _thaw(self): object.__setattr__(self, '_frozen', False)
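# -----------------------------------------------------------------------------
# Sketch of the FileFilter contract (illustrative only, not shipped with this
# module).  The filter name and the metadata key are made up for the example;
# remember that ``on_save`` runs after the file has already been stored, so a
# filter can only attach extra data or derive new content from it:
#
#   class MarkProcessedFilter(FileFilter):
#       def on_save(self, uploaded_file):
#           # DepotFileInfo behaves like a dict while filters run, so extra
#           # metadata can simply be assigned to it.
#           uploaded_file['processed_by_filter'] = True
# -----------------------------------------------------------------------------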
nbtutor/__init__.py
vincentxavier/nbtutor
423
11129229
<gh_stars>100-1000 from .version import __version__ # noqa F401 from .version import __version_info__ # noqa F401 def _jupyter_server_extension_paths(): return [dict(module="nbtutor")] def _jupyter_nbextension_paths(): return [dict( section="notebook", src="static", dest="nbtutor", require="nbtutor/nbtutor.notebook", )] def load_jupyter_server_extension(nbapp): pass def load_ipython_extension(ip): from .ipython.magic import NbtutorMagics ip.register_magics(NbtutorMagics)
scripts/notebook-history-s3/notebook-history-s3.py
homegate-engineering/amazon-sagemaker-notebook-instance-lifecycle-config-samples
291
11129253
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
#     https://aws.amazon.com/apache-2-0/
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.

import requests
from datetime import datetime
import getopt, sys
import boto3
import json
import sagemaker
import urllib3
import logging

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

# Usage
usageInfo = """Usage:
This script copies the sqlite database of Jupyter sessions and IPython command
history to S3: python notebook-history-s3.py.
Type "python notebook-history-s3.py -h" for available options.
"""

# Help info
helpInfo = """
-h, --help
    Help information
"""

logging.basicConfig(level=logging.INFO, format='%(message)s')
logger = logging.getLogger()
logger.addHandler(logging.FileHandler('/var/log/notebook_history_s3.log', 'a'))

# Read in command-line parameters
try:
    opts, args = getopt.getopt(sys.argv[1:], "h", ["help"])
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            print(helpInfo)
            exit(0)
except getopt.GetoptError:
    print(usageInfo)
    exit(1)


def get_notebook_name():
    log_path = "/opt/ml/metadata/resource-metadata.json"
    with open(log_path, "r") as logs:
        _logs = json.load(logs)
    return _logs["ResourceName"]


sagemaker_session = sagemaker.Session()
s3 = boto3.client("s3")
bucket = sagemaker_session.default_bucket()
key = "notebooks/{}/history/{}/history.sqlite".format(get_notebook_name(),
                                                      datetime.now().strftime("%Y%m%d-%H%M%S"))

logger.info("Writing history.sqlite to {}/{}".format(bucket, key))
with open('/home/ec2-user/.ipython/profile_default/history.sqlite', 'rb') as data:
    s3.upload_fileobj(data, bucket, key)
lib/redfin/constants.py
goztrk/django-htk
206
11129267
REDFIN_API_BASE_URL = 'https://www.redfin.com' REDFIN_API_ENDPOINTS = { 'get_property_listing_id' : '/stingray/do/api-get-property-listing-id', 'get_avm' : '/stingray/api/home/details/avm', 'get_property_parcel_info' : '/stingray/api/home/details/propertyParcelInfo', }
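# Example of how these constants are typically combined into a request URL
# (illustrative only; no request is made here):
#
#   url = REDFIN_API_BASE_URL + REDFIN_API_ENDPOINTS['get_avm']
#   # -> 'https://www.redfin.com/stingray/api/home/details/avm'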
src/pretalx/event/migrations/0024_remove_team_review_override_votes.py
lili668668/pretalx
418
11129309
<reponame>lili668668/pretalx # Generated by Django 3.1 on 2020-09-24 21:08 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ("event", "0023_update_featured_visibility"), ] operations = [ migrations.RemoveField( model_name="team", name="review_override_votes", ), ]
workflows/pipe-common/scripts/configure_system_settings_win.py
ZMaratovna/cloud-pipeline
126
11129328
<reponame>ZMaratovna/cloud-pipeline # Copyright 2017-2021 EPAM Systems, Inc. (https://www.epam.com/) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from pipeline.utils.reg import set_local_machine_dword_value _win_policies_reg_path = 'SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Policies\\System' _win_personalization_reg_path = 'SOFTWARE\\Policies\\Microsoft\\Windows\\Personalization' _win_start_menu_reg_path = 'SOFTWARE\\Microsoft\\PolicyManager\\default\\Start' def configure_system_settings_win(): set_local_machine_dword_value(_win_policies_reg_path, 'disablecad', 1) set_local_machine_dword_value(_win_personalization_reg_path, 'NoLockScreen', 1) set_local_machine_dword_value(_win_start_menu_reg_path + '\\HideShutDown', 'value', 1) set_local_machine_dword_value(_win_start_menu_reg_path + '\\HideRestart', 'value', 1)
angrmanagement/ui/widgets/qblock_label.py
DennyDai/angr-management
474
11129344
<reponame>DennyDai/angr-management<filename>angrmanagement/ui/widgets/qblock_label.py from PySide2.QtGui import QPainter from PySide2.QtWidgets import QGraphicsSimpleTextItem from PySide2.QtCore import Qt, QRectF from ...config import Conf from .qgraph_object import QCachedGraphicsItem class QBlockLabel(QCachedGraphicsItem): def __init__(self, addr, text, config, disasm_view, workspace, infodock, parent=None): super().__init__(parent=parent) self.workspace = workspace self.addr = addr self.text = text self._width = 0 self._height = 0 self.infodock = infodock self._config = config self._disasm_view = disasm_view self._text_item: QGraphicsSimpleTextItem = None self._init_widgets() def paint(self, painter, option, widget): #pylint: disable=unused-argument painter.setRenderHints( QPainter.Antialiasing | QPainter.SmoothPixmapTransform | QPainter.HighQualityAntialiasing) painter.setFont(self._config.code_font) # background if self.infodock.is_label_selected(self.addr): highlight_color = Conf.disasm_view_label_highlight_color painter.setBrush(highlight_color) painter.setPen(highlight_color) painter.drawRect(0, 0, self.width, self.height) # # Event handlers # def mousePressEvent(self, event): if event.button() == Qt.LeftButton: self.infodock.select_label(self.addr) # # Private methods # def _init_widgets(self): self._text_item = QGraphicsSimpleTextItem(self.text, self) self._text_item.setBrush(Conf.disasm_view_label_color) self._text_item.setFont(self._config.disasm_font) self._layout_items_and_update_size() def _layout_items_and_update_size(self): self._text_item.setPos(0, 0) self._width = self._text_item.boundingRect().width() self._height = self._text_item.boundingRect().height() self.recalculate_size() def _boundingRect(self): return QRectF(0, 0, self._width, self._height)
atx/ext/gt.py
jamjven/ATX
1,132
11129399
<reponame>jamjven/ATX #!/usr/bin/env python # -*- coding: utf-8 -*- # # extention for http://gt.qq.com/ # reference doc http://gt.qq.com/docs/a/UseGtWithBroadcast.txt # # Experimental, maybe change in the future # Created by <hzsunshx> 2016-06-12 import functools class GT(object): def __init__(self, d): self.d = d self._broadcast = functools.partial(self.d.adb_device.shell, 'am', 'broadcast', '-a') self._package_name = None def start_test(self, package_name, cpu=True, net=True, pss=True): self._package_name = package_name broadcast = self._broadcast # 1. start app self.quit() # reset gt self.d.start_app('com.tencent.wstt.gt')#, 'com.tencent.wstt.gt.activity.GTMainActivity') # 2. set test package name broadcast('com.tencent.wstt.gt.baseCommand.startTest', '--es', 'pkgName', package_name) # 3. set collect params if cpu: broadcast('com.tencent.wstt.gt.baseCommand.sampleData', '--ei', 'cpu', '1') if net: broadcast('com.tencent.wstt.gt.baseCommand.sampleData', '--ei', 'net', '1') if pss: broadcast('com.tencent.wstt.gt.baseCommand.sampleData', '--ei', 'pss', '1') # 4. switch back to app self.d.start_app(package_name) def stop_and_save(self): self._broadcast('com.tencent.wstt.gt.baseCommand.endTest', '--es', 'saveFolderName', self._package_name, '--es', 'desc', 'Result_of_GT') print 'Run\n$ adb pull /sdcard/GT/GW/{pkgname}/{version}/{pkgname}'.format(pkgname=self._package_name, version='unknow') def quit(self): self._broadcast('com.tencent.wstt.gt.baseCommand.exitGT')
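# Illustrative usage sketch (commented out; the device handle and the package
# name are made-up placeholders, and ``atx.connect()`` is assumed to be the
# usual way to obtain the device object):
#
#   import atx
#   from atx.ext.gt import GT
#
#   d = atx.connect()
#   gt = GT(d)
#   gt.start_test('com.example.demo', cpu=True, net=True, pss=True)
#   # ... drive the app under test here ...
#   gt.stop_and_save()   # prints the `adb pull` hint for the GT report folder
#   gt.quit()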
pytorch_toolbelt/modules/encoders/timm/nfnet_s.py
mohitktanwr/toolkits
1,281
11129410
<gh_stars>1000+ from .common import GenericTimmEncoder, make_n_channel_input_std_conv __all__ = [ "NFNetF0SEncoder", "NFNetF1SEncoder", "NFNetF2SEncoder", "NFNetF3SEncoder", "NFNetF4SEncoder", "NFNetF5SEncoder", "NFNetF6SEncoder", "NFNetF7SEncoder", ] class NFNetF0SEncoder(GenericTimmEncoder): def __init__(self, pretrained=True, layers=None): from timm.models import nfnet encoder = nfnet.nfnet_f0s(pretrained=pretrained, features_only=True) super().__init__(encoder, layers) def change_input_channels(self, input_channels: int, mode="auto", **kwargs): self.encoder.stem_conv1 = make_n_channel_input_std_conv( self.encoder.stem_conv1, input_channels, mode, **kwargs ) return self class NFNetF1SEncoder(GenericTimmEncoder): def __init__(self, pretrained=True, layers=None): from timm.models import nfnet if layers is None: layers = [1, 2, 3, 4] encoder = nfnet.nfnet_f1s(pretrained=pretrained, features_only=True) super().__init__(encoder, layers) def change_input_channels(self, input_channels: int, mode="auto", **kwargs): self.encoder.stem_conv1 = make_n_channel_input_std_conv( self.encoder.stem_conv1, input_channels, mode, **kwargs ) return self class NFNetF2SEncoder(GenericTimmEncoder): def __init__(self, pretrained=True, layers=None): from timm.models import nfnet encoder = nfnet.nfnet_f2s(pretrained=pretrained, features_only=True) super().__init__(encoder, layers) def change_input_channels(self, input_channels: int, mode="auto", **kwargs): self.encoder.stem_conv1 = make_n_channel_input_std_conv( self.encoder.stem_conv1, input_channels, mode, **kwargs ) return self class NFNetF3SEncoder(GenericTimmEncoder): def __init__(self, pretrained=True, layers=None): from timm.models import nfnet encoder = nfnet.nfnet_f3s(pretrained=pretrained, features_only=True) super().__init__(encoder, layers) def change_input_channels(self, input_channels: int, mode="auto", **kwargs): self.encoder.stem_conv1 = make_n_channel_input_std_conv( self.encoder.stem_conv1, input_channels, mode, **kwargs ) return self class NFNetF4SEncoder(GenericTimmEncoder): def __init__(self, pretrained=True, layers=None): from timm.models import nfnet encoder = nfnet.nfnet_f4s(pretrained=pretrained, features_only=True) super().__init__(encoder, layers) def change_input_channels(self, input_channels: int, mode="auto", **kwargs): self.encoder.stem_conv1 = make_n_channel_input_std_conv( self.encoder.stem_conv1, input_channels, mode, **kwargs ) return self class NFNetF5SEncoder(GenericTimmEncoder): def __init__(self, pretrained=True, layers=None): from timm.models import nfnet encoder = nfnet.nfnet_f5s(pretrained=pretrained, features_only=True) super().__init__(encoder, layers) def change_input_channels(self, input_channels: int, mode="auto", **kwargs): self.encoder.stem_conv1 = make_n_channel_input_std_conv( self.encoder.stem_conv1, input_channels, mode, **kwargs ) return self class NFNetF6SEncoder(GenericTimmEncoder): def __init__(self, pretrained=True, layers=None): from timm.models import nfnet encoder = nfnet.nfnet_f6s(pretrained=pretrained, features_only=True) super().__init__(encoder, layers) def change_input_channels(self, input_channels: int, mode="auto", **kwargs): self.encoder.stem_conv1 = make_n_channel_input_std_conv( self.encoder.stem_conv1, input_channels, mode, **kwargs ) return self class NFNetF7SEncoder(GenericTimmEncoder): def __init__(self, pretrained=True, layers=None): from timm.models import nfnet encoder = nfnet.nfnet_f7s(pretrained=pretrained, features_only=True) super().__init__(encoder, layers) def 
change_input_channels(self, input_channels: int, mode="auto", **kwargs): self.encoder.stem_conv1 = make_n_channel_input_std_conv( self.encoder.stem_conv1, input_channels, mode, **kwargs ) return self
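# Usage sketch (commented out).  ``pretrained=False`` avoids downloading
# weights; the form of the forward output is inherited from GenericTimmEncoder
# (per timm's ``features_only`` convention this is a list of per-stage feature
# maps), which is an assumption here rather than something demonstrated above:
#
#   import torch
#
#   encoder = NFNetF0SEncoder(pretrained=False)
#   encoder = encoder.change_input_channels(1)        # e.g. grayscale input
#   features = encoder(torch.randn(2, 1, 256, 256))   # list of feature maps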
scripts/monitoring/cron-send-ovs-status.py
fahlmant/openshift-tools
164
11129419
#!/usr/bin/env python ''' Command to send status of ovs to Zabbix ''' #This is not a module, but pylint thinks it is. This is a command. #pylint: disable=invalid-name,import-error import subprocess from openshift_tools.monitoring.metric_sender import MetricSender def get_vswitch_ports(): ''' Get list of ports from ovs ''' ## get list of running interfaces ovs_show_cmd = "/usr/bin/ovs-vsctl show" # get the output of ovs ovs_output = subprocess.check_output(ovs_show_cmd, shell=True) # pare down to only lines that contain "Port" running_port_list = [p for p in ovs_output.split('\n') if "Port" in p] return len(running_port_list) def get_vswitch_pids(): ''' Get list of ovs-related processes ''' ## get list of ovs processes ovs_ps_cmd = "/usr/bin/pgrep -f 'ovsdb-server|ovs-vswitchd'" # get the output of ps ps_output = subprocess.check_output(ovs_ps_cmd, shell=True) return len(ps_output.strip().split('\n')) def main(): ''' Get data and send to zabbix ''' vswitch_ports_count = get_vswitch_ports() vswitch_pids_count = get_vswitch_pids() print "Found %s OVS ports" % vswitch_ports_count print "Found %s OVS pids" % vswitch_pids_count # we now have all the data we want. Let's send it to Zagg mts = MetricSender() mts.add_metric({'openshift.node.ovs.ports.count' : vswitch_ports_count}) mts.add_metric({'openshift.node.ovs.pids.count' : vswitch_pids_count}) # Finally, sent them to zabbix mts.send_metrics() if __name__ == '__main__': main()
Machine Learning Projects/Fake_News_Classifier/model.py
TeacherManoj0131/HacktoberFest2020-Contributions
256
11129571
<reponame>TeacherManoj0131/HacktoberFest2020-Contributions
# Necessary Libraries
import pandas as pd
import numpy as np
import pickle
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction.text import TfidfVectorizer
import nltk
nltk.download('punkt')


# In[2]:

# Importing the dataset
df = pd.read_csv('/Users/sumitkumarkundu/PycharmProjects/Fake_News_Classifier/venv/Model/train.csv')


# In[3]:

print(df.head())
df.isnull().sum()


# In[4]:

df.sample()


# In[5]:

# df.info()


# In[6]:

# The input column is 'text' and the output column is 'label'; the 'text' column has 39 missing values.
# df['text'].shape


# In[7]:

# With 20800 rows in total, those few missing values have little effect, so forward-fill them.
df['text'] = df['text'].fillna(method='ffill')


# In[8]:

# df['text'].shape


# In[9]:

X = df['text']
y = df['label']


# In[10]:

# Split the data into train and test sets.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)


# In[11]:

# Build a pipeline that removes English stopwords and creates TF-IDF features,
# then applies MultinomialNB, which gives good results on this data.
pipe = Pipeline([('tfidf', TfidfVectorizer(stop_words='english')),
                 ('nbmodel', MultinomialNB())])


# In[13]:

# Fit the model.
pipe.fit(X_train, y_train)


# In[15]:

prediction = pipe.predict(X_test)


# In[16]:

print(classification_report(y_test, prediction))


# In[17]:

print(confusion_matrix(y_test, prediction))


# In[18]:

# The true positive and true negative counts are high.


# In[19]:

# Save the fitted pipeline as a pickle file.
with open('model.pickle', 'wb') as target:
    pickle.dump(pipe, target, protocol=pickle.HIGHEST_PROTOCOL)
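# In[20]:

# How the saved pipeline can be reused later (illustrative sketch; the example
# text and the label meaning are assumptions, with 1 usually denoting "fake"):
#
# with open('model.pickle', 'rb') as source:
#     loaded_pipe = pickle.load(source)
# loaded_pipe.predict(["Some news article text to classify ..."])
# # -> array with the predicted label, e.g. array([1])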
cpp2py_test/ind_initialize_test.py
fevorl/BM3D_py
157
11129665
<gh_stars>100-1000 import numpy as np def ind_initialize(max_size, N, step): ind_set = np.empty(shape=[0], dtype=np.int) ind = N while (ind < max_size - N): ind_set = np.append(ind_set, np.array([ind]), axis=0) ind += step if ind_set[-1] < max_size - N - 1: ind_set = np.append(ind_set, np.array([max_size - N - 1]), axis=0) return ind_set def my_ind(max_size, N, step): ind = range(N, max_size-N, step) if ind[-1] < max_size - N - 1: ind = np.append(ind, np.array([max_size - N - 1]), axis=0) return ind if __name__ == '__main__': max_size = 100 N = 5 for step in range(1, 20): ind1 = ind_initialize(max_size, N, step) ind2 = my_ind(max_size, N, step) print(all(ind1==ind2)) # for i in ind1: # print(i) # print('-----------------------') # for i in ind2: # print(i)
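# Concrete example of what both implementations should agree on (illustrative):
#
#   >>> ind_initialize(100, 5, 3)
#   array([ 5,  8, 11, ..., 89, 92, 94])
#
# i.e. indices from N up to (but excluding) max_size - N in steps of `step`,
# with max_size - N - 1 appended when the stride does not land on it exactly.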
qutip/tomography.py
camponogaraviera/qutip
1,205
11129714
<gh_stars>1000+ __all__ = ['qpt_plot', 'qpt_plot_combined', 'qpt'] from qutip.tensor import tensor from qutip.superoperator import spre, spost, mat2vec, vec2mat from numpy import hstack, real, imag import scipy.linalg as la from qutip.visualization import matrix_histogram, matrix_histogram_complex try: import matplotlib.pyplot as plt except: pass def _index_permutations(size_list, perm=[]): """ Generate a list with all index permutations. Parameters ---------- size_list : list A list that contains the sizes for each composite system. perm : list A list of permutations Returns ------- perm_idx : list List containing index permutations. """ if len(size_list) == 0: yield perm else: for n in range(size_list[0]): for ip in _index_permutations(size_list[1:], perm + [n]): yield ip def qpt_plot(chi, lbls_list, title=None, fig=None, axes=None): """ Visualize the quantum process tomography chi matrix. Plot the real and imaginary parts separately. Parameters ---------- chi : array Input QPT chi matrix. lbls_list : list List of labels for QPT plot axes. title : string Plot title. fig : figure instance User defined figure instance used for generating QPT plot. axes : list of figure axis instance User defined figure axis instance (list of two axes) used for generating QPT plot. Returns ------- fig, ax : tuple A tuple of the matplotlib figure and axes instances used to produce the figure. """ if axes is None or len(axes) != 2: if fig is None: fig = plt.figure(figsize=(16, 8)) ax1 = fig.add_subplot(1, 2, 1, projection='3d', position=[0, 0, 1, 1]) ax2 = fig.add_subplot(1, 2, 2, projection='3d', position=[0, 0, 1, 1]) axes = [ax1, ax2] xlabels = [] for inds in _index_permutations([len(lbls) for lbls in lbls_list]): xlabels.append("".join([lbls_list[k][inds[k]] for k in range(len(lbls_list))])) matrix_histogram(real(chi), xlabels, xlabels, title=r"real($\chi$)", limits=[-1, 1], ax=axes[0]) matrix_histogram(imag(chi), xlabels, xlabels, title=r"imag($\chi$)", limits=[-1, 1], ax=axes[1]) if title and fig: fig.suptitle(title) return fig, axes def qpt_plot_combined(chi, lbls_list, title=None, fig=None, ax=None, figsize=(8, 6), threshold=None): """ Visualize the quantum process tomography chi matrix. Plot bars with height and color corresponding to the absolute value and phase, respectively. Parameters ---------- chi : array Input QPT chi matrix. lbls_list : list List of labels for QPT plot axes. title : string Plot title. fig : figure instance User defined figure instance used for generating QPT plot. ax : figure axis instance User defined figure axis instance used for generating QPT plot (alternative to the fig argument). threshold: float (None) Threshold for when bars of smaller height should be transparent. If not set, all bars are colored according to the color map. Returns ------- fig, ax : tuple A tuple of the matplotlib figure and axes instances used to produce the figure. 
""" if ax is None: if fig is None: fig = plt.figure(figsize=figsize) ax = fig.add_subplot(1, 1, 1, projection='3d', position=[0, 0, 1, 1]) xlabels = [] for inds in _index_permutations([len(lbls) for lbls in lbls_list]): xlabels.append("".join( [lbls_list[k][inds[k]] for k in range(len(lbls_list))])) if not title: title = r"$\chi$" matrix_histogram_complex(chi, xlabels, xlabels, title=title, ax=ax, threshold=threshold) return fig, ax def qpt(U, op_basis_list): """ Calculate the quantum process tomography chi matrix for a given (possibly nonunitary) transformation matrix U, which transforms a density matrix in vector form according to: vec(rho) = U * vec(rho0) or rho = vec2mat(U * mat2vec(rho0)) U can be calculated for an open quantum system using the QuTiP propagator function. Parameters ---------- U : Qobj Transformation operator. Can be calculated using QuTiP propagator function. op_basis_list : list A list of Qobj's representing the basis states. Returns ------- chi : array QPT chi matrix """ E_ops = [] # loop over all index permutations for inds in _index_permutations([len(ops) for ops in op_basis_list]): # loop over all composite systems E_op_list = [op_basis_list[k][inds[k]] for k in range(len( op_basis_list))] E_ops.append(tensor(E_op_list)) EE_ops = [spre(E1) * spost(E2.dag()) for E1 in E_ops for E2 in E_ops] M = hstack([mat2vec(EE.full()) for EE in EE_ops]) Uvec = mat2vec(U.full()) chi_vec = la.solve(M, Uvec) return vec2mat(chi_vec)
doc/samples/timeout.py
m4ta1l/doit
1,390
11129720
import datetime from doit.tools import timeout def task_expire(): return { 'actions': ['echo test expire; date'], 'uptodate': [timeout(datetime.timedelta(minutes=5))], 'verbosity': 2, }
tf_encrypted/operations/secure_random/__init__.py
wqruan/tf-encrypted
825
11129732
<gh_stars>100-1000 """Secure random API.""" from .secure_random import random_uniform from .secure_random import secure_seed from .secure_random import seeded_random_uniform from .secure_random import supports_secure_randomness from .secure_random import supports_seeded_randomness __all__ = [ "supports_secure_randomness", "supports_seeded_randomness", "seeded_random_uniform", "random_uniform", "secure_seed", ]
corehq/apps/app_manager/migrations/0018_migrate_case_search_labels.py
akashkj/commcare-hq
471
11129738
<gh_stars>100-1000 from django.core.management import call_command from django.db import migrations from corehq.toggles import SYNC_SEARCH_CASE_CLAIM from corehq.util.django_migrations import skip_on_fresh_install @skip_on_fresh_install def _migrate_case_search_labels(apps, schema_editor): for domain in sorted(SYNC_SEARCH_CASE_CLAIM.get_enabled_domains()): call_command('migrate_case_search_labels', domain=domain) class Migration(migrations.Migration): dependencies = [ ('app_manager', '0017_migrate_case_search_relevant'), ] operations = [ migrations.RunPython(_migrate_case_search_labels, reverse_code=migrations.RunPython.noop, elidable=True), ]
tests/contrib/pylons/tests.py
ascan-io/raven-python
1,108
11129758
from raven.utils.testutils import TestCase from raven.contrib.pylons import Sentry def example_app(environ, start_response): raise ValueError('hello world') class MiddlewareTest(TestCase): def setUp(self): self.app = example_app def test_init(self): config = { 'sentry.dsn': 'http://public:[email protected]/1', } middleware = Sentry(self.app, config)
tests/stg2_network_graph_dump.py
qiaone/GIF
322
11129790
import torch
import numpy as np

from model.stg2_generator import Generator
from model.stg2_discriminator import Discriminator

if __name__ == "__main__":
    from my_utils.graph_writer import graph_writer

    img_size = 256
    generator = Generator(img_size, 512, 8, channel_multiplier=2)
    # from my_utils.print_model_summary import summary
    # summary(generator, (1, 512))
    graph_writer.draw(generator, 'STG2_Original_Generator.png', (16, 38),
                      [torch.zeros((1, 512), dtype=torch.float32, device='cpu'), ],
                      randomize_noise=False)
    print('Generator model saved')
    tot_gen_params = 0
    for gen_params in generator.parameters():
        tot_gen_params += np.prod(gen_params.shape)
    print(f'generator n_params: {tot_gen_params}')

    discriminator = Discriminator(img_size, channel_multiplier=2)
    graph_writer.draw(discriminator, 'STG2_Original_Discriminator.png', (16, 38),
                      torch.zeros((1, 3, img_size, img_size), dtype=torch.float32, device='cpu'))
    print('Discriminator model saved')
    tot_discrim_params = 0
    for discrim_params in discriminator.parameters():
        tot_discrim_params += np.prod(discrim_params.shape)
    print(f'discriminator n_params: {tot_discrim_params}')
sleap/nn/architectures/common.py
preeti98/sleap
156
11129802
"""Common utilities for architecture and model building.""" import attr import tensorflow as tf @attr.s(auto_attribs=True) class IntermediateFeature: """Intermediate feature tensor for use in skip connections. This class is effectively a named tuple to store the stride (resolution) metadata. Attributes: tensor: The tensor output from an intermediate layer. stride: Stride of the tensor relative to the input. """ tensor: tf.Tensor stride: int @property def scale(self) -> float: """Return the absolute scale of the tensor relative to the input. This is equivalent to the reciprocal of the stride, e.g., stride 2 => scale 0.5. """ return 1.0 / float(self.stride)
LeafNATS/data/nli/process_minibatch_v1.py
haophancs/TREQS
149
11129809
<filename>LeafNATS/data/nli/process_minibatch_v1.py ''' @author <NAME> Please contact <EMAIL> ''' import json import re import torch from torch.autograd import Variable def process_minibatch(input_, vocab2id, premise_max_lens, hypothesis_max_lens): ''' Process minibatch. ''' len_premise = [] len_hypothe = [] premise_arr = [] hypothe_arr = [] label_arr = [] for line in input_: data = json.loads(line) label_arr.append(data['gold_label']+1) premise = data['premise'] len_premise.append(len(premise)) premise2id = [vocab2id[wd] if wd in vocab2id else vocab2id['<unk>'] for wd in premise] premise_arr.append(premise2id) hypothe = data['hypothesis'] len_hypothe.append(len(hypothe)) hypothe2id = [vocab2id[wd] if wd in vocab2id else vocab2id['<unk>'] for wd in hypothe] hypothe_arr.append(hypothe2id) premise_lens = min(premise_max_lens, max(len_premise)) hypothe_lens = min(hypothesis_max_lens, max(len_hypothe)) premise_arr = [itm[:premise_lens] for itm in premise_arr] premise_arr = [itm + [vocab2id['<pad>']] * (premise_lens-len(itm)) for itm in premise_arr] premise_var = Variable(torch.LongTensor(premise_arr)) hypothe_arr = [itm[:hypothe_lens] for itm in hypothe_arr] hypothe_arr = [itm + [vocab2id['<pad>']] * (hypothe_lens-len(itm)) for itm in hypothe_arr] hypothe_var = Variable(torch.LongTensor(hypothe_arr)) label_var = Variable(torch.LongTensor(label_arr)) premise_mask = Variable(torch.FloatTensor(premise_arr)) premise_mask[premise_mask != 1.0] = 0.0 premise_mask = 1.0 - premise_mask hypothe_mask = Variable(torch.FloatTensor(hypothe_arr)) hypothe_mask[hypothe_mask != 1.0] = 0.0 hypothe_mask = 1.0 - hypothe_mask return premise_var, hypothe_var, premise_mask, hypothe_mask, label_var
Python3/1138.py
rakhi2001/ecom7
854
11129810
__________________________________________________________________________________________________ sample 24 ms submission class Solution: def alphabetBoardPath(self, target: str) -> str: code = [ord(t) - ord('a') for t in target] code = [(c // 5, c % 5) for c in code] res = "" code = [(0,0)] + code def path(step1, step2, num, res): if num > 0: res += step1 * num elif num < 0: res += step2 * (-num) return res for i in range(1, len(code)): v = code[i][0] - code[i-1][0] h = code[i][1] - code[i-1][1] if code[i] == (5,0): res = path('R', 'L', h, res) res = path('D', 'U', v, res) else: res = path('D', 'U', v, res) res = path('R', 'L', h, res) res += '!' return res __________________________________________________________________________________________________ sample 28 ms submission class Solution: def alphabetBoardPath(self, target: str) -> str: s,i,j,n='',0,0,ord('a') for c in target: t=ord(c)-n ii,jj=t//5,t%5 if ii>i: if jj>j: s+='R'*(jj-j) else: s+='L'*(j-jj) s+='D'*(ii-i) else: s+='U'*(i-ii) if jj>j: s+='R'*(jj-j) else: s+='L'*(j-jj) s+='!' i,j=ii,jj return s __________________________________________________________________________________________________ sample 32 ms submission class Solution: def alphabetBoardPath(self, target: str) -> str: board = ["abcde", "fghij", "klmno", "pqrst", "uvwxy", "z"] adjust = ord('a') words_per_set = 5 current_r=0 current_c=0 result="" def is_exits(r,c): try: board[r][c] return True except Exception: return False for t_c in target: adj_ord_c=ord(t_c)-adjust target_r = adj_ord_c // words_per_set target_c = adj_ord_c % words_per_set while (target_r != current_r) or (target_c != current_c): if current_r > target_r and is_exits(current_r-1,current_c): result+="U" current_r -= 1 continue if current_r < target_r and is_exits(current_r+1,current_c): result+="D" current_r += 1 continue if current_c > target_c and is_exits(current_r,current_c-1): result+="L" current_c -= 1 continue if current_c < target_c and is_exits(current_r,current_c+1): result+="R" current_c += 1 continue result += "!" return result
test/test_management.py
mehrdad-shokri/QT4A
343
11129829
<filename>test/test_management.py # -*- coding: UTF-8 -*- # # Tencent is pleased to support the open source community by making QTA available. # Copyright (C) 2016THL A29 Limited, a Tencent company. All rights reserved. # Licensed under the BSD 3-Clause License (the "License"); you may not use this # file except in compliance with the License. You may obtain a copy of the License at # # https://opensource.org/licenses/BSD-3-Clause # # Unless required by applicable law or agreed to in writing, software distributed # under the License is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS # OF ANY KIND, either express or implied. See the License for the specific language # governing permissions and limitations under the License. # '''management模块单元测试 ''' import logging import os import unittest import sys from qt4a.management import qt4a_repack_apk class TestManagement(unittest.TestCase): ''' ''' def _check_jdk_env(self): sep = ';' if sys.platform == 'win32' else ':' paths = os.environ['PATH'].split(sep) for path in paths: jarsigner_path = os.path.join(path, 'jarsigner.exe' if sys.platform == 'win32' else 'jarsigner') if os.path.exists(jarsigner_path): return True return False def test_repack(self): if not self._check_jdk_env(): logging.warn('Check JDK environment failed') return apk_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'qt4a', 'androiddriver', 'tools', 'QT4AHelper.apk') outpath = qt4a_repack_apk(apk_path) self.assertTrue(os.path.exists(outpath)) def test_repack_low_memory(self): if not self._check_jdk_env(): logging.warn('Check JDK environment failed') return apk_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'qt4a', 'androiddriver', 'tools', 'QT4AHelper.apk') outpath = qt4a_repack_apk(apk_path, max_heap_size=1) self.assertTrue(os.path.exists(outpath)) if __name__ == '__main__': unittest.main()
cockpit/cockpit.py
wx-b/cockpit
367
11129870
<gh_stars>100-1000 """Cockpit.""" import os from collections import defaultdict from typing import Set import json_tricks from backpack import disable from backpack.extensions.backprop_extension import BackpropExtension from cockpit import quantities from cockpit.context import BackwardCTX, get_loss from cockpit.quantities.quantity import Quantity from cockpit.quantities.utils_transforms import BatchGradTransformsHook class Cockpit: """Cockpit class.""" BACKPACK_CONV_SAVE_MEMORY = True """bool: Tell BackPACK to use a more memory-efficient Jacobian-vector product algorithm for weights in convolution layers. Default: ``True``. """ def __init__(self, params, quantities=None): """Initialize a cockpit. Args: params (iterable): List or sequence of parameters on which the quantities will be evaluated. Every parameter must have ``require_grad = True``, otherwise the computation cannot be executed. quantities (list, optional): List of ``Quantity`` (instances) that will be tracked. Defaults to None, which will use no quantities. Raises: ValueError: If not all passed parameters have ``required_grad=True``. """ # check parameters self.params = list(params) for p in self.params: if not p.requires_grad: raise ValueError(f"Got parameter with requires_grad=False: {p}") # initialize output self.output = defaultdict(dict) # add quantities to cockpit if quantities is None: quantities = [] self.quantities = [] for q in quantities: self.add(q) def add(self, quantity): """Add quantity to tracked quantities. Args: quantity (cockpit.quantites.Quantity): The quantity to be added. Raises: ValueError: If passed quantity is not a ``cockpit.quantity``. """ if not isinstance(quantity, Quantity): raise ValueError( f"Added quantities must be instances of Quantity. Got {quantity}" ) else: self.quantities.append(quantity) def create_graph(self, global_step): """Return if computation graph should be kept for computing quantities. Args: global_step (int): Current number of iteration. Returns: bool: ``True`` if computation graph should be kept, else ``False``. """ return any(q.create_graph(global_step) for q in self.quantities) def _get_extensions(self, global_step, custom_exts=()): """Collect BackPACK extensions required at current iteration. Args: global_step (int): Current number of iteration. custom_exts (list or tuple): Custom BackPACK extensions that will be computed on top. Returns: list: List of required BackPACK extensions for the current iteration. """ # TODO A user could introduce a bug here by running an extension which is # also required by one quantity, but uses different hyperparameters (for # instance the user picks ``DiagGGNMC(mc_samples=2)`` the Hessian trace # quantity uses ``DiagGGNMC(mc_samples=1)``.). Catching such a corner case # requires hyperparameter comparison of extensions. Considering the large # amount of required boilerplate, this is left for the future ext = list(custom_exts) for q in self.quantities: ext += q.extensions(global_step) ext = self._process_duplicate_extensions(ext) return ext def _get_extension_hook(self, global_step): """Build BackPACK extension hook for the current iteration. Args: global_step (int): Current number of iteration. Returns: callable or None: BackPACK extension hook for the current iteration. ``None`` indicates no hook. 
""" hooks = [] for q in self.quantities: hooks += q.extension_hooks(global_step) # Currently expects only ``BatchGradTransformsHook``s # This changes with https://github.com/f-dangel/cockpit-paper/issues/142 assert all(isinstance(h, BatchGradTransformsHook) for h in hooks) if len(hooks) == 0: hook = None else: hook = self._merge_batch_grad_transform_hooks(hooks) return hook def _get_protected_savefields(self, global_step: int) -> Set[str]: """Return names of protected BackPACK buffers. Args: global_step: Current iteration number. Returns: List of protected buffers. """ protected = set() for q in self.quantities: protected.update(q.protected_savefields(global_step)) return protected def __call__(self, global_step, *exts, info=None, debug=False): """Returns the backpack extensions that should be used in this iteration. Args: global_step (int): Current number of iteration. *exts: Custom BackPACK extensions that will be computed on top. info (dict): Dictionary that specifies additional information. Some quantities require additional information that is overly difficult to infer from a backward pass, like the individual losses. debug (bool, optional): Enable debug mode.. Defaults to False. Returns: backpack.backpack: BackPACK with the appropriate extensions, or the backpack_disable_io context. """ for e in exts: assert isinstance( e, BackpropExtension ), f"*exts must be tuple of backpack extensions. Got {e}" if info is None: info = {} if "optimizer" in info: self._optimizer_name = type(info["optimizer"]).__name__ return BackwardCTX(self, global_step, exts, info, debug=debug) def track(self, global_step, protected_savefields=()): """Tracking all quantities. Args: global_step (int): Current number of iteration. protected_savefields ([str]): List of strings containing attribute names of backpack extensions that will not be deleted after the backward pass :meta private: """ batch_loss = get_loss(global_step) before_cleanup = [ q for q in self.quantities if not isinstance(q, quantities.HessMaxEV) ] for q in before_cleanup: q.track(global_step, self.params, batch_loss) self._free_backpack_buffers(global_step, protected_savefields) after_cleanup = [ q for q in self.quantities if isinstance(q, quantities.HessMaxEV) ] with disable(): for q in after_cleanup: q.track(global_step, self.params, batch_loss) def _free_backpack_buffers(self, global_step, protected_savefields, verbose=False): """Manually free quantities computed by BackPACK to save memory. Args: global_step (int): Current number of iteration. protected_savefields ([str]): List of strings containing attribute names of backpack extensions that will not be deleted after the backward pass verbose (bool, optional): Turns on verbose mode. Defaults to ``False``. """ if verbose: print("Freeing BackPACK buffers") savefields = [ext.savefield for ext in self._get_extensions(global_step)] # TODO Determine hook savefields through ``ParameterExtensionHook`` and trigger # deletion. 
This can only happen after hooks have been introduced for all # quantities, see https://github.com/f-dangel/cockpit-paper/issues/142 savefields.append("grad_batch_transforms") for param in self.params: for field in savefields: try: if field not in protected_savefields: delattr(param, field) if verbose: print( f"Deleting '{field}' from param of shape {param.shape}" ) except AttributeError: pass def log( self, global_step, epoch_count, train_loss, valid_loss, test_loss, train_accuracy, valid_accuracy, test_accuracy, learning_rate, ): """Tracking function for quantities computed at every epoch. Args: global_step (int): Current number of iteration/global step. epoch_count (int): Current number of epoch. train_loss (float): Loss on the train (eval) set. valid_loss (float): Loss on the validation set. test_loss (float): Loss on the test set. train_accuracy (float): Accuracy on the train (eval) set. valid_accuracy (float): Accuracy on the validation set. test_accuracy (float): Accuracy on the test set. learning_rate (float): Learning rate of the optimizer. We assume, that the optimizer uses a single global learning rate, which is used for all parameter groups. """ # Store inputs self.output[global_step]["epoch_count"] = epoch_count self.output[global_step]["train_loss"] = train_loss self.output[global_step]["valid_loss"] = valid_loss self.output[global_step]["test_loss"] = test_loss self.output[global_step]["train_accuracy"] = train_accuracy self.output[global_step]["valid_accuracy"] = valid_accuracy self.output[global_step]["test_accuracy"] = test_accuracy self.output[global_step]["learning_rate"] = learning_rate def write(self, logpath): """Write tracked quantities to a json file. Args: logpath (str): Path to a log file without the ``.json`` suffix. """ logpath_with_suffix = logpath + ".json" print(f"[cockpit] writing output to {logpath_with_suffix}") os.makedirs(os.path.dirname(logpath_with_suffix), exist_ok=True) with open(logpath_with_suffix, "w") as json_file: json_tricks.dump(self.get_output(), json_file, indent=4, sort_keys=True) def _update_output(self): """Fetch outputs from tracked quantities into ``self.output``.""" # Update the cockpit with the outputs from the individual quantities for q in self.quantities: key = q.__class__.__name__ for iteration, value in q.get_output().items(): self.output[iteration][key] = value def get_output(self): """Return a nested dictionary that stores the results of all tracked quantities. First key corresponds to the iteration, second key is the quantity class name, values represent the computational result of the quantity at that iteration. Example: >>> cockpit = Cockpit(...) >>> # information tracked at iteration 3 >>> global_step = 3 >>> global_step_output = cockpit.get_output()[global_step] >>> # information tracked at iteration 3 by Hessian max eigenvalue quantity >>> key = "HessMaxEV" >>> max_ev_global_step_output = cockpit.output[global_step][key] Returns: dict: Nested dictionary with the results of all tracked quantities. """ self._update_output() return self.output def _process_duplicate_extensions(self, ext): """Remove duplicate BackPACK extensions. Note: Two extensions are considered equal if they are of the same class. Args: ext ([backpack.extensions]): A list of BackPACK extensions, potentially containing duplicates. 
Returns: [backpack.extensions]: A list of unique BackPACK extensions """ ext_dict = dict() no_duplicate_ext = [] for e in ext: if type(e) in ext_dict: pass else: no_duplicate_ext.append(e) ext_dict[type(e)] = True return no_duplicate_ext @staticmethod def _merge_batch_grad_transform_hooks(batch_grad_transform_hooks): """Merge multiple ``BatchGradTransformHook``s, removing duplicate transforms. Note: Two transformations are identical if they have the same ``id``. Args: batch_grad_transform_hooks ([BatchGradTransformsHook]): List of ``BatchGradTransformHook``s. Raises: ValueError: If there is a non-unique transform. Returns: BatchGradTransformsHook: Single transform that includes all transforms. """ transforms = [t._transforms for t in batch_grad_transform_hooks] key_function_pairs = [] for t in transforms: for key, value in t.items(): key_function_pairs.append((key, value)) unique_keys = {pair[0] for pair in key_function_pairs} combined_transforms = {} for key in unique_keys: functions = [pair[1] for pair in key_function_pairs if pair[0] == key] ids = [id(f) for f in functions] if len(set(ids)) != 1: raise ValueError( f"Got non-unique transform functions with ids {ids} for key '{key}'" ) else: combined_transforms[key] = functions[0] return BatchGradTransformsHook(combined_transforms)
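To show how the pieces above fit together, here is a hedged usage sketch that follows the call pattern the Cockpit docstrings describe: construct the cockpit with the model parameters, run the backward pass inside the cockpit context, then read get_output(). The toy model, the loss function, the empty quantity list, and the exact keys passed through info are illustrative assumptions, not requirements stated in this file.

import torch
from backpack import extend

from cockpit import Cockpit

# A toy model and loss, both extended for BackPACK (an assumption about setup).
model = extend(torch.nn.Linear(10, 2))
lossfunc = extend(torch.nn.CrossEntropyLoss(reduction="mean"))

# No quantities: the constructor explicitly allows an empty list.
cockpit = Cockpit(model.parameters(), quantities=[])

inputs = torch.rand(8, 10)
labels = torch.randint(0, 2, (8,))
loss = lossfunc(model(inputs), labels)

global_step = 0
# The info keys below (batch size and loss) are assumed typical usage;
# some quantities may need additional entries.
with cockpit(global_step, info={"batch_size": 8, "loss": loss}):
    loss.backward(create_graph=cockpit.create_graph(global_step))

print(cockpit.get_output()[global_step])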
smarts/sstudio/tests/test_generate.py
isgeles/SMARTS
554
11129871
# MIT License # # Copyright (C) 2021. Huawei Technologies Co., Ltd. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. import os import tempfile from typing import Sequence from xml.etree.ElementTree import ElementTree import pytest from smarts.sstudio import gen_missions, gen_traffic from smarts.sstudio.types import ( Distribution, Flow, JunctionModel, LaneChangingModel, Mission, Route, Traffic, TrafficActor, ) @pytest.fixture def traffic() -> Traffic: car1 = TrafficActor( name="car", speed=Distribution(sigma=0.2, mean=1.0), ) car2 = TrafficActor( name="car", speed=Distribution(sigma=0.2, mean=0.8), lane_changing_model=LaneChangingModel(impatience=1, cooperative=0.25), junction_model=JunctionModel(drive_after_yellow_time=1.0, impatience=0.5), ) return Traffic( flows=[ Flow( route=Route( begin=(f"edge-{r[0]}", 0, 30), end=(f"edge-{r[1]}", 0, -30) ), rate=1.0, actors={ car1: 0.5, car2: 0.5, }, ) for r in [("west-WE", "east-WE"), ("east-EW", "west-EW")] ] ) @pytest.fixture def missions() -> Sequence[Mission]: return [ Mission(Route(begin=("edge-west-WE", 0, 0), end=("edge-south-NS", 0, 0))), Mission(Route(begin=("edge-south-SN", 0, 30), end=("edge-west-EW", 0, 0))), ] def test_generate_traffic(traffic: Traffic): with tempfile.TemporaryDirectory() as temp_dir: gen_traffic( "scenarios/intersections/4lane_t", traffic, output_dir=temp_dir, name="generated", ) with open("smarts/sstudio/tests/baseline.rou.xml") as f: items = [x.items() for x in ElementTree(file=f).iter()] with open(os.path.join(temp_dir, "traffic", "generated.rou.xml")) as f: generated_items = [x.items() for x in ElementTree(file=f).iter()] print(sorted(items)) print(sorted(generated_items)) assert sorted(items) == sorted(generated_items)
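For completeness, a short sketch, not part of the test module, of how the missions fixture above could be written out with gen_missions, which is imported here even though only gen_traffic is exercised by the test. Only a scenario-plus-missions call shape is assumed; the scenario path mirrors the one used by test_generate_traffic.

def generate_example_missions(missions: Sequence[Mission]):
    """Write the example missions into the 4lane_t scenario used above."""
    scenario = "scenarios/intersections/4lane_t"
    # Assumption: gen_missions accepts the scenario path and the mission list
    # as its two leading positional arguments, as gen_traffic does.
    gen_missions(scenario, missions)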
BitTornado/Types/addresses.py
alahoo/BitTornado
116
11129877
"""Tools for validating, parsing, and comparing network addresses. Address is an abstract class, of which IPv4 and IPv6 are subclasses, which builds on top of the socket parsing of network addresses and represents addresses directly as their integer values. IP is the direct superclass of IPv4 and IPv6, which accepts valid addresses for either class, preferring IPv4 in ambiguous cases. """ import socket from .primitives import UnsignedInt class Address(UnsignedInt): """Unsigned integer representations of network addresses, building on the socket library. Subclass with number of bits and address family.""" family = None def __new__(cls, val=0): """Convert a number or a string to an Address.""" if cls.bits is None or cls.family is None: raise NotImplementedError( "Do not call {!s}() directly".format(cls.__name__)) if isinstance(val, str): if val.find(':') < 0: try: val = socket.gethostbyname(val) except socket.gaierror: pass try: return cls.from_bytes(socket.inet_pton(cls.family, val), 'big') except OSError: raise ValueError("invalid literal for {}(): {!r}".format( cls.__name__, val)) return super(Address, cls).__new__(cls, val) def __str__(self): """Use socket library formatting""" return socket.inet_ntop(self.family, self.to_bytes(self.bits // 8, 'big')) def mask(self, nbits): """Return an address with the first n bits preserved and the rest zeroes out.""" ones = (1 << self.bits) - 1 return self.__class__(self & (ones << (self.bits - nbits))) class IP(Address): """Generic IP address IP() == IPv4('0.0.0.0') IP('::') == IPv6('::') Enables conversion between IP classes: IP().to(IPv6) == IPv6('::ffff:0:0') IP('::ffff:0:0').to(IPv4) == IPv4('0.0.0.0') """ v4mask = 0xffff00000000 def __new__(cls, val=0): if cls.family is None: for subclass in cls.subclasses: try: return subclass(val) except (ValueError, OverflowError): pass raise ValueError('Invalid address: {}'.format(val)) return super(IP, cls).__new__(cls, val) def to(self, cls): # pylint: disable=invalid-name """Convert between IP classes, if possible. IPv4('w.x.y.z').to(IPv6) == IPv6('::ffff:w.x.y.z') IPv6('::ffff:w.x.y.z').to(IPv4) == IPv4('w.x.y.z') """ if isinstance(self, cls): return self try: return cls(self.convert[type(self)][cls](self)) except (KeyError, OverflowError): raise ValueError("not convertible to {}".format(cls.__name__)) class IPv4(IP): """Integer representation of IPv4 network addresses, building on the socket library.""" bits = 32 family = socket.AF_INET class IPv6(IP): """Integer representation of IPv6 network addresses, building on the socket library.""" bits = 128 family = socket.AF_INET6 IP.subclasses = (IPv4, IPv6) IP.convert = {IPv4: {IPv6: lambda x: x | IP.v4mask}, IPv6: {IPv4: lambda x: x ^ IP.v4mask}}
engine/core/ranges.py
pianomanx/pwndora
112
11129913
<gh_stars>100-1000
from ipaddress import ip_address
from loguru import logger
import ipaddress
import queue
import random

'''
Helpers to validate IP address ranges received as arguments.
They detect the type of range (for example CIDR notation) and generate
a queue with all the targets to send to the Threadscan object.
'''

def get_ranges(start, end):
    # Expand a start,end pair into all addresses in between (end excluded)
    start_int = int(ip_address(start).packed.hex(), 16)
    end_int = int(ip_address(end).packed.hex(), 16)
    return [ip_address(ip).exploded for ip in range(start_int, end_int)]

def get_total_ip_ranges(file):
    total = []
    with open(file, 'r') as flist:
        blocks = list(filter(None, flist.read().splitlines()))
        for ip in blocks:
            targets = detect_range_type(ip)
            for t in targets:
                total.append(t)
    return total

def put_targets_in_queue(targets):
    q = queue.Queue()
    for t in targets:
        q.put(t)
    return q

def single_range(iprange):
    total = detect_range_type(iprange)
    return randomize_list(total)

def multiple_ranges(file):
    total = get_total_ip_ranges(file)
    return randomize_list(total)

def randomize_list(total):
    shuffled = sorted(total, key=lambda L: random.random())
    return put_targets_in_queue(shuffled)

def detect_range_type(iprange):
    if "," in iprange:
        ranges = iprange.split(",")
        result = get_ranges(ranges[0], ranges[1])
    elif "/" in iprange:
        result = get_cidr(iprange)
    else:
        logger.info("Invalid target")
        exit()
    return result

def get_cidr(iprange):
    return [str(ip) for ip in ipaddress.IPv4Network(iprange)]
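A short, hypothetical driver for the helpers above, not part of the module: it expands a CIDR block and a start,end pair into shuffled queues and drains them. The example ranges are placeholders; note that the start,end form is end-exclusive because get_ranges uses range() over the integer values.

if __name__ == "__main__":
    # CIDR form: a /30 expands to four addresses (network, two hosts, broadcast).
    q = single_range("192.168.0.0/30")
    while not q.empty():
        print(q.get())

    # start,end form: expands to 10.0.0.1 through 10.0.0.4 (the end address is excluded).
    q = single_range("10.0.0.1,10.0.0.5")
    print(q.qsize())  # 4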
python/tvm/runtime/vm.py
shreejitverma/tvm
2,084
11129927
<gh_stars>1000+ # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=no-else-return, unidiomatic-typecheck, undefined-variable, invalid-name, redefined-builtin """ The Relay Virtual Machine runtime. Implements a Python interface to executing the compiled VM object. """ import numpy as np import tvm from tvm.runtime import Module from tvm._ffi.runtime_ctypes import TVMByteArray from tvm._ffi import base as _base from .object import Object from . import _ffi_api, container from ..rpc.base import RPC_SESS_MASK def _convert(arg, cargs): if isinstance(arg, Object): cargs.append(arg) elif isinstance(arg, np.ndarray): nd_arr = tvm.nd.array(arg, device=tvm.cpu(0)) cargs.append(nd_arr) elif isinstance(arg, tvm.runtime.NDArray): cargs.append(arg) elif isinstance(arg, (tuple, list)): field_args = [] for field in arg: _convert(field, field_args) cargs.append(container.tuple_object(field_args)) elif isinstance(arg, (_base.numeric_types, bool)): dtype = "int32" if isinstance(arg, (_base.integer_types, bool)) else "float32" value = tvm.nd.array(np.array(arg, dtype=dtype), device=tvm.cpu(0)) cargs.append(value) elif isinstance(arg, str): cargs.append(arg) else: raise TypeError("Unsupported type: %s" % (type(arg))) def convert(args): cargs = [] for arg in args: _convert(arg, cargs) return cargs class Executable(object): """Relay VM executable""" def __init__(self, mod): self.mod = mod self._function_params = {} self._save = self.mod["save"] self._get_lib = self.mod["get_lib"] self._get_bytecode = self.mod["get_bytecode"] self._get_constants = self.mod["get_constants"] self._get_virtual_devices = self.mod["get_virtual_devices"] self._get_primitives = self.mod["get_primitives"] self._get_stats = self.mod["get_stats"] self._get_function_arity = self.mod["get_function_arity"] self._get_function_param_name = self.mod["get_function_param_name"] self._move_late_bound_consts = self.mod["move_late_bound_consts"] self._load_late_bound_consts = self.mod["load_late_bound_consts"] def save(self): """Save the Relay VM Executable. Returns ------- code : bytearray The binary blob representing a serialized Relay VM executable. It can then be saved to disk and later deserialized into a new Executable. lib : :py:class:`~tvm.runtime.Module` The runtime module that contains the generated code. It is basically a library that is composed of hardware dependent code. Notes ----- The returned code is organized with the following sections in order. - Global section. This section contains the globals used by the virtual machine. - Constant section. This section is used to store the constant pool of a virtual machine. - Primitive name section. This section is introduced to accommodate the list of primitive operator names that will be invoked by the virtual machine. - Code section. 
The VM functions, including bytecode, are sitting in this section. Examples -------- .. code-block:: python import numpy as np import tvm from tvm import te from tvm import relay # define a simple network. x = relay.var('x', shape=(10, 10)) f = relay.Function([x], x + x) mod = tvm.IRModule({"main": f}) # create a Relay VM. dev = tvm.cpu() target = "llvm" executable = relay.vm.compile(mod, target) code, lib = executable.save() # save and load the code and lib file. tmp = tvm.contrib.utils.tempdir() path_lib = tmp.relpath("lib.so") lib.export_library(path_lib) with open(tmp.relpath("code.ro"), "wb") as fo: fo.write(code) loaded_lib = tvm.runtime.load_module(path_lib) loaded_code = bytearray(open(tmp.relpath("code.ro"), "rb").read()) # deserialize. des_exec = tvm.runtime.vm.Executable.load_exec(loaded_code, loaded_lib) # execute the deserialized executable. x_data = np.random.rand(10, 10).astype('float32') des_vm = tvm.runtime.vm.VirtualMachine(des_exec, dev) res = des_vm.run(x_data) print(res.numpy()) """ return self._save(), self._get_lib() @staticmethod def load_exec(bytecode, lib): """Construct an executable from saved artifacts. Parameters ---------- bytecode : bytearray The binary blob representing a the Relay VM bytecode. lib : :py:class:`~tvm.runtime.Module` The runtime module that contains the generated code. Returns ------- exec: Executable An executable constructed using the provided artifacts. """ if isinstance(bytecode, (bytes, str)): bytecode = bytearray(bytecode) elif not isinstance(bytecode, (bytearray, TVMByteArray)): raise TypeError( "bytecode is expected to be the type of bytearray " + "or TVMByteArray, but received {}".format(type(bytecode)) ) if lib is not None and not isinstance(lib, tvm.runtime.Module): raise TypeError( "lib is expected to be the type of tvm.runtime.Module" + ", but received {}".format(type(lib)) ) return Executable(_ffi_api.Load_Executable(bytecode, lib)) @property def lib(self): """Get the library that contains hardware dependent code. Returns ------- ret : :py:class:`~tvm.runtime.Module` The runtime module that contains hardware dependent code. """ return self._get_lib() @property def stats(self): """Get the statistics of the Relay VM executable. Returns ------- ret : String The statistic information of the VM executable. """ return self._get_stats() @property def primitive_ops(self): """Get the name of the primitive ops contained in the executable. Returns ------- ret : List[String] The list of primitive ops. """ ret = [] num_primitives = _ffi_api.GetNumOfPrimitives(self.module) for i in range(num_primitives): ret.append(_ffi_api.GetPrimitiveFields(self.module, i)) return ret @property def bytecode(self): """Get the bytecode of the Relay VM executable. Returns ------- ret : String The bytecode of the executable. Notes ----- The bytecode is in the following format: func_name reg_file_size num_instructions param1 param2 ... paramM instruction1 instruction2 ... instructionN Each instruction is printed in the following format: hash opcode field1 ... fieldX # The text format. The part starting from # is only used for visualization and debugging. The real serialized code doesn't contain it, therefore the deserializer doesn't need to deal with it as well. """ return self._get_bytecode() @property def constants(self): """Returns a human-readable description of all the constants in the executable. 
Useful for debugging and diffing generated executables in unit tests.""" return self._get_constants() @property def virtual_devices(self): """Returns a human-readable description of all the (virtual) devices in the executable.""" return self._get_virtual_devices() @property def primitives(self): """Returns a human-readable description of all the primitives (ie PackedFuncs) in the executable""" return self._get_primitives() @property def globals(self): """Get the globals used by the Relay VM executable. Returns ------- ret : List[String] The globals contained in the executable. """ ret = [] num_globals = _ffi_api.GetNumOfGlobals(self.module) for i in range(num_globals): ret.append(_ffi_api.GetGlobalFields(self.module, i)) return ret @property def module(self): """Return the runtime module contained in a virtual machine executable.""" return self.mod def get_function_params(self, func_name): """Get VM Function parameters""" if func_name in self._function_params: return self._function_params[func_name] arity = self._get_function_arity(func_name) assert arity >= 0 params = [] for i in range(arity): p = self._get_function_param_name(func_name, i) assert p params.append(p) self._function_params[func_name] = params return params def move_late_bound_consts(self, path, byte_limit): """Move all constants of byte size greater or equal to byte_limit to file at path""" return self._move_late_bound_consts(path, byte_limit) def load_late_bound_consts(self, path): """Re-load constants previously saved to file at path""" return self._load_late_bound_consts(path, bytes) class VirtualMachine(object): """Relay VM runtime. Parameters ---------- exe : Executable The VM executable. device : tvm.runtime.Device or List[tvm.runtime.Device] The device(s) on which the model will run. Currently at most one device per device type is supported. memory_cfg : str or Dict[tvm.runtime.Device, str], optional Config the type of memory allocator. The allocator type can be ["naive", "pooled"]. If memory_cfg is None, all devices will use pooled allocator by default. If memory_cfg is string, all devices will use the specified allocator type. If memory_cfg is a dict, each device uses the allocator type specified in the dict, or pooled allocator if not specified in the dict. """ NAIVE_ALLOCATOR = 1 POOLED_ALLOCATOR = 2 def __init__(self, exe, device, memory_cfg=None): """ Construct a VirtualMachine wrapper class which provides a simple interface over the raw C++ Module based API. Parameters ---------- exe: Union[Executable, Module] The executable either with the wrapper Python type or the raw runtime.Module. In most cases this will be the Python wrapper class tvm.runtime.vm.Executable but if you instead get the underlying runtime.Module subclass (i.e `exe.mod`) you can directly pass it to this method. This case can occur when doing things such as RPC where TVM's module APIs return the raw modules, not the wrapped modules. This constructor will handle this internally. device: Union[Device, List[Device]] The device, or devices on which to execute the VM code. memory_cfg: Optional[str] The allocator behavior to use for the VM. Returns ------- vm: VirtualMachine A VM wrapper object. 
""" if not isinstance(exe, Executable) and not isinstance(exe, Module): raise TypeError( "exe is expected to be the type of Executable, " + "but received {}".format(type(exe)) ) if not isinstance(exe, Executable): exe = Executable(exe) self.module = exe.mod["vm_load_executable"]() self._exec = exe self._init = self.module["init"] self._invoke = self.module["invoke"] self._invoke_stateful = self.module["invoke_stateful"] self._get_output = self.module["get_output"] self._get_num_outputs = self.module["get_num_outputs"] self._get_input_index = self.module["get_input_index"] self._set_input = self.module["set_input"] self._setup_device(device, memory_cfg) def _setup_device(self, dev, memory_cfg): """Init devices and allocators.""" devs = dev if not isinstance(dev, (list, tuple)): if not isinstance(dev, tvm.runtime.Device): raise TypeError("dev is expected to be Device or List[Device]") devs = [dev] # CPU is required for executing shape functions if not any(c.device_type % RPC_SESS_MASK == tvm.cpu().device_type for c in devs): devs.append(tvm.cpu()) default_alloc_type = VirtualMachine.POOLED_ALLOCATOR if memory_cfg is None: memory_cfg = {} elif isinstance(memory_cfg, str): assert memory_cfg in ["naive", "pooled"] if memory_cfg == "naive": default_alloc_type = VirtualMachine.NAIVE_ALLOCATOR memory_cfg = {} elif not isinstance(memory_cfg, dict): raise TypeError( "memory_cfg is expected be string or dictionary, " + "but received {}".format(type(memory_cfg)) ) init_args = [] for device in devs: init_args.append(device.device_type % RPC_SESS_MASK) init_args.append(device.device_id) alloc_type = memory_cfg[device] if device in memory_cfg else default_alloc_type init_args.append(alloc_type) self._init(*init_args) def set_input(self, func_name, *args, **kwargs): """Set the input to a function. Parameters ---------- func_name : str The name of the function. args : list[tvm.runtime.NDArray] or list[np.ndarray] The arguments to the function. kwargs: dict of str to tvm.runtime.NDArray or np.ndarray Named arguments to the function. """ if kwargs: # kwargs is a super set of the required function parameters. We # only find the ones that are needed. func_params = self._exec.get_function_params(func_name) new_args = [None] * len(func_params) cnt = 0 for k in kwargs: if k in func_params: idx = func_params.index(k) new_args[idx] = kwargs[k] cnt += 1 assert len(args) + cnt == len(func_params) idx = 0 for i, arg in enumerate(new_args): if arg is None: new_args[i] = args[idx] idx += 1 args = new_args cargs = convert(args) self._set_input(func_name, *cargs) def invoke(self, func_name, *args, **kwargs): """Invoke a function. Parameters ---------- func_name : str The name of the function. args : list[tvm.runtime.NDArray] or list[np.ndarray] The arguments to the function. kwargs: dict of str to tvm.runtime.NDArray or np.ndarray Named arguments to the function. Returns ------- result : Object The output. """ if args or kwargs: self.set_input(func_name, *args, **kwargs) return self._invoke(func_name) def run(self, *args, **kwargs): """Run the main function. Parameters ---------- args : list[tvm.runtime.NDArray] or list[np.ndarray] The arguments to the function. kwargs: dict of str to tvm.runtime.NDArray or np.ndarray Named arguments to the function. Returns ------- result : Object The output. """ return self.invoke("main", *args, **kwargs) def invoke_stateful(self, func_name, *args, **kwargs): """Invoke a function and ignore the returned result. 
Use this function when running over rpc because it is currently impossible to return a ADT object over rpc. To get the outputs, use :py:func`get_outputs`. Parameters ---------- func_name : str The name of the function. args : list[tvm.runtime.NDArray] or list[np.ndarray] The arguments to the function. kwargs: dict of str to tvm.runtime.NDArray or np.ndarray Named arguments to the function. """ if args or kwargs: self.set_input(func_name, *args, **kwargs) self._invoke_stateful(func_name) def get_outputs(self): """Get the outputs from a call to :py:func`invoke_stateful`. Returns ------- outputs : List[NDArray] """ return [self._get_output(i) for i in range(self._get_num_outputs())] def get_input_index(self, input_name, func_name="main"): """Get inputs index via input name. Parameters ---------- name : str The input key name func_name : str The function name Returns ------- index: int The input index. -1 will be returned if the given input name is not found. """ return self._get_input_index(input_name, func_name) def benchmark( self, device, *args, func_name="main", repeat=5, number=5, min_repeat_ms=None, end_to_end=False, **kwargs, ): """Calculate runtime of a function by repeatedly calling it. Use this function to get an accurate measurement of the runtime of a function. The function is run multiple times in order to account for variability in measurements, processor speed or other external factors. Mean, median, standard deviation, min and max runtime are all reported. On GPUs, CUDA and ROCm specifically, special on-device timers are used so that synchonization and data transfer operations are not counted towards the runtime. This allows for fair comparison of runtimes across different functions and models. The `end_to_end` flag switches this behavior to include data transfer operations in the runtime. The benchmarking loop looks approximately like so: .. code-block:: python for r in range(repeat): time_start = now() for n in range(number): func_name() time_end = now() total_times.append((time_end - time_start)/number) Parameters ---------- func_name : str The function to benchmark repeat : int Number of times to run the outer loop of the timing code (see above). The output will contain `repeat` number of datapoints. number : int Number of times to run the inner loop of the timing code. This inner loop is run in between the timer starting and stopping. In order to amortize any timing overhead, `number` should be increased when the runtime of the function is small (less than a 1/10 of a millisecond). min_repeat_ms : Optional[float] If set, the inner loop will be run until it takes longer than `min_repeat_ms` milliseconds. This can be used to ensure that the function is run enough to get an accurate measurement. end_to_end : bool If set, include time to transfer input tensors to the device and time to transfer returned tensors in the total runtime. This will give accurate timings for end to end workloads. args : Sequence[Object] Arguments to the function. These are cached before running timing code, so that data transfer costs are not counted in the runtime. kwargs : Dict[str, Object] Named arguments to the function. These are cached like `args`. Returns ------- timing_results : BenchmarkResult Runtimes of the function. Use `.mean` to access the mean runtime, use `.results` to access the individual runtimes (in seconds). 
""" min_repeat_ms = 0 if min_repeat_ms is None else min_repeat_ms if end_to_end: # We need to unpack keyword arguments into positional arguments packed_args = list(args) for k, v in kwargs.items(): i = self.get_input_index(k, func_name) if i < 0: raise TypeError(f"{func_name}() got an unexpected keyword argument '{k}'") while i >= len(packed_args): packed_args.append(None) packed_args[i] = v return self.module.time_evaluator( "invoke_return_to_device", device, repeat=repeat, number=number, min_repeat_ms=min_repeat_ms, )(func_name, device.device_type % RPC_SESS_MASK, device.device_id, *packed_args) if args or kwargs: self.set_input(func_name, *args, **kwargs) return self.module.time_evaluator( "invoke", device, repeat=repeat, number=number, min_repeat_ms=min_repeat_ms )(func_name)
utils/gyb.py
lwhsu/swift
3,016
11129930
<reponame>lwhsu/swift #!/usr/bin/env python # GYB: Generate Your Boilerplate (improved names welcome; at least # this one's short). See -h output for instructions from __future__ import print_function import os import re import sys try: from cStringIO import StringIO except ImportError: from io import StringIO import textwrap import tokenize from bisect import bisect try: basestring except NameError: basestring = str def get_line_starts(s): """Return a list containing the start index of each line in s. The list also contains a sentinel index for the end of the string, so there will be one more element in the list than there are lines in the string """ starts = [0] for line in s.split('\n'): starts.append(starts[-1] + len(line) + 1) starts[-1] -= 1 return starts def strip_trailing_nl(s): """If s ends with a newline, drop it; else return s intact""" return s[:-1] if s.endswith('\n') else s def split_lines(s): """Split s into a list of lines, each of which has a trailing newline If the lines are later concatenated, the result is s, possibly with a single appended newline. """ return [l + '\n' for l in s.split('\n')] # text on a line up to the first '$$', '${', or '%%' literalText = r'(?: [^$\n%] | \$(?![${]) | %(?!%) )*' # The part of an '%end' line that follows the '%' sign linesClose = r'[\ \t]* end [\ \t]* (?: \# .* )? $' # Note: Where "# Absorb" appears below, the regexp attempts to eat up # through the end of ${...} and %{...}% constructs. In reality we # handle this with the Python tokenizer, which avoids mis-detections # due to nesting, comments and strings. This extra absorption in the # regexp facilitates testing the regexp on its own, by preventing the # interior of some of these constructs from being treated as literal # text. tokenize_re = re.compile( r''' # %-lines and %{...}-blocks # \n? # absorb one preceding newline ^ (?: (?P<gybLines> (?P<_indent> [\ \t]* % (?! [{%] ) [\ \t]* ) (?! [\ \t] | ''' + linesClose + r''' ) .* ( \n (?P=_indent) (?! ''' + linesClose + r''' ) .* ) * ) | (?P<gybLinesClose> [\ \t]* % [ \t]* ''' + linesClose + r''' ) | [\ \t]* (?P<gybBlockOpen> %\{ ) (?: [^}]| \} (?!%) )* \}% # Absorb ) \n? # absorb one trailing newline # Substitutions | (?P<substitutionOpen> \$\{ ) [^}]* \} # Absorb # %% and $$ are literal % and $ respectively | (?P<symbol>[$%]) (?P=symbol) # Literal text | (?P<literal> ''' + literalText + r''' (?: # newline that doesn't precede space+% (?: \n (?! [\ \t]* %[^%] ) ) ''' + literalText + r''' )* \n? ) ''', re.VERBOSE | re.MULTILINE) gyb_block_close = re.compile(r'\}%[ \t]*\n?') def token_pos_to_index(token_pos, start, line_starts): """Translate a tokenize (line, column) pair into an absolute position in source text given the position where we started tokenizing and a list that maps lines onto their starting character indexes. 
""" relative_token_line_plus1, token_col = token_pos # line number where we started tokenizing start_line_num = bisect(line_starts, start) - 1 # line number of the token in the whole text abs_token_line = relative_token_line_plus1 - 1 + start_line_num # if found in the first line, adjust the end column to account # for the extra text if relative_token_line_plus1 == 1: token_col += start - line_starts[start_line_num] # Sometimes tokenizer errors report a line beyond the last one if abs_token_line >= len(line_starts): return line_starts[-1] return line_starts[abs_token_line] + token_col def tokenize_python_to_unmatched_close_curly(source_text, start, line_starts): """Apply Python's tokenize to source_text starting at index start while matching open and close curly braces. When an unmatched close curly brace is found, return its index. If not found, return len(source_text). If there's a tokenization error, return the position of the error. """ stream = StringIO(source_text) stream.seek(start) nesting = 0 try: for kind, text, token_start, token_end, line_text \ in tokenize.generate_tokens(stream.readline): if text == '{': nesting += 1 elif text == '}': nesting -= 1 if nesting < 0: return token_pos_to_index(token_start, start, line_starts) except tokenize.TokenError as error: (message, error_pos) = error.args return token_pos_to_index(error_pos, start, line_starts) return len(source_text) def tokenize_template(template_text): r"""Given the text of a template, returns an iterator over (tokenType, token, match) tuples. **Note**: this is template syntax tokenization, not Python tokenization. When a non-literal token is matched, a client may call iter.send(pos) on the iterator to reset the position in template_text at which scanning will resume. This function provides a base level of tokenization which is then refined by ParseContext.token_generator. >>> from pprint import * >>> pprint(list((kind, text) for kind, text, _ in tokenize_template( ... '%for x in range(10):\n% print x\n%end\njuicebox'))) [('gybLines', '%for x in range(10):\n% print x'), ('gybLinesClose', '%end'), ('literal', 'juicebox')] >>> pprint(list((kind, text) for kind, text, _ in tokenize_template( ... '''Nothing ... % if x: ... % for i in range(3): ... ${i} ... % end ... % else: ... THIS SHOULD NOT APPEAR IN THE OUTPUT ... '''))) [('literal', 'Nothing\n'), ('gybLines', '% if x:\n% for i in range(3):'), ('substitutionOpen', '${'), ('literal', '\n'), ('gybLinesClose', '% end'), ('gybLines', '% else:'), ('literal', 'THIS SHOULD NOT APPEAR IN THE OUTPUT\n')] >>> for kind, text, _ in tokenize_template(''' ... This is $some$ literal stuff containing a ${substitution} ... followed by a %{...} block: ... %{ ... # Python code ... }% ... and here $${are} some %-lines: ... % x = 1 ... % y = 2 ... % if z == 3: ... % print '${hello}' ... % end ... % for x in zz: ... % print x ... % # different indentation ... % twice ... and some lines that literally start with a %% token ... %% first line ... %% second line ... '''): ... 
print((kind, text.strip().split('\n',1)[0])) ('literal', 'This is $some$ literal stuff containing a') ('substitutionOpen', '${') ('literal', 'followed by a %{...} block:') ('gybBlockOpen', '%{') ('literal', 'and here ${are} some %-lines:') ('gybLines', '% x = 1') ('gybLinesClose', '% end') ('gybLines', '% for x in zz:') ('gybLines', '% # different indentation') ('gybLines', '% twice') ('literal', 'and some lines that literally start with a % token') """ pos = 0 end = len(template_text) saved_literal = [] literal_first_match = None while pos < end: m = tokenize_re.match(template_text, pos, end) # pull out the one matched key (ignoring internal patterns starting # with _) ((kind, text), ) = ( (kind, text) for (kind, text) in m.groupdict().items() if text is not None and kind[0] != '_') if kind in ('literal', 'symbol'): if len(saved_literal) == 0: literal_first_match = m # literals and symbols get batched together saved_literal.append(text) pos = None else: # found a non-literal. First yield any literal we've accumulated if saved_literal != []: yield 'literal', ''.join(saved_literal), literal_first_match saved_literal = [] # Then yield the thing we found. If we get a reply, it's # the place to resume tokenizing pos = yield kind, text, m # If we were not sent a new position by our client, resume # tokenizing at the end of this match. if pos is None: pos = m.end(0) else: # Client is not yet ready to process next token yield if saved_literal != []: yield 'literal', ''.join(saved_literal), literal_first_match def split_gyb_lines(source_lines): r"""Return a list of lines at which to split the incoming source These positions represent the beginnings of python line groups that will require a matching %end construct if they are to be closed. >>> src = split_lines('''\ ... if x: ... print x ... if y: # trailing comment ... print z ... if z: # another comment\ ... ''') >>> s = split_gyb_lines(src) >>> len(s) 2 >>> src[s[0]] ' print z\n' >>> s[1] - len(src) 0 >>> src = split_lines('''\ ... if x: ... if y: print 1 ... if z: ... print 2 ... pass\ ... ''') >>> s = split_gyb_lines(src) >>> len(s) 1 >>> src[s[0]] ' if y: print 1\n' >>> src = split_lines('''\ ... if x: ... if y: ... print 1 ... print 2 ... ''') >>> s = split_gyb_lines(src) >>> len(s) 2 >>> src[s[0]] ' if y:\n' >>> src[s[1]] ' print 1\n' """ last_token_text, last_token_kind = None, None unmatched_indents = [] dedents = 0 try: for token_kind, token_text, token_start, \ (token_end_line, token_end_col), line_text \ in tokenize.generate_tokens(lambda i=iter(source_lines): next(i)): if token_kind in (tokenize.COMMENT, tokenize.ENDMARKER): continue if token_text == '\n' and last_token_text == ':': unmatched_indents.append(token_end_line) # The tokenizer appends dedents at EOF; don't consider # those as matching indentations. Instead just save them # up... if last_token_kind == tokenize.DEDENT: dedents += 1 # And count them later, when we see something real. if token_kind != tokenize.DEDENT and dedents > 0: unmatched_indents = unmatched_indents[:-dedents] dedents = 0 last_token_text, last_token_kind = token_text, token_kind except tokenize.TokenError: # Let the later compile() call report the error return [] if last_token_text == ':': unmatched_indents.append(len(source_lines)) return unmatched_indents def code_starts_with_dedent_keyword(source_lines): r"""Return True iff the incoming Python source_lines begin with "else", "elif", "except", or "finally". Initial comments and whitespace are ignored. 
>>> code_starts_with_dedent_keyword(split_lines('if x in y: pass')) False >>> code_starts_with_dedent_keyword(split_lines('except ifSomethingElse:')) True >>> code_starts_with_dedent_keyword( ... split_lines('\n# comment\nelse: # yes')) True """ token_text = None for token_kind, token_text, _, _, _ \ in tokenize.generate_tokens(lambda i=iter(source_lines): next(i)): if token_kind != tokenize.COMMENT and token_text.strip() != '': break return token_text in ('else', 'elif', 'except', 'finally') class ParseContext(object): """State carried through a parse of a template""" filename = '' template = '' line_starts = [] code_start_line = -1 code_text = None tokens = None # The rest of the tokens close_lines = False def __init__(self, filename, template=None): self.filename = os.path.abspath(filename) if sys.platform == 'win32': self.filename = self.filename.replace('\\', '/') if template is None: with open(filename) as f: self.template = f.read() else: self.template = template self.line_starts = get_line_starts(self.template) self.tokens = self.token_generator(tokenize_template(self.template)) self.next_token() def pos_to_line(self, pos): return bisect(self.line_starts, pos) - 1 def token_generator(self, base_tokens): r"""Given an iterator over (kind, text, match) triples (see tokenize_template above), return a refined iterator over token_kinds. Among other adjustments to the elements found by base_tokens, this refined iterator tokenizes python code embedded in template text to help determine its true extent. The expression "base_tokens.send(pos)" is used to reset the index at which base_tokens resumes scanning the underlying text. >>> ctx = ParseContext('dummy', ''' ... %for x in y: ... % print x ... % end ... literally ... ''') >>> while ctx.token_kind: ... print((ctx.token_kind, ctx.code_text or ctx.token_text)) ... ignored = ctx.next_token() ('literal', '\n') ('gybLinesOpen', 'for x in y:\n') ('gybLines', ' print x\n') ('gybLinesClose', '% end') ('literal', 'literally\n') >>> ctx = ParseContext('dummy', ... '''Nothing ... % if x: ... % for i in range(3): ... ${i} ... % end ... % else: ... THIS SHOULD NOT APPEAR IN THE OUTPUT ... ''') >>> while ctx.token_kind: ... print((ctx.token_kind, ctx.code_text or ctx.token_text)) ... ignored = ctx.next_token() ('literal', 'Nothing\n') ('gybLinesOpen', 'if x:\n') ('gybLinesOpen', ' for i in range(3):\n') ('substitutionOpen', 'i') ('literal', '\n') ('gybLinesClose', '% end') ('gybLinesOpen', 'else:\n') ('literal', 'THIS SHOULD NOT APPEAR IN THE OUTPUT\n') >>> ctx = ParseContext('dummy', ... '''% for x in [1, 2, 3]: ... % if x == 1: ... literal1 ... % elif x > 1: # add output line here to fix bug ... % if x == 2: ... literal2 ... % end ... % end ... % end ... ''') >>> while ctx.token_kind: ... print((ctx.token_kind, ctx.code_text or ctx.token_text)) ... ignored = ctx.next_token() ('gybLinesOpen', 'for x in [1, 2, 3]:\n') ('gybLinesOpen', ' if x == 1:\n') ('literal', 'literal1\n') ('gybLinesOpen', 'elif x > 1: # add output line here to fix bug\n') ('gybLinesOpen', ' if x == 2:\n') ('literal', 'literal2\n') ('gybLinesClose', '% end') ('gybLinesClose', '% end') ('gybLinesClose', '% end') """ for self.token_kind, self.token_text, self.token_match in base_tokens: kind = self.token_kind self.code_text = None # Do we need to close the current lines? 
self.close_lines = kind == 'gybLinesClose' # %{...}% and ${...} constructs if kind.endswith('Open'): # Tokenize text that follows as Python up to an unmatched '}' code_start = self.token_match.end(kind) self.code_start_line = self.pos_to_line(code_start) close_pos = tokenize_python_to_unmatched_close_curly( self.template, code_start, self.line_starts) self.code_text = self.template[code_start:close_pos] yield kind if (kind == 'gybBlockOpen'): # Absorb any '}% <optional-comment> \n' m2 = gyb_block_close.match(self.template, close_pos) if not m2: raise ValueError("Invalid block closure") next_pos = m2.end(0) else: assert kind == 'substitutionOpen' # skip past the closing '}' next_pos = close_pos + 1 # Resume tokenizing after the end of the code. base_tokens.send(next_pos) elif kind == 'gybLines': self.code_start_line = self.pos_to_line( self.token_match.start('gybLines')) indentation = self.token_match.group('_indent') # Strip off the leading indentation and %-sign source_lines = re.split( '^' + re.escape(indentation), self.token_match.group('gybLines') + '\n', flags=re.MULTILINE)[1:] if code_starts_with_dedent_keyword(source_lines): self.close_lines = True last_split = 0 for line in split_gyb_lines(source_lines): self.token_kind = 'gybLinesOpen' self.code_text = ''.join(source_lines[last_split:line]) yield self.token_kind last_split = line self.code_start_line += line - last_split self.close_lines = False self.code_text = ''.join(source_lines[last_split:]) if self.code_text: self.token_kind = 'gybLines' yield self.token_kind else: yield self.token_kind def next_token(self): """Move to the next token""" for kind in self.tokens: return self.token_kind self.token_kind = None _default_line_directive = \ '// ###sourceLocation(file: "%(file)s", line: %(line)d)' class ExecutionContext(object): """State we pass around during execution of a template""" def __init__(self, line_directive=_default_line_directive, **local_bindings): self.local_bindings = local_bindings self.line_directive = line_directive self.local_bindings['__context__'] = self self.result_text = [] self.last_file_line = None def append_text(self, text, file, line): # see if we need to inject a line marker if self.line_directive: if (file, line) != self.last_file_line: # We can only insert the line directive at a line break if len(self.result_text) == 0 \ or self.result_text[-1].endswith('\n'): substitutions = {'file': file, 'line': line + 1} format_str = self.line_directive + '\n' self.result_text.append(format_str % substitutions) # But if the new text contains any line breaks, we can create # one elif '\n' in text: i = text.find('\n') self.result_text.append(text[:i + 1]) # and try again self.append_text(text[i + 1:], file, line + 1) return self.result_text.append(text) self.last_file_line = (file, line + text.count('\n')) class ASTNode(object): """Abstract base class for template AST nodes""" def __init__(self): raise NotImplementedError("ASTNode.__init__ is not implemented.") def execute(self, context): raise NotImplementedError("ASTNode.execute is not implemented.") def __str__(self, indent=''): raise NotImplementedError("ASTNode.__str__ is not implemented.") def format_children(self, indent): if not self.children: return ' []' return '\n'.join( ['', indent + '['] + [x.__str__(indent + 4 * ' ') for x in self.children] + [indent + ']']) class Block(ASTNode): """A sequence of other AST nodes, to be executed in order""" children = [] def __init__(self, context): self.children = [] while context.token_kind and not 
context.close_lines: if context.token_kind == 'literal': node = Literal else: node = Code self.children.append(node(context)) def execute(self, context): for x in self.children: x.execute(context) def __str__(self, indent=''): return indent + 'Block:' + self.format_children(indent) class Literal(ASTNode): """An AST node that generates literal text""" def __init__(self, context): self.text = context.token_text start_position = context.token_match.start(context.token_kind) self.start_line_number = context.pos_to_line(start_position) self.filename = context.filename context.next_token() def execute(self, context): context.append_text(self.text, self.filename, self.start_line_number) def __str__(self, indent=''): return '\n'.join( [indent + x for x in ['Literal:'] + strip_trailing_nl(self.text).split('\n')]) class Code(ASTNode): """An AST node that is evaluated as Python""" code = None children = () kind = None def __init__(self, context): source = '' source_line_count = 0 def accumulate_code(): s = source + (context.code_start_line - source_line_count) * '\n' \ + textwrap.dedent(context.code_text) line_count = context.code_start_line + \ context.code_text.count('\n') context.next_token() return s, line_count eval_exec = 'exec' if context.token_kind.startswith('substitution'): eval_exec = 'eval' source, source_line_count = accumulate_code() source = '(' + source.strip() + ')' else: while context.token_kind == 'gybLinesOpen': source, source_line_count = accumulate_code() source += ' __children__[%d].execute(__context__)\n' % len( self.children) source_line_count += 1 self.children += (Block(context),) if context.token_kind == 'gybLinesClose': context.next_token() if context.token_kind == 'gybLines': source, source_line_count = accumulate_code() # Only handle a substitution as part of this code block if # we don't already have some %-lines. 
elif context.token_kind == 'gybBlockOpen': # Opening ${...} and %{...}% constructs source, source_line_count = accumulate_code() self.filename = context.filename self.start_line_number = context.code_start_line self.code = compile(source, context.filename, eval_exec) self.source = source def execute(self, context): # Save __children__ from the local bindings save_children = context.local_bindings.get('__children__') # Execute the code with our __children__ in scope context.local_bindings['__children__'] = self.children context.local_bindings['__file__'] = self.filename result = eval(self.code, context.local_bindings) if context.local_bindings['__children__'] is not self.children: raise ValueError("The code is not allowed to mutate __children__") # Restore the bindings context.local_bindings['__children__'] = save_children # If we got a result, the code was an expression, so append # its value if result is not None \ or (isinstance(result, basestring) and result != ''): from numbers import Number, Integral result_string = None if isinstance(result, Number) and not isinstance(result, Integral): result_string = repr(result) else: result_string = str(result) context.append_text( result_string, self.filename, self.start_line_number) def __str__(self, indent=''): source_lines = re.sub(r'^\n', '', strip_trailing_nl( self.source), flags=re.MULTILINE).split('\n') if len(source_lines) == 1: s = indent + 'Code: {' + source_lines[0] + '}' else: s = indent + 'Code:\n' + indent + '{\n' + '\n'.join( indent + 4 * ' ' + l for l in source_lines ) + '\n' + indent + '}' return s + self.format_children(indent) def expand(filename, line_directive=_default_line_directive, **local_bindings): r"""Return the contents of the given template file, executed with the given local bindings. >>> from tempfile import NamedTemporaryFile >>> # On Windows, the name of a NamedTemporaryFile cannot be used to open >>> # the file for a second time if delete=True. Therefore, we have to >>> # manually handle closing and deleting this file to allow us to open >>> # the file by its name across all platforms. >>> f = NamedTemporaryFile(delete=False) >>> f.write( ... r'''--- ... % for i in range(int(x)): ... a pox on ${i} for epoxy ... % end ... ${120 + ... ... 3} ... abc ... ${"w\nx\nX\ny"} ... z ... ''') >>> f.flush() >>> result = expand( ... f.name, ... line_directive='//#sourceLocation(file: "%(file)s", ' + \ ... 'line: %(line)d)', ... x=2 ... ).replace( ... '"%s"' % f.name.replace('\\', '/'), '"dummy.file"') >>> print(result, end='') //#sourceLocation(file: "dummy.file", line: 1) --- //#sourceLocation(file: "dummy.file", line: 3) a pox on 0 for epoxy //#sourceLocation(file: "dummy.file", line: 3) a pox on 1 for epoxy //#sourceLocation(file: "dummy.file", line: 5) 123 //#sourceLocation(file: "dummy.file", line: 8) abc w x X y //#sourceLocation(file: "dummy.file", line: 10) z >>> f.close() >>> os.remove(f.name) """ with open(filename) as f: t = parse_template(filename, f.read()) d = os.getcwd() os.chdir(os.path.dirname(os.path.abspath(filename))) try: return execute_template( t, line_directive=line_directive, **local_bindings) finally: os.chdir(d) def parse_template(filename, text=None): r"""Return an AST corresponding to the given template file. If text is supplied, it is assumed to be the contents of the file, as a string. >>> print(parse_template('dummy.file', text= ... '''% for x in [1, 2, 3]: ... % if x == 1: ... literal1 ... % elif x > 1: # add output line after this line to fix bug ... % if x == 2: ... literal2 ... % end ... 
% end ... % end ... ''')) Block: [ Code: { for x in [1, 2, 3]: __children__[0].execute(__context__) } [ Block: [ Code: { if x == 1: __children__[0].execute(__context__) elif x > 1: # add output line after this line to fix bug __children__[1].execute(__context__) } [ Block: [ Literal: literal1 ] Block: [ Code: { if x == 2: __children__[0].execute(__context__) } [ Block: [ Literal: literal2 ] ] ] ] ] ] ] >>> print(parse_template( ... 'dummy.file', ... text='%for x in range(10):\n% print(x)\n%end\njuicebox')) Block: [ Code: { for x in range(10): __children__[0].execute(__context__) } [ Block: [ Code: {print(x)} [] ] ] Literal: juicebox ] >>> print(parse_template('/dummy.file', text= ... '''Nothing ... % if x: ... % for i in range(3): ... ${i} ... % end ... % else: ... THIS SHOULD NOT APPEAR IN THE OUTPUT ... ''')) Block: [ Literal: Nothing Code: { if x: __children__[0].execute(__context__) else: __children__[1].execute(__context__) } [ Block: [ Code: { for i in range(3): __children__[0].execute(__context__) } [ Block: [ Code: {(i)} [] Literal: <BLANKLINE> ] ] ] Block: [ Literal: THIS SHOULD NOT APPEAR IN THE OUTPUT ] ] ] >>> print(parse_template('dummy.file', text='''% ... %for x in y: ... % print(y) ... ''')) Block: [ Code: { for x in y: __children__[0].execute(__context__) } [ Block: [ Code: {print(y)} [] ] ] ] >>> print(parse_template('dummy.file', text='''% ... %if x: ... % print(y) ... AAAA ... %else: ... BBBB ... ''')) Block: [ Code: { if x: __children__[0].execute(__context__) else: __children__[1].execute(__context__) } [ Block: [ Code: {print(y)} [] Literal: AAAA ] Block: [ Literal: BBBB ] ] ] >>> print(parse_template('dummy.file', text='''% ... %if x: ... % print(y) ... AAAA ... %# This is a comment ... %else: ... BBBB ... ''')) Block: [ Code: { if x: __children__[0].execute(__context__) # This is a comment else: __children__[1].execute(__context__) } [ Block: [ Code: {print(y)} [] Literal: AAAA ] Block: [ Literal: BBBB ] ] ] >>> print(parse_template('dummy.file', text='''\ ... %for x in y: ... AAAA ... %if x: ... BBBB ... %end ... CCCC ... ''')) Block: [ Code: { for x in y: __children__[0].execute(__context__) } [ Block: [ Literal: AAAA Code: { if x: __children__[0].execute(__context__) } [ Block: [ Literal: BBBB ] ] Literal: CCCC ] ] ] """ return Block(ParseContext(filename, text)) def execute_template( ast, line_directive=_default_line_directive, **local_bindings): r"""Return the text generated by executing the given template AST. Keyword arguments become local variable bindings in the execution context >>> root_directory = os.path.abspath('/') >>> file_name = (root_directory + 'dummy.file').replace('\\', '/') >>> ast = parse_template(file_name, text= ... '''Nothing ... % if x: ... % for i in range(3): ... ${i} ... % end ... % else: ... THIS SHOULD NOT APPEAR IN THE OUTPUT ... ''') >>> out = execute_template(ast, ... line_directive='//#sourceLocation(file: "%(file)s", line: %(line)d)', ... x=1) >>> out = out.replace(file_name, "DUMMY-FILE") >>> print(out, end="") //#sourceLocation(file: "DUMMY-FILE", line: 1) Nothing //#sourceLocation(file: "DUMMY-FILE", line: 4) 0 //#sourceLocation(file: "DUMMY-FILE", line: 4) 1 //#sourceLocation(file: "DUMMY-FILE", line: 4) 2 >>> ast = parse_template(file_name, text= ... '''Nothing ... % a = [] ... % for x in range(3): ... % a.append(x) ... % end ... ${a} ... ''') >>> out = execute_template(ast, ... line_directive='//#sourceLocation(file: "%(file)s", line: %(line)d)', ... 
x=1) >>> out = out.replace(file_name, "DUMMY-FILE") >>> print(out, end="") //#sourceLocation(file: "DUMMY-FILE", line: 1) Nothing //#sourceLocation(file: "DUMMY-FILE", line: 6) [0, 1, 2] >>> ast = parse_template(file_name, text= ... '''Nothing ... % a = [] ... % for x in range(3): ... % a.append(x) ... % end ... ${a} ... ''') >>> out = execute_template(ast, ... line_directive='#line %(line)d "%(file)s"', x=1) >>> out = out.replace(file_name, "DUMMY-FILE") >>> print(out, end="") #line 1 "DUMMY-FILE" Nothing #line 6 "DUMMY-FILE" [0, 1, 2] """ execution_context = ExecutionContext( line_directive=line_directive, **local_bindings) ast.execute(execution_context) return ''.join(execution_context.result_text) def main(): """ Lint this file. >>> import sys >>> gyb_path = os.path.realpath(__file__).replace('.pyc', '.py') >>> sys.path.append(os.path.dirname(gyb_path)) >>> import python_lint >>> python_lint.lint([gyb_path], verbose=False) 0 """ import argparse import sys parser = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, description='Generate Your Boilerplate!', epilog=''' A GYB template consists of the following elements: - Literal text which is inserted directly into the output - %% or $$ in literal text, which insert literal '%' and '$' symbols respectively. - Substitutions of the form ${<python-expression>}. The Python expression is converted to a string and the result is inserted into the output. - Python code delimited by %{...}%. Typically used to inject definitions (functions, classes, variable bindings) into the evaluation context of the template. Common indentation is stripped, so you can add as much indentation to the beginning of this code as you like - Lines beginning with optional whitespace followed by a single '%' and Python code. %-lines allow you to nest other constructs inside them. To close a level of nesting, use the "%end" construct. - Lines beginning with optional whitespace and followed by a single '%' and the token "end", which close open constructs in %-lines. Example template: - Hello - %{ x = 42 def succ(a): return a+1 }% I can assure you that ${x} < ${succ(x)} % if int(y) > 7: % for i in range(3): y is greater than seven! % end % else: y is less than or equal to seven % end - The End. - When run with "gyb -Dy=9", the output is - Hello - I can assure you that 42 < 43 y is greater than seven! y is greater than seven! y is greater than seven! - The End. - ''' ) parser.add_argument( '-D', action='append', dest='defines', metavar='NAME=VALUE', default=[], help='''Bindings to be set in the template's execution context''') parser.add_argument( 'file', type=argparse.FileType(), help='Path to GYB template file (defaults to stdin)', nargs='?', default=sys.stdin) parser.add_argument( '-o', dest='target', type=argparse.FileType('w'), help='Output file (defaults to stdout)', default=sys.stdout) parser.add_argument( '--test', action='store_true', default=False, help='Run a self-test') parser.add_argument( '--verbose-test', action='store_true', default=False, help='Run a verbose self-test') parser.add_argument( '--dump', action='store_true', default=False, help='Dump the parsed template to stdout') parser.add_argument( '--line-directive', default=_default_line_directive, help=''' Line directive format string, which will be provided 2 substitutions, `%%(line)d` and `%%(file)s`. Example: `#sourceLocation(file: "%%(file)s", line: %%(line)d)` The default works automatically with the `line-directive` tool, which see for more information. 
''') args = parser.parse_args(sys.argv[1:]) if args.test or args.verbose_test: import doctest selfmod = sys.modules[__name__] if doctest.testmod(selfmod, verbose=args.verbose_test or None).failed: sys.exit(1) bindings = dict(x.split('=', 1) for x in args.defines) ast = parse_template(args.file.name, args.file.read()) if args.dump: print(ast) # Allow the template to open files and import .py files relative to its own # directory os.chdir(os.path.dirname(os.path.abspath(args.file.name))) sys.path = ['.'] + sys.path args.target.write(execute_template(ast, args.line_directive, **bindings)) if __name__ == '__main__': main()
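The record above defines gyb's public entry points (parse_template, execute_template, expand, and the command-line main). Below is a minimal usage sketch, assuming the file is importable as a module named gyb; the template text and the count binding are invented for illustration.

# Illustrative sketch only: assumes the gyb source above is importable as `gyb`.
# The template text and the `count` binding are invented for the example.
import gyb

template_text = """\
% for i in range(count):
item ${i}
% end
"""

# parse_template() accepts the template text directly, mirroring its doctests.
ast = gyb.parse_template('example.gyb', text=template_text)

# An empty line_directive is assumed to suppress the #sourceLocation lines.
print(gyb.execute_template(ast, line_directive='', count=3))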
multi_script_editor/jedi/evaluate/compiled/fake.py
paulwinex/pw_multiScriptEditor
142
11129936
""" Loads functions that are mixed in to the standard library. E.g. builtins are written in C (binaries), but my autocompletion only understands Python code. By mixing in Python code, the autocompletion should work much better for builtins. """ import os import inspect from jedi._compatibility import is_py3, builtins, unicode from jedi.parser import Parser from jedi.parser import tokenize from jedi.parser.representation import Class from jedi.evaluate.helpers import FakeName modules = {} def _load_faked_module(module): module_name = module.__name__ if module_name == '__builtin__' and not is_py3: module_name = 'builtins' try: return modules[module_name] except KeyError: path = os.path.dirname(os.path.abspath(__file__)) try: with open(os.path.join(path, 'fake', module_name) + '.pym') as f: source = f.read() except IOError: modules[module_name] = None return module = Parser(unicode(source), module_name).module modules[module_name] = module if module_name == 'builtins' and not is_py3: # There are two implementations of `open` for either python 2/3. # -> Rename the python2 version (`look at fake/builtins.pym`). open_func = search_scope(module, 'open') open_func.name = FakeName('open_python3') open_func = search_scope(module, 'open_python2') open_func.name = FakeName('open') return module def search_scope(scope, obj_name): for s in scope.subscopes: if str(s.name) == obj_name: return s def get_module(obj): if inspect.ismodule(obj): return obj try: obj = obj.__objclass__ except AttributeError: pass try: imp_plz = obj.__module__ except AttributeError: # Unfortunately in some cases like `int` there's no __module__ return builtins else: return __import__(imp_plz) def _faked(module, obj, name): # Crazy underscore actions to try to escape all the internal madness. if module is None: module = get_module(obj) faked_mod = _load_faked_module(module) if faked_mod is None: return # Having the module as a `parser.representation.module`, we need to scan # for methods. if name is None: if inspect.isbuiltin(obj): return search_scope(faked_mod, obj.__name__) elif not inspect.isclass(obj): # object is a method or descriptor cls = search_scope(faked_mod, obj.__objclass__.__name__) if cls is None: return return search_scope(cls, obj.__name__) else: if obj == module: return search_scope(faked_mod, name) else: cls = search_scope(faked_mod, obj.__name__) if cls is None: return return search_scope(cls, name) def get_faked(module, obj, name=None): obj = obj.__class__ if is_class_instance(obj) else obj result = _faked(module, obj, name) if not isinstance(result, Class) and result is not None: # Set the docstr which was previously not set (faked modules don't # contain it). doc = '''"""%s"""''' % obj.__doc__ # TODO need escapes. result.add_docstr(tokenize.Token(tokenize.STRING, doc, (0, 0))) return result def is_class_instance(obj): """Like inspect.* methods.""" return not (inspect.isclass(obj) or inspect.ismodule(obj) or inspect.isbuiltin(obj) or inspect.ismethod(obj) or inspect.ismethoddescriptor(obj) or inspect.iscode(obj) or inspect.isgenerator(obj))
checkov/terraform/checks/resource/gcp/GoogleCloudSqlDatabaseRequireSsl.py
cclauss/checkov
4,013
11129947
from checkov.common.models.enums import CheckCategories
from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck


class GoogleCloudSqlDatabaseRequireSsl(BaseResourceValueCheck):
    def __init__(self):
        name = "Ensure all Cloud SQL database instance requires all incoming connections to use SSL"
        id = "CKV_GCP_6"
        supported_resources = ['google_sql_database_instance']
        categories = [CheckCategories.NETWORKING]
        super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)

    def get_inspected_key(self):
        """
        Looks for google_sql_database_instance resources that do not enforce SSL on incoming connections.
        :return: the configuration key of the require_ssl setting that is inspected
        """
        return 'settings/[0]/ip_configuration/[0]/require_ssl/[0]'


check = GoogleCloudSqlDatabaseRequireSsl()
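A hedged sketch of driving this check in isolation. It assumes the scan_resource_conf entry point inherited from BaseResourceValueCheck and a resource configuration shaped the way checkov represents nested Terraform blocks; the values are invented.

# Illustrative sketch only: runs the check directly against hand-built
# configurations that mirror the inspected key
# 'settings/[0]/ip_configuration/[0]/require_ssl/[0]'.
from checkov.common.models.enums import CheckResult

passing_conf = {
    "settings": [{
        "ip_configuration": [{
            "require_ssl": [True],
        }],
    }],
}

failing_conf = {
    "settings": [{
        "ip_configuration": [{
            "require_ssl": [False],  # SSL not enforced
        }],
    }],
}

# `scan_resource_conf` is assumed to be provided by BaseResourceValueCheck.
assert check.scan_resource_conf(passing_conf) == CheckResult.PASSED
assert check.scan_resource_conf(failing_conf) == CheckResult.FAILED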
alipay/aop/api/domain/AlipayOverseasTravelGoodsSyncModel.py
antopen/alipay-sdk-python-all
213
11129962
<reponame>antopen/alipay-sdk-python-all<gh_stars>100-1000 #!/usr/bin/env python # -*- coding: utf-8 -*- import json from alipay.aop.api.constant.ParamConstants import * from alipay.aop.api.domain.GolGoodsExtParam import GolGoodsExtParam from alipay.aop.api.domain.Amount import Amount from alipay.aop.api.domain.Amount import Amount from alipay.aop.api.domain.GoodsSalesVolume import GoodsSalesVolume class AlipayOverseasTravelGoodsSyncModel(object): def __init__(self): self._cover = None self._external_link_url = None self._gol_goods_ext_param = None self._goods_category = None self._goods_name = None self._goods_tags = None self._inventory_sync = None self._original_price = None self._out_goods_id = None self._out_shop_id = None self._out_shop_ids = None self._price = None self._recommend = None self._sale_end_time = None self._sale_start_time = None self._sales_volume = None self._scenarios = None self._status = None @property def cover(self): return self._cover @cover.setter def cover(self, value): self._cover = value @property def external_link_url(self): return self._external_link_url @external_link_url.setter def external_link_url(self, value): self._external_link_url = value @property def gol_goods_ext_param(self): return self._gol_goods_ext_param @gol_goods_ext_param.setter def gol_goods_ext_param(self, value): if isinstance(value, GolGoodsExtParam): self._gol_goods_ext_param = value else: self._gol_goods_ext_param = GolGoodsExtParam.from_alipay_dict(value) @property def goods_category(self): return self._goods_category @goods_category.setter def goods_category(self, value): self._goods_category = value @property def goods_name(self): return self._goods_name @goods_name.setter def goods_name(self, value): self._goods_name = value @property def goods_tags(self): return self._goods_tags @goods_tags.setter def goods_tags(self, value): if isinstance(value, list): self._goods_tags = list() for i in value: self._goods_tags.append(i) @property def inventory_sync(self): return self._inventory_sync @inventory_sync.setter def inventory_sync(self, value): self._inventory_sync = value @property def original_price(self): return self._original_price @original_price.setter def original_price(self, value): if isinstance(value, Amount): self._original_price = value else: self._original_price = Amount.from_alipay_dict(value) @property def out_goods_id(self): return self._out_goods_id @out_goods_id.setter def out_goods_id(self, value): self._out_goods_id = value @property def out_shop_id(self): return self._out_shop_id @out_shop_id.setter def out_shop_id(self, value): self._out_shop_id = value @property def out_shop_ids(self): return self._out_shop_ids @out_shop_ids.setter def out_shop_ids(self, value): if isinstance(value, list): self._out_shop_ids = list() for i in value: self._out_shop_ids.append(i) @property def price(self): return self._price @price.setter def price(self, value): if isinstance(value, Amount): self._price = value else: self._price = Amount.from_alipay_dict(value) @property def recommend(self): return self._recommend @recommend.setter def recommend(self, value): self._recommend = value @property def sale_end_time(self): return self._sale_end_time @sale_end_time.setter def sale_end_time(self, value): self._sale_end_time = value @property def sale_start_time(self): return self._sale_start_time @sale_start_time.setter def sale_start_time(self, value): self._sale_start_time = value @property def sales_volume(self): return self._sales_volume @sales_volume.setter def sales_volume(self, 
value): if isinstance(value, GoodsSalesVolume): self._sales_volume = value else: self._sales_volume = GoodsSalesVolume.from_alipay_dict(value) @property def scenarios(self): return self._scenarios @scenarios.setter def scenarios(self, value): if isinstance(value, list): self._scenarios = list() for i in value: self._scenarios.append(i) @property def status(self): return self._status @status.setter def status(self, value): self._status = value def to_alipay_dict(self): params = dict() if self.cover: if hasattr(self.cover, 'to_alipay_dict'): params['cover'] = self.cover.to_alipay_dict() else: params['cover'] = self.cover if self.external_link_url: if hasattr(self.external_link_url, 'to_alipay_dict'): params['external_link_url'] = self.external_link_url.to_alipay_dict() else: params['external_link_url'] = self.external_link_url if self.gol_goods_ext_param: if hasattr(self.gol_goods_ext_param, 'to_alipay_dict'): params['gol_goods_ext_param'] = self.gol_goods_ext_param.to_alipay_dict() else: params['gol_goods_ext_param'] = self.gol_goods_ext_param if self.goods_category: if hasattr(self.goods_category, 'to_alipay_dict'): params['goods_category'] = self.goods_category.to_alipay_dict() else: params['goods_category'] = self.goods_category if self.goods_name: if hasattr(self.goods_name, 'to_alipay_dict'): params['goods_name'] = self.goods_name.to_alipay_dict() else: params['goods_name'] = self.goods_name if self.goods_tags: if isinstance(self.goods_tags, list): for i in range(0, len(self.goods_tags)): element = self.goods_tags[i] if hasattr(element, 'to_alipay_dict'): self.goods_tags[i] = element.to_alipay_dict() if hasattr(self.goods_tags, 'to_alipay_dict'): params['goods_tags'] = self.goods_tags.to_alipay_dict() else: params['goods_tags'] = self.goods_tags if self.inventory_sync: if hasattr(self.inventory_sync, 'to_alipay_dict'): params['inventory_sync'] = self.inventory_sync.to_alipay_dict() else: params['inventory_sync'] = self.inventory_sync if self.original_price: if hasattr(self.original_price, 'to_alipay_dict'): params['original_price'] = self.original_price.to_alipay_dict() else: params['original_price'] = self.original_price if self.out_goods_id: if hasattr(self.out_goods_id, 'to_alipay_dict'): params['out_goods_id'] = self.out_goods_id.to_alipay_dict() else: params['out_goods_id'] = self.out_goods_id if self.out_shop_id: if hasattr(self.out_shop_id, 'to_alipay_dict'): params['out_shop_id'] = self.out_shop_id.to_alipay_dict() else: params['out_shop_id'] = self.out_shop_id if self.out_shop_ids: if isinstance(self.out_shop_ids, list): for i in range(0, len(self.out_shop_ids)): element = self.out_shop_ids[i] if hasattr(element, 'to_alipay_dict'): self.out_shop_ids[i] = element.to_alipay_dict() if hasattr(self.out_shop_ids, 'to_alipay_dict'): params['out_shop_ids'] = self.out_shop_ids.to_alipay_dict() else: params['out_shop_ids'] = self.out_shop_ids if self.price: if hasattr(self.price, 'to_alipay_dict'): params['price'] = self.price.to_alipay_dict() else: params['price'] = self.price if self.recommend: if hasattr(self.recommend, 'to_alipay_dict'): params['recommend'] = self.recommend.to_alipay_dict() else: params['recommend'] = self.recommend if self.sale_end_time: if hasattr(self.sale_end_time, 'to_alipay_dict'): params['sale_end_time'] = self.sale_end_time.to_alipay_dict() else: params['sale_end_time'] = self.sale_end_time if self.sale_start_time: if hasattr(self.sale_start_time, 'to_alipay_dict'): params['sale_start_time'] = self.sale_start_time.to_alipay_dict() else: 
params['sale_start_time'] = self.sale_start_time if self.sales_volume: if hasattr(self.sales_volume, 'to_alipay_dict'): params['sales_volume'] = self.sales_volume.to_alipay_dict() else: params['sales_volume'] = self.sales_volume if self.scenarios: if isinstance(self.scenarios, list): for i in range(0, len(self.scenarios)): element = self.scenarios[i] if hasattr(element, 'to_alipay_dict'): self.scenarios[i] = element.to_alipay_dict() if hasattr(self.scenarios, 'to_alipay_dict'): params['scenarios'] = self.scenarios.to_alipay_dict() else: params['scenarios'] = self.scenarios if self.status: if hasattr(self.status, 'to_alipay_dict'): params['status'] = self.status.to_alipay_dict() else: params['status'] = self.status return params @staticmethod def from_alipay_dict(d): if not d: return None o = AlipayOverseasTravelGoodsSyncModel() if 'cover' in d: o.cover = d['cover'] if 'external_link_url' in d: o.external_link_url = d['external_link_url'] if 'gol_goods_ext_param' in d: o.gol_goods_ext_param = d['gol_goods_ext_param'] if 'goods_category' in d: o.goods_category = d['goods_category'] if 'goods_name' in d: o.goods_name = d['goods_name'] if 'goods_tags' in d: o.goods_tags = d['goods_tags'] if 'inventory_sync' in d: o.inventory_sync = d['inventory_sync'] if 'original_price' in d: o.original_price = d['original_price'] if 'out_goods_id' in d: o.out_goods_id = d['out_goods_id'] if 'out_shop_id' in d: o.out_shop_id = d['out_shop_id'] if 'out_shop_ids' in d: o.out_shop_ids = d['out_shop_ids'] if 'price' in d: o.price = d['price'] if 'recommend' in d: o.recommend = d['recommend'] if 'sale_end_time' in d: o.sale_end_time = d['sale_end_time'] if 'sale_start_time' in d: o.sale_start_time = d['sale_start_time'] if 'sales_volume' in d: o.sales_volume = d['sales_volume'] if 'scenarios' in d: o.scenarios = d['scenarios'] if 'status' in d: o.status = d['status'] return o
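For completeness, a minimal round-trip sketch for the model above; the field values are invented placeholders and only scalar/list fields are shown.

# Illustrative sketch only: the values below are invented placeholders.
model = AlipayOverseasTravelGoodsSyncModel()
model.out_goods_id = "GOODS-001"
model.out_shop_id = "SHOP-001"
model.goods_name = "City walking tour"
model.goods_tags = ["travel", "outdoor"]
model.status = "ONLINE"

# to_alipay_dict() only serialises the fields that have been set.
payload = model.to_alipay_dict()
print(json.dumps(payload))

# from_alipay_dict() rebuilds an equivalent model from the same structure.
restored = AlipayOverseasTravelGoodsSyncModel.from_alipay_dict(payload)
assert restored.out_goods_id == "GOODS-001"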