max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars)
---|---|---|---|---|
lit_nlp/examples/models/glue_models_int_test.py | eichinflo/lit | 2,854 | 12723478 | """Integration tests for lit_nlp.examples.models.glue_models."""
from absl.testing import absltest
from lit_nlp.examples.models import glue_models
import transformers
class GlueModelsIntTest(absltest.TestCase):
def test_sst2_model_predict(self):
# Create model.
model_path = "https://storage.googleapis.com/what-if-tool-resources/lit-models/sst2_tiny.tar.gz" # pylint: disable=line-too-long
if model_path.endswith(".tar.gz"):
model_path = transformers.file_utils.cached_path(
model_path, extract_compressed_file=True)
model = glue_models.SST2Model(model_path)
# Run prediction to ensure no failure.
model_in = [{"sentence": "test sentence"}]
model_out = list(model.predict(model_in))
# Sanity-check output vs output spec.
self.assertLen(model_out, 1)
for key in model.output_spec().keys():
self.assertIn(key, model_out[0].keys())
if __name__ == "__main__":
absltest.main()
|
boltstream/migrations/0004_user_uuid.py | geekpii/boltstream | 1,735 | 12723488 | <gh_stars>1000+
# Generated by Django 2.2 on 2019-05-12 18:20
import uuid
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("boltstream", "0003_streamsession")]
operations = [
migrations.AddField(
model_name="user",
name="uuid",
field=models.UUIDField(
default=uuid.uuid4, editable=False, unique=True, verbose_name="UUID"
),
)
]
|
code/old-version/restrictedBoltzmannMachine.py | diksha42/erecognition | 166 | 12723500 | """Implementation of a restricted Boltzmann machine.
You need to be able to deal with different energy functions;
this allows you to deal with real-valued units.
TODO: monitor overfitting
"""
__author__ = "<NAME>"
__contact__ = "<EMAIL>"
import numpy as np
from common import *
EXPENSIVE_CHECKS_ON = False
# TODO: different learning rates for weights and biases
# TODO: nesterov method for momentum
# TODO: rmsprop
"""
Represents an RBM
"""
class RBM(object):
def __init__(self, nrVisible, nrHidden, trainingFunction, dropout,
visibleDropout, activationFun=sigmoid):
# dropout = 1 means no dropout, keep all the weights
self.dropout = dropout
# visibleDropout = 1 means no dropout on the visible units
self.visibleDropout = visibleDropout
self.nrHidden = nrHidden
self.nrVisible = nrVisible
self.trainingFunction = trainingFunction
self.activationFun = activationFun
self.initialized = False
def train(self, data):
# If the network has not been initialized yet, do it now
# i.e. if this is the first training batch
if not self.initialized:
self.weights = self.initializeWeights(self.nrVisible, self.nrHidden)
self.biases = self.intializeBiases(data, self.nrHidden)
# self.data = data
# else:
# self.data = np.concatenate(self.data, data)
self.biases, self.weights = self.trainingFunction(data,
self.biases,
self.weights,
self.activationFun,
self.dropout,
self.visibleDropout)
self.testWeights = self.weights * self.dropout
assert self.weights.shape == (self.nrVisible, self.nrHidden)
assert self.biases[0].shape[0] == self.nrVisible
assert self.biases[1].shape[0] == self.nrHidden
""" Reconstructs the data given using this boltzmann machine."""
def reconstruct(self, dataInstances):
return reconstruct(self.biases, self.testWeights, dataInstances,
self.activationFun)
def hiddenRepresentation(self, dataInstances):
return updateLayer(Layer.HIDDEN, dataInstances, self.biases,
self.testWeights, self.activationFun, True)
@classmethod
def initializeWeights(cls, nrVisible, nrHidden):
return np.random.normal(0, 0.01, (nrVisible, nrHidden))
@classmethod
def intializeBiases(cls, data, nrHidden):
# get the percentage of data points that have the i'th unit on
# and set the visible bias to log(p / (1 - p))
percentages = data.mean(axis=0, dtype='float')
vectorized = np.vectorize(safeLogFraction, otypes=[np.float])
visibleBiases = vectorized(percentages)
hiddenBiases = np.zeros(nrHidden)
return np.array([visibleBiases, hiddenBiases])
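# Usage sketch (illustrative only; the layer sizes and dropout rates below are
# assumptions, and `data` stands for a binary ndarray of shape (N, nrVisible)):
#   rbm = RBM(nrVisible=784, nrHidden=500,
#             trainingFunction=contrastiveDivergence,
#             dropout=0.5, visibleDropout=0.8)
#   rbm.train(data)                          # fits self.weights and self.biases
#   hidden = rbm.hiddenRepresentation(data)  # sampled hidden states, (N, nrHidden)
#   recon = rbm.reconstruct(data)            # visible reconstructions, (N, nrVisible)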
def reconstruct(biases, weights, dataInstances, activationFun):
hidden = updateLayer(Layer.HIDDEN, dataInstances, biases, weights,
activationFun, True)
visibleReconstructions = updateLayer(Layer.VISIBLE, hidden,
biases, weights, activationFun, False)
return visibleReconstructions
def reconstructionError(biases, weights, data, activationFun):
# Returns the rmse of the reconstruction of the data
# Good to keep track of it, should decrease through training
# Initially faster, and then slower
reconstructions = reconstruct(biases, weights, data, activationFun)
return rmse(reconstructions, data)
""" Training functions."""
""" Full CD function.
Arguments:
data: the data to use for training. A numpy ndarray.
biases: the visible and hidden bias vectors.
Returns:
the updated biases and weights.
The mini batch size defaults to 10; use miniBatchSize=1 for plain (non-batched) learning.
"""
# Think of removing the step method altogether and keeping just one, both to
# optimize the code and to make it easier to change things,
# rather than having a function that you pass in for every batch.
# If a nice and easy refactoring can be seen then you can do that.
def contrastiveDivergence(data, biases, weights, activationFun, dropout,
visibleDropout, miniBatchSize=10):
N = len(data)
epochs = N / miniBatchSize
# sample the probability distribution that chooses which of the
# visible units to keep for dropout
on = sample(visibleDropout, data.shape)
dropoutData = data * on
epsilon = 0.01
decayFactor = 0.0002
weightDecay = True
reconstructionStep = 50
oldDeltaWeights = np.zeros(weights.shape)
oldDeltaVisible = np.zeros(biases[0].shape)
oldDeltaHidden = np.zeros(biases[1].shape)
batchLearningRate = epsilon / miniBatchSize
print "batchLearningRate"
print batchLearningRate
for epoch in xrange(epochs):
batchData = dropoutData[epoch * miniBatchSize: (epoch + 1) * miniBatchSize, :]
if epoch < epochs / 100:
momentum = 0.5
else:
momentum = 0.95
if epoch < (N/7) * 10:
cdSteps = 3
elif epoch < (N/9) * 10:
cdSteps = 5
else:
cdSteps = 10
if EXPENSIVE_CHECKS_ON:
if epoch % reconstructionStep == 0:
print "reconstructionError"
print reconstructionError(biases, weights, data, activationFun)
weightsDiff, visibleBiasDiff, hiddenBiasDiff =\
modelAndDataSampleDiffs(batchData, biases, weights,
activationFun, dropout, cdSteps)
# Update the weights
# data - model
# Positive phase - negative
# Weight decay factor
deltaWeights = (batchLearningRate * weightsDiff
- epsilon * weightDecay * decayFactor * weights)
deltaVisible = batchLearningRate * visibleBiasDiff
deltaHidden = batchLearningRate * hiddenBiasDiff
deltaWeights += momentum * oldDeltaWeights
deltaVisible += momentum * oldDeltaVisible
deltaHidden += momentum * oldDeltaHidden
oldDeltaWeights = deltaWeights
oldDeltaVisible = deltaVisible
oldDeltaHidden = deltaHidden
# Update the weights
weights += deltaWeights
# Update the visible biases
biases[0] += deltaVisible
# Update the hidden biases
biases[1] += deltaHidden
print reconstructionError(biases, weights, data, activationFun)
return biases, weights
def modelAndDataSampleDiffs(batchData, biases, weights, activationFun,
dropout, cdSteps):
# Compute the hidden units from the data
hidden = updateLayer(Layer.HIDDEN, batchData, biases, weights, activationFun,
binary=True)
# Choose the units to be active at this point
# different sets for each element in the mini batches
on = sample(dropout, hidden.shape)
dropoutHidden = on * hidden
hiddenReconstruction = dropoutHidden
for i in xrange(cdSteps - 1):
visibleReconstruction = updateLayer(Layer.VISIBLE, hiddenReconstruction,
biases, weights, activationFun,
binary=False)
hiddenReconstruction = updateLayer(Layer.HIDDEN, visibleReconstruction,
biases, weights, activationFun,
binary=True)
# sample the hidden units active (for dropout)
hiddenReconstruction = hiddenReconstruction * on
# Do the last reconstruction from the probabilities in the last phase
visibleReconstruction = updateLayer(Layer.VISIBLE, hiddenReconstruction,
biases, weights, activationFun,
binary=False)
hiddenReconstruction = updateLayer(Layer.HIDDEN, visibleReconstruction,
biases, weights, activationFun,
binary=False)
hiddenReconstruction = hiddenReconstruction * on
# here it should be hidden * on - hiddenreconstruction
# also below in the hidden bias
weightsDiff = np.dot(batchData.T, dropoutHidden) -\
np.dot(visibleReconstruction.T, hiddenReconstruction)
assert weightsDiff.shape == weights.shape
visibleBiasDiff = np.sum(batchData - visibleReconstruction, axis=0)
assert visibleBiasDiff.shape == biases[0].shape
hiddenBiasDiff = np.sum(dropoutHidden - hiddenReconstruction, axis=0)
assert hiddenBiasDiff.shape == biases[1].shape
return weightsDiff, visibleBiasDiff, hiddenBiasDiff
""" Updates an entire layer. This procedure can be used both in training
and in testing.
Can even take multiple values of the layer, each of them given as rows
Uses matrix operations.
"""
def updateLayer(layer, otherLayerValues, biases, weights, activationFun,
binary=False):
bias = biases[layer]
size = otherLayerValues.shape[0]
if layer == Layer.VISIBLE:
activation = np.dot(otherLayerValues, weights.T)
else:
activation = np.dot(otherLayerValues, weights)
probs = activationFun(np.tile(bias, (size, 1)) + activation)
if binary:
# Sample from the distributions
return sampleAll(probs)
return probs
# Another training algorithm. Slower than contrastive divergence, but
# gives better results. Not used in practice as it is too slow.
# This is what Hinton said, but it is not OK according to the NIPS paper.
# This is a huge code copy-paste, but keep it like this for now.
def PCD(data, biases, weights, activationFun, dropout,
visibleDropout, miniBatchSize=10):
N = len(data)
epochs = N / miniBatchSize
# sample the probability distribution that chooses which of the
# visible units to keep for dropout
# on = sample(visibleDropout, data.shape)
# dropoutData = data * on
dropoutData = data
epsilon = 0.01
decayFactor = 0.0002
weightDecay = True
reconstructionStep = 50
oldDeltaWeights = np.zeros(weights.shape)
oldDeltaVisible = np.zeros(biases[0].shape)
oldDeltaHidden = np.zeros(biases[1].shape)
batchLearningRate = epsilon / miniBatchSize
print "batchLearningRate"
print batchLearningRate
# make this an argument or something
nrFantasyParticles = miniBatchSize
fantVisible = np.random.randint(2, size=(nrFantasyParticles, weights.shape[0]))
fantHidden = np.random.randint(2, size=(nrFantasyParticles, weights.shape[1]))
fantasyParticles = (fantVisible, fantHidden)
steps = 10
for epoch in xrange(epochs):
batchData = dropoutData[epoch * miniBatchSize: (epoch + 1) * miniBatchSize, :]
if epoch < epochs / 100:
momentum = 0.5
else:
momentum = 0.95
if EXPENSIVE_CHECKS_ON:
if epoch % reconstructionStep == 0:
print "reconstructionError"
print reconstructionError(biases, weights, data, activationFun)
print fantasyParticles[0]
print fantasyParticles[1]
weightsDiff, visibleBiasDiff, hiddenBiasDiff, fantasyParticles =\
modelAndDataSampleDiffsPCD(batchData, biases, weights,
activationFun, dropout, steps, fantasyParticles)
# Update the weights
# data - model
# Positive phase - negative
# Weight decay factor
deltaWeights = (batchLearningRate * weightsDiff
- epsilon * weightDecay * decayFactor * weights)
deltaVisible = batchLearningRate * visibleBiasDiff
deltaHidden = batchLearningRate * hiddenBiasDiff
deltaWeights += momentum * oldDeltaWeights
deltaVisible += momentum * oldDeltaVisible
deltaHidden += momentum * oldDeltaHidden
oldDeltaWeights = deltaWeights
oldDeltaVisible = deltaVisible
oldDeltaHidden = deltaHidden
# Update the weights
weights += deltaWeights
# Update the visible biases
biases[0] += deltaVisible
# Update the hidden biases
biases[1] += deltaHidden
print reconstructionError(biases, weights, data, activationFun)
return biases, weights
# Same modelAndDataSampleDiff but for persistent contrastive divergence
# First run it without dropout
def modelAndDataSampleDiffsPCD(batchData, biases, weights, activationFun,
dropout, steps, fantasyParticles):
# Compute the hidden units from the data
hidden = updateLayer(Layer.HIDDEN, batchData, biases, weights, activationFun,
binary=True)
# Choose the units to be active at this point
# different sets for each element in the mini batches
# on = sample(dropout, hidden.shape)
# dropoutHidden = on * hidden
# hiddenReconstruction = dropoutHidden
for i in xrange(steps):
visibleReconstruction = updateLayer(Layer.VISIBLE, fantasyParticles[1],
biases, weights, activationFun,
binary=False)
hiddenReconstruction = updateLayer(Layer.HIDDEN, visibleReconstruction,
biases, weights, activationFun,
binary=True)
# sample the hidden units active (for dropout)
# hiddenReconstruction = hiddenReconstruction * on
fantasyParticles = (visibleReconstruction, hiddenReconstruction)
# here it should be hidden * on - hiddenReconstruction
# also below in the hidden bias
weightsDiff = np.dot(batchData.T, hidden) -\
np.dot(visibleReconstruction.T, hiddenReconstruction)
assert weightsDiff.shape == weights.shape
visibleBiasDiff = np.sum(batchData - visibleReconstruction, axis=0)
assert visibleBiasDiff.shape == biases[0].shape
hiddenBiasDiff = np.sum(hidden - hiddenReconstruction, axis=0)
assert hiddenBiasDiff.shape == biases[1].shape
return weightsDiff, visibleBiasDiff, hiddenBiasDiff, fantasyParticles |
DynaMaze/DynaQ+.py | nabeelfarooqui98/Reinforcement-Learning-Implementation | 116 | 12723568 | <filename>DynaMaze/DynaQ+.py
import numpy as np
ROWS = 6
COLS = 9
S = (2, 0)
G = (0, 8)
BLOCKS = [(1, 2), (2, 2), (3, 2), (0, 7), (1, 7), (2, 7), (4, 5)]
ACTIONS = ["left", "up", "right", "down"]
class Maze:
def __init__(self):
self.rows = ROWS
self.cols = COLS
self.start = S
self.goal = G
self.blocks = BLOCKS
self.state = S
self.end = False
# init maze
self.maze = np.zeros((self.rows, self.cols))
for b in self.blocks:
self.maze[b] = -1
def nxtPosition(self, action):
r, c = self.state
if action == "left":
c -= 1
elif action == "right":
c += 1
elif action == "up":
r -= 1
else:
r += 1
if (r >= 0 and r <= self.rows - 1) and (c >= 0 and c <= self.cols - 1):
if (r, c) not in self.blocks:
self.state = (r, c)
return self.state
def giveReward(self):
if self.state == self.goal:
self.end = True
return 1
else:
return 0
def showMaze(self):
self.maze[self.state] = 1
for i in range(0, self.rows):
print('-------------------------------------')
out = '| '
for j in range(0, self.cols):
if self.maze[i, j] == 1:
token = '*'
if self.maze[i, j] == -1:
token = 'z'
if self.maze[i, j] == 0:
token = '0'
out += token + ' | '
print(out)
print('-------------------------------------')
class DynaAgentPlus:
def __init__(self, exp_rate=0.3, lr=0.1, n_steps=5, episodes=1, timeWeight=1e-4):
self.time = 0 # keep track of the total time
self.timeWeight = timeWeight
self.maze = Maze()
self.state = S
self.actions = ACTIONS
self.state_actions = [] # state & action track
self.exp_rate = exp_rate
self.lr = lr
self.steps = n_steps
self.episodes = episodes # number of episodes going to play
self.steps_per_episode = []
self.Q_values = {}
# model function
self.model = {}
for row in range(ROWS):
for col in range(COLS):
self.Q_values[(row, col)] = {}
for a in self.actions:
self.Q_values[(row, col)][a] = 0
def chooseAction(self):
# epsilon-greedy
mx_nxt_reward = -999
action = ""
if np.random.uniform(0, 1) <= self.exp_rate:
action = np.random.choice(self.actions)
else:
# greedy action
current_position = self.state
# if all actions have same value, then select randomly
if len(set(self.Q_values[current_position].values())) == 1:
action = np.random.choice(self.actions)
else:
for a in self.actions:
nxt_reward = self.Q_values[current_position][a]
if nxt_reward >= mx_nxt_reward:
action = a
mx_nxt_reward = nxt_reward
return action
def reset(self):
self.maze = Maze()
self.state = S
self.state_actions = []
self.time = 0
def updateModel(self, state, nxtState, action, reward):
if state not in self.model.keys():
self.model[state] = {}
for a in self.actions:
# the initial model for such actions was that they would
# lead back to the same state with a reward of 0.
if a != action:
self.model[state][a] = (0, state, 1)
self.model[state][action] = (reward, nxtState, self.time)
def play(self):
self.steps_per_episode = []
for ep in range(self.episodes):
while not self.maze.end:
action = self.chooseAction()
self.state_actions.append((self.state, action))
nxtState = self.maze.nxtPosition(action)
reward = self.maze.giveReward()
# update Q-value
self.Q_values[self.state][action] += self.lr * (reward + np.max(list(self.Q_values[nxtState].values())) - self.Q_values[self.state][action])
# update model
self.updateModel(self.state, nxtState, action, reward)
self.state = nxtState
self.time += 1
# loop n times to randomly update Q-value
for _ in range(self.steps):
# randomly choose a state
rand_idx = np.random.choice(range(len(self.model.keys())))
_state = list(self.model)[rand_idx]
# randomly choose an action
rand_idx = np.random.choice(range(len(self.model[_state].keys())))
_action = list(self.model[_state])[rand_idx]
_reward, _nxtState, _time = self.model[_state][_action]
# Dyna-Q+ exploration bonus: reward grows with the time since this (state, action) was last tried
_reward += self.timeWeight * np.sqrt(self.time - _time)
self.Q_values[_state][_action] += self.lr * (_reward + np.max(list(self.Q_values[_nxtState].values())) - self.Q_values[_state][_action])
# end of game
if ep % 10 == 0:
print("episode", ep)
self.steps_per_episode.append(len(self.state_actions))
self.reset()
if __name__ == "__main__":
dap = DynaAgentPlus()
dap.play() |
__scraping__/centralbankofindia.co.in - scrapy/main.py | dhmo1900/python-examples | 140 | 12723584 |
# author: Bartlomiej "furas" Burek (https://blog.furas.pl)
# date: 2021.10.04
#
# title: Scrapy returning None on querying by xpath
# url: https://stackoverflow.com/questions/69442962/scrapy-returning-none-on-querying-by-xpath/69443343#69443343
# [Scrapy returning None on querying by xpath](https://stackoverflow.com/questions/69442962/scrapy-returning-none-on-querying-by-xpath/69443343#69443343)
import scrapy
class MySpider(scrapy.Spider):
start_urls = [
# f"https://www.centralbankofindia.co.in/en/branch-locator?field_state_target_id=All&combine=&page={i}"
# for i in range(0, 5)
# only first page - links to other pages it will find in HTML
"https://www.centralbankofindia.co.in/en/branch-locator?field_state_target_id=All&combine=&page=0"
]
name = "Central Bank of India"
def parse(self, response):
print(f'url: {response.url}')
all_items = response.xpath('//*[@id="block-cbi-content"]//td[2]//span[2]/text()').extract()
for address in all_items:
print(address)
yield {'address': address}
# get link to next page
next_page = response.xpath('//a[@rel="next"]/@href').extract_first()
if next_page:
print(f'Next Page: {next_page}')
yield response.follow(next_page)
# --- run without project and save in `output.csv` ---
from scrapy.crawler import CrawlerProcess
c = CrawlerProcess({
'USER_AGENT': 'Mozilla/5.0',
# save in file CSV, JSON or XML
'FEEDS': {'output.csv': {'format': 'csv'}}, # new in 2.1
})
c.crawl(MySpider)
c.start()
|
raiden/tests/benchmark/_codespeed.py | tirkarthi/raiden | 2,101 | 12723603 | <filename>raiden/tests/benchmark/_codespeed.py
import json
import os
import warnings
import requests
try:
_CODESPEED_USER = os.environ["CODESPEED_USER"]
_CODESPEED_PASSWORD = os.environ["CODESPEED_PASSWORD"]
_BENCHMARK_HOST = os.environ["BENCHMARK_HOST"]
except KeyError:
warnings.warn(
"Codespeed environment variables not available, posting results would fail.",
RuntimeWarning,
)
def post_result(codespeed_url, commit_id, branch, bench_name, value):
data = [
{
"commitid": commit_id,
"project": "raiden",
"branch": branch,
"executable": "raiden",
"benchmark": bench_name,
"environment": _BENCHMARK_HOST,
"result_value": value,
}
]
data_ = {"json": json.dumps(data)}
url = codespeed_url + "/result/add/json/"
resp = requests.post(url, data=data_, auth=(_CODESPEED_USER, _CODESPEED_PASSWORD))
resp.raise_for_status()
|
container_files/ipython_extra_config.py | kstepanmpmg/mldb | 665 | 12723617 | c = get_config()
c.NotebookApp.ip = '{{IPYTHON_NB_LISTEN_ADDR}}'
c.NotebookApp.port = {{IPYTHON_NB_LISTEN_PORT}}
c.NotebookApp.open_browser = False
c.NotebookApp.notebook_dir = u'{{IPYTHON_NB_DIR}}'
c.NotebookApp.base_url = '{{HTTP_BASE_URL}}/{{IPYTHON_NB_PREFIX}}'
c.NotebookApp.tornado_settings = {'static_url_prefix':'{{HTTP_BASE_URL}}/{{IPYTHON_NB_PREFIX}}/static/'}
# Disable token auth for now
c.NotebookApp.token = ''
c.NotebookApp.password = ''
|
convertor.py | trankha1655/pan_pp.origin | 329 | 12723647 | <gh_stars>100-1000
import torch
import mmcv
import argparse
import os.path as osp
parser = argparse.ArgumentParser(description='Hyperparams')
parser.add_argument('checkpoint', nargs='?', type=str, default=None)
args = parser.parse_args()
dir_name = args.checkpoint.split("/")[-2]
checkpoint = torch.load(args.checkpoint, map_location='cpu')
state_dict = checkpoint['state_dict']
for k, v in state_dict.items():
print(k)
checkpoint = {'state_dict': state_dict}
mmcv.mkdir_or_exist("converted/")
try:
torch.save(checkpoint, osp.join("converted", dir_name+".pth.tar"), _use_new_zipfile_serialization=False)
except:
torch.save(checkpoint, osp.join("converted", dir_name+".pth.tar"))
|
pims/process.py | tsmbland/pims | 208 | 12723694 | <filename>pims/process.py
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from slicerator import pipeline, Pipeline
import six
@pipeline
def as_grey(frame):
"""Convert a 2D image or PIMS reader to greyscale.
This weights the color channels according to their typical
response to white light.
It does nothing if the input is already greyscale.
"""
if len(frame.shape) == 2:
return frame
else:
red = frame[:, :, 0]
green = frame[:, :, 1]
blue = frame[:, :, 2]
return 0.2125 * red + 0.7154 * green + 0.0721 * blue
# "Gray" is the more common spelling
as_gray = as_grey
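# Usage sketch (illustrative; `reader` stands for any PIMS reader and `rgb_frame`
# for a single (H, W, 3) ndarray):
#   grey_reader = as_grey(reader)    # pipeline: frames are converted lazily on access
#   grey_frame = as_grey(rgb_frame)  # a single frame works too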
# Source of this patch: https://github.com/scikit-image/scikit-image/pull/3556
# See also: https://github.com/numpy/numpy/pull/11966
from distutils.version import LooseVersion
if LooseVersion(np.__version__) < LooseVersion('1.16'):
from numpy.lib.arraypad import _validate_lengths as validate_lengths
else:
from numpy.lib.arraypad import _as_pairs
def validate_lengths(ar, crop_width):
return _as_pairs(crop_width, ar.ndim, as_index=True)
def _crop(frame, bbox):
return frame[bbox[0]:bbox[2], bbox[1]:bbox[3]]
@pipeline
class crop(Pipeline):
"""Crop image or image-reader`reader` by `crop_width` along each dimension.
Parameters
----------
ar : array-like of rank N
Input array.
crop_width : {sequence, int}
Number of values to remove from the edges of each axis.
``((before_1, after_1),`` ... ``(before_N, after_N))`` specifies
unique crop widths at the start and end of each axis.
``((before, after),)`` specifies a fixed start and end crop
for every axis.
``(n,)`` or ``n`` for integer ``n`` is a shortcut for
before = after = ``n`` for all axes.
order : {'C', 'F', 'A', 'K'}, optional
control the memory layout of the copy. See ``np.copy``.
Returns
-------
cropped : array
The cropped array.
See Also
--------
Source: ``skimage.util.crop`` (v0.12.3)
"""
def __init__(self, reader, crop_width, order='K'):
# We have to know the frame shape that is returned by the reader.
try: # In case the reader is a FramesSequence, there is an attribute
shape = reader.frame_shape
first_frame = np.empty(shape, dtype=bool)
except AttributeError:
first_frame = reader[0]
shape = first_frame.shape
# Validate the crop widths on the first frame
crops = validate_lengths(first_frame, crop_width)
self._crop_slices = tuple([slice(a, shape[i] - b)
for i, (a, b) in enumerate(crops)])
self._crop_shape = tuple([shape[i] - b - a
for i, (a, b) in enumerate(crops)])
self._crop_order = order
# We could pass _crop to proc_func. However this adds an extra copy
# operation. Therefore we define our own here.
super(self.__class__, self).__init__(None, reader)
def _get(self, key):
ar = self._ancestors[0][key]
return np.array(ar[self._crop_slices], order=self._crop_order,
copy=True)
@property
def frame_shape(self):
return self._crop_shape
|
tests/test_project/app_correct/models.py | christianbundy/django-migration-linter | 357 | 12723722 | from django.db import models
class A(models.Model):
null_field = models.IntegerField(null=True)
new_null_field = models.IntegerField(null=True)
|
ci-scripts/flatten_image.py | nstng/magma | 539 | 12723732 | """
Copyright 2022 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import subprocess # noqa: S404
import sys
def main() -> None:
"""Provide command-line options to flatten MAGMA-MME OAI image"""
args = _parse_args()
status = perform_flattening(args.tag)
sys.exit(status)
def _parse_args() -> argparse.Namespace:
"""Parse the command line args
Returns:
argparse.Namespace: the created parser
"""
parser = argparse.ArgumentParser(description='Flattening Image')
parser.add_argument(
'--tag', '-t',
action='store',
required=True,
help='Image Tag in image-name:image tag format',
)
return parser.parse_args()
def perform_flattening(tag):
"""Parse the command line args
Args:
tag: Image Tag in image-name:image tag format
Returns:
int: pass / fail status
"""
# First detect which docker/podman command to use
cli = ''
image_prefix = ''
cmd = 'which podman || true'
podman_check = subprocess.check_output(cmd, shell=True, universal_newlines=True) # noqa: S602
if podman_check.strip():
cli = 'sudo podman'
image_prefix = 'localhost/'
else:
cmd = 'which docker || true'
docker_check = subprocess.check_output(cmd, shell=True, universal_newlines=True) # noqa: S602
if docker_check.strip():
cli = 'docker'
image_prefix = ''
else:
print('No docker / podman installed: quitting')
return -1
print(f'Flattening {tag}')
# Creating a container
cmd = cli + ' run --name test-flatten --entrypoint /bin/true -d ' + tag
print(cmd)
subprocess.check_call(cmd, shell=True, universal_newlines=True) # noqa: S602
# Export / Import trick
cmd = cli + ' export test-flatten | ' + cli + ' import '
# Bizarro syntax issue with podman
if cli == 'docker':
cmd += ' --change "ENV PATH /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" '
else:
cmd += ' --change "ENV PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" '
cmd += ' --change "WORKDIR /magma-mme" '
cmd += ' --change "EXPOSE 3870/tcp" '
cmd += ' --change "EXPOSE 5870/tcp" '
cmd += ' --change "EXPOSE 2123/udp" '
cmd += ' --change "CMD [\\"sleep\\", \\"infinity\\"]" ' # noqa: WPS342
cmd += ' - ' + image_prefix + tag
print(cmd)
subprocess.check_call(cmd, shell=True, universal_newlines=True) # noqa: S602
# Remove container
cmd = cli + ' rm -f test-flatten'
print(cmd)
subprocess.check_call(cmd, shell=True, universal_newlines=True) # noqa: S602
# At this point the original image is a dangling image.
# CI pipeline will clean up (`image prune --force`)
return 0
if __name__ == '__main__':
main()
|
boost_adaptbx/command_line/inexact.py | dperl-sol/cctbx_project | 155 | 12723786 | from __future__ import absolute_import, division, print_function
# LIBTBX_SET_DISPATCHER_NAME boost_adaptbx.inexact
import boost_adaptbx.boost.python as bp
import sys
def run(args):
assert len(args) == 0
print("Now creating a NaN in C++ as 0/0 ...")
sys.stdout.flush()
result = bp.ext.divide_doubles(0, 0)
print("Result:", result)
if (__name__ == "__main__"):
run(sys.argv[1:])
|
torchbenchmark/models/fastNLP/test/modules/decoder/test_seq2seq_decoder.py | Chillee/benchmark | 2,693 | 12723821 | import unittest
import torch
from fastNLP import Vocabulary
from fastNLP.embeddings import StaticEmbedding
from fastNLP.modules import TransformerSeq2SeqDecoder
from fastNLP.modules import LSTMSeq2SeqDecoder
from fastNLP import seq_len_to_mask
class TestTransformerSeq2SeqDecoder(unittest.TestCase):
def test_case(self):
vocab = Vocabulary().add_word_lst("This is a test .".split())
vocab.add_word_lst("Another test !".split())
embed = StaticEmbedding(vocab, embedding_dim=10)
encoder_output = torch.randn(2, 3, 10)
src_seq_len = torch.LongTensor([3, 2])
encoder_mask = seq_len_to_mask(src_seq_len)
for flag in [True, False]:
with self.subTest(bind_decoder_input_output_embed=flag):
decoder = TransformerSeq2SeqDecoder(embed=embed, pos_embed = None,
d_model = 10, num_layers=2, n_head = 5, dim_ff = 20, dropout = 0.1,
bind_decoder_input_output_embed = True)
state = decoder.init_state(encoder_output, encoder_mask)
output = decoder(tokens=torch.randint(0, len(vocab), size=(2, 4)), state=state)
self.assertEqual(output.size(), (2, 4, len(vocab)))
class TestLSTMDecoder(unittest.TestCase):
def test_case(self):
vocab = Vocabulary().add_word_lst("This is a test .".split())
vocab.add_word_lst("Another test !".split())
embed = StaticEmbedding(vocab, model_dir_or_name=None, embedding_dim=10)
encoder_output = torch.randn(2, 3, 10)
tgt_words_idx = torch.LongTensor([[1, 2, 3, 4], [2, 3, 0, 0]])
src_seq_len = torch.LongTensor([3, 2])
encoder_mask = seq_len_to_mask(src_seq_len)
for flag in [True, False]:
for attention in [True, False]:
with self.subTest(bind_decoder_input_output_embed=flag, attention=attention):
decoder = LSTMSeq2SeqDecoder(embed=embed, num_layers = 2, hidden_size = 10,
dropout = 0.3, bind_decoder_input_output_embed=flag, attention=attention)
state = decoder.init_state(encoder_output, encoder_mask)
output = decoder(tgt_words_idx, state)
self.assertEqual(tuple(output.size()), (2, 4, len(vocab)))
|
python/fate_client/pipeline/interface/data.py | hubert-he/FATE | 3,787 | 12723832 | <reponame>hubert-he/FATE<gh_stars>1000+
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pipeline.backend.config import VERSION
class Data(object):
def __init__(self, data=None, train_data=None, validate_data=None, test_data=None, predict_input=None):
self._data = data
self._train_data = train_data
self._validate_data = validate_data
self._test_data = test_data
self._predict_input = predict_input
def __getattr__(self, data_key):
if data_key == "train_data":
return self._train_data
elif data_key == "validate_data":
return self._validate_data
elif data_key == "test_data":
return self._test_data
elif data_key == "data":
return self._data
elif data_key == "predict_input":
return self._predict_input
else:
raise ValueError("data key {} not support".format(data_key))
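# Usage sketch (the upstream components below are hypothetical, for illustration only):
#   data = Data(train_data=some_reader.output.data,
#               validate_data=other_reader.output.data)
#   data.train_data   # returns whatever was passed as train_data
#   data.other_key    # raises ValueError: data key other_key not support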
|
datasets/data_path/gen_bdd100k_mot.py | anonymous4669/MOTR | 191 | 12723857 | import os
import numpy as np
import json
import cv2
from tqdm import tqdm
from collections import defaultdict
def convert(img_dir, split, label_dir, save_label_dir, filter_crowd=False, filter_ignore=False):
cat2id = {'train':6, 'car':3, 'bus':5, 'other person': 1, 'rider':2, 'pedestrian':1, 'other vehicle':3, 'motorcycle':7, 'bicycle':8, 'trailer':4, 'truck':4}
coco = defaultdict(list)
coco["categories"] = [
{"supercategory": "human", "id": 1, "name": "pedestrian"},
{"supercategory": "human", "id": 2, "name": "rider"},
{"supercategory": "vehicle", "id": 3, "name": "car"},
{"supercategory": "vehicle", "id": 4, "name": "truck"},
{"supercategory": "vehicle", "id": 5, "name": "bus"},
{"supercategory": "vehicle", "id": 6, "name": "train"},
{"supercategory": "bike", "id": 7, "name": "motorcycle"},
{"supercategory": "bike", "id": 8, "name": "bicycle"},
]
attr_id_dict = {
frame["name"]: frame["id"] for frame in coco["categories"]
}
all_categories = set()
img_dir = os.path.join(img_dir, split)
label_dir = os.path.join(label_dir, split)
vids = os.listdir(img_dir)
for vid in tqdm(vids):
txt_label_dir = os.path.join(save_label_dir, split, vid)
os.makedirs(txt_label_dir, exist_ok=True)
annos = json.load(open(os.path.join(label_dir, vid+'.json'), 'r'))
for anno in annos:
name = anno['name']
labels = anno['labels']
videoName = anno['videoName']
frameIndex = anno['frameIndex']
img = cv2.imread(os.path.join(img_dir, vid, name))
seq_height, seq_width, _ = img.shape
if len(labels) < 1:
continue
# for label in labels:
# category = label['category']
# all_categories.add(category)
with open(os.path.join(txt_label_dir, name.replace('jpg', 'txt')), 'w') as f:
for label in labels:
obj_id = label['id']
category = label['category']
attributes = label['attributes']
is_crowd = attributes['crowd']
if filter_crowd and is_crowd:
continue
if filter_ignore and (category not in attr_id_dict.keys()):
continue
box2d = label['box2d']
x1 = box2d['x1']
x2 = box2d['x2']
y1 = box2d['y1']
y2 = box2d['y2']
w = x2-x1
h = y2-y1
cx = (x1+x2) / 2
cy = (y1+y2) / 2
label_str = '{:d} {:d} {:.6f} {:.6f} {:.6f} {:.6f}\n'.format(
cat2id[category], int(obj_id), cx / seq_width, cy / seq_height, w / seq_width, h / seq_height)
f.write(label_str)
# print(f'all categories are {all_categories}.')
def generate_txt(img_dir,label_dir,txt_path='bdd100k.train',split='train'):
img_dir = os.path.join(img_dir, split)
label_dir = os.path.join(label_dir, split)
all_vids = os.listdir(img_dir)
all_frames = []
for vid in tqdm(all_vids):
fids = os.listdir(os.path.join(img_dir, vid))
fids.sort()
for fid in fids:
if os.path.exists(os.path.join(label_dir, vid, fid.replace('jpg', 'txt'))):
all_frames.append(f'images/track/{split}/{vid}/{fid}')
with open(txt_path, 'w') as f:
for frame in all_frames:
f.write(frame+'\n')
'''no filter'''
# img_dir = '/data/Dataset/bdd100k/bdd100k/images/track'
# label_dir = '/data/Dataset/bdd100k/bdd100k/labels/box_track_20'
# save_label_dir = '/data/Dataset/bdd100k/bdd100k/labels/track'
# split = 'train'
# convert(img_dir, split, label_dir, save_label_dir)
# img_dir = '/data/Dataset/bdd100k/bdd100k/images/track'
# label_dir = '/data/Dataset/bdd100k/bdd100k/labels/box_track_20'
# save_label_dir = '/data/Dataset/bdd100k/bdd100k/labels/track'
# split = 'val'
# convert(img_dir, split, label_dir, save_label_dir)
# img_dir = '/data/Dataset/bdd100k/bdd100k/images/track'
# label_dir = '/data/Dataset/bdd100k/bdd100k/labels/box_track_20'
# save_label_dir = '/data/Dataset/bdd100k/bdd100k/labels/track'
# split = 'train'
# generate_txt(img_dir,save_label_dir,txt_path='bdd100k.train',split='train')
# img_dir = '/data/Dataset/bdd100k/bdd100k/images/track'
# label_dir = '/data/Dataset/bdd100k/bdd100k/labels/box_track_20'
# save_label_dir = '/data/Dataset/bdd100k/bdd100k/labels/track'
# split = 'val'
# generate_txt(img_dir,save_label_dir,txt_path='bdd100k.val',split='val')
'''for filter'''
# img_dir = '/data/Dataset/bdd100k/bdd100k/images/track'
# label_dir = '/data/Dataset/bdd100k/bdd100k/labels/box_track_20'
# save_label_dir = '/data/Dataset/bdd100k/bdd100k/filter_labels/track'
# split = 'train'
# convert(img_dir, split, label_dir, save_label_dir, filter_crowd=True, filter_ignore=True)
# img_dir = '/data/Dataset/bdd100k/bdd100k/images/track'
# label_dir = '/data/Dataset/bdd100k/bdd100k/labels/box_track_20'
# save_label_dir = '/data/Dataset/bdd100k/bdd100k/filter_labels/track'
# split = 'val'
# convert(img_dir, split, label_dir, save_label_dir, filter_crowd=True, filter_ignore=True)
# img_dir = '/data/Dataset/bdd100k/bdd100k/images/track'
# label_dir = '/data/Dataset/bdd100k/bdd100k/labels/box_track_20'
# save_label_dir = '/data/Dataset/bdd100k/bdd100k/filter_labels/track'
# split = 'train'
# generate_txt(img_dir,save_label_dir,txt_path='filter.bdd100k.train',split='train')
# img_dir = '/data/Dataset/bdd100k/bdd100k/images/track'
# label_dir = '/data/Dataset/bdd100k/bdd100k/labels/box_track_20'
# save_label_dir = '/data/Dataset/bdd100k/bdd100k/filter_labels/track'
# split = 'val'
# generate_txt(img_dir,save_label_dir,txt_path='filter.bdd100k.val',split='val')
|
contrib/buildbot/test/test_testutil.py | syedrizwanmy/bitcoin-abc | 1,266 | 12723880 | <filename>contrib/buildbot/test/test_testutil.py
#!/usr/bin/env python3
#
# Copyright (c) 2020 The Bitcoin ABC developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import unittest
from testutil import AnyWith
class TestObject():
mystr = 'value'
mydict = {
'item': 'value',
}
def TestAnyWith(expected):
aw = AnyWith(TestObject, expected)
return aw.__eq__(TestObject())
class TestUtilTests(unittest.TestCase):
def test_compareWrongType(self):
# dict is not a TestObject
self.assertRaisesRegex(
AssertionError,
"Argument class type did not match",
AnyWith(
TestObject,
None).__eq__,
{})
def test_happyPaths(self):
self.assertRaisesRegex(
AssertionError, "Argument missing expected attribute", TestAnyWith, {
'does-not-exist': None})
self.assertRaisesRegex(
AssertionError, "Argument missing expected attribute", TestAnyWith, {
'does-not-exist': 'value'})
self.assertRaisesRegex(AssertionError,
"Argument missing expected attribute",
TestAnyWith,
{'does-not-exist': {'item': 'value'}})
TestAnyWith({'mystr': 'value'})
self.assertRaisesRegex(
AssertionError, "Argument attribute type did not match", TestAnyWith, {
'mystr': None})
self.assertRaisesRegex(
AssertionError, "Argument attribute type did not match", TestAnyWith, {
'mystr': {}})
self.assertRaisesRegex(
AssertionError, "Argument attribute value did not match", TestAnyWith, {
'mystr': 'wrong value'})
TestAnyWith({'mydict': {
'item': 'value',
}})
self.assertRaisesRegex(
AssertionError, "Argument attribute type did not match", TestAnyWith, {
'mydict': 'value'})
self.assertRaisesRegex(AssertionError, "Argument attribute value did not match", TestAnyWith, {'mydict': {
'item-does-not-exist': 'value'
}})
self.assertRaisesRegex(AssertionError, "Argument attribute value did not match", TestAnyWith, {'mydict': {
'item': None
}})
self.assertRaisesRegex(AssertionError, "Argument attribute value did not match", TestAnyWith, {'mydict': {
'item': 'wrong value'
}})
if __name__ == '__main__':
unittest.main()
|
price_analysis/fit.py | kevaundray/research | 1,351 | 12723943 | <reponame>kevaundray/research<gh_stars>1000+
import spread
import math
import random
o = spread.declutter(spread.load('diff_txs_price.csv'))
diffs = [float(q[2]) for q in o]
prices = [float(q[1]) for q in o]
txs = [float(q[3]) for q in o]
txfees = [float(q[4]) for q in o]
def simple_estimator(fac):
o = [1]
for i in range(1, len(diffs)):
o.append(o[-1] * diffs[i] * 1.0 / diffs[i-1] / fac)
return o
def minimax_estimator(fac):
o = [1]
for i in range(1, len(diffs)):
if diffs[i] * 1.0 / diffs[i-1] > fac:
o.append(o[-1] * diffs[i] * 1.0 / diffs[i-1] / fac)
elif diffs[i] > diffs[i-1]:
o.append(o[-1])
else:
o.append(o[-1] * diffs[i] * 1.0 / diffs[i-1])
return o
def diff_estimator(fac, dw, mf, exp=1):
o = [1]
derivs = [0] * 14
for i in range(14, len(diffs)):
derivs.append(diffs[i] - diffs[i - 14])
for i in range(0, 14):
derivs[i] = derivs[14]
vals = [max(diffs[i] + derivs[i] * dw, diffs[i] * mf) for i in range(len(diffs))]
for i in range(1, len(diffs)):
if vals[i] * 1.0 / vals[i-1] > fac:
o.append(o[-1] * 1.0 / fac * (vals[i] / vals[i-1])**exp)
elif vals[i] > vals[i-1]:
o.append(o[-1])
else:
o.append(o[-1] * 1.0 * (vals[i] / vals[i-1])**exp)
return o
def tx_diff_estimator(fac, dw, mf, lin=1, exp=1):
fac = (fac - 1) or 0.000001
o = [1]
initavg = sum([txs[i] for i in range(5)]) / 5.0
txavgs = [initavg] * 5
for i in range(5, len(txs)):
txavgs.append(txavgs[-1] * 0.8 + txs[i] * 0.2)
derivs = [0] * 14
for i in range(14, len(txavgs)):
derivs.append(txavgs[i] - txavgs[i - 14])
for i in range(0, 14):
derivs[i] = derivs[14]
vals = [max(txavgs[i] + derivs[i] * dw, txavgs[i] * mf) for i in range(len(txavgs))]
for i in range(1, len(txavgs)):
growth = (vals[i] * 1.0 / vals[i-1] - 1)
if growth > fac:
surplus = (growth / fac) - 1
o.append(o[-1] * (1 + (surplus * lin * fac) ** exp))
elif vals[i] > vals[i-1]:
o.append(o[-1])
else:
surplus = 1 - growth
o.append(o[-1] * (1 - (surplus * lin * fac) ** exp))
if i and o[-1] < o[-2] * mf:
o[-1] = o[-2] * mf
return o
def minimax_fee_estimator(fac, days):
o = [1]
initavg = sum([txs[i] for i in range(int(days))]) * 1.0 / days
txavgs = [initavg] * int(days)
for i in range(int(days), len(txs)):
txavgs.append(txavgs[-1] * 1.0 * (days-1) / days + txs[i] * 1.0 / days)
initavg2 = sum([txfees[i] for i in range(int(days))]) * 1.0 / days
txfeeavgs = [initavg2] * int(days)
for i in range(int(days), len(txs)):
txfeeavgs.append(txfeeavgs[-1] * 1.0 * (days-1) / days + txfees[i] * 1.0 / days)
# Calculate inverse fee, invfee ~= price
txavgfees = [t / f for f, t in zip(txfeeavgs, txavgs)]
for i in range(1, len(txavgfees)):
if txavgfees[i] * 1.0 / txavgfees[i-1] > fac:
o.append(o[-1] * txavgfees[i] * 1.0 / txavgfees[i-1] / fac)
elif txavgfees[i] > txavgfees[i-1]:
o.append(o[-1])
else:
o.append(o[-1] * txavgfees[i] * 1.0 / txavgfees[i-1])
return o
def ndiff_estimator(*args):
fac, dws, mf = args[0], args[1:-1], args[-1]
o = [1]
ds = [diffs]
for dw in dws:
derivs = [0] * 14
for i in range(14, len(diffs)):
derivs.append(ds[-1][i] - ds[-1][i - 14])
for i in range(0, 14):
derivs[i] = derivs[14]
ds.append(derivs)
vals = []
for i in range(len(diffs)):
q = ds[0][i] + sum([ds[j+1][i] * dws[j] for j in range(len(dws))])
vals.append(max(q, ds[0][i] * mf))
for i in range(1, len(diffs)):
if vals[i] * 1.0 / vals[i-1] > fac:
o.append(o[-1] * vals[i] * 1.0 / vals[i-1] / fac)
elif vals[i] > vals[i-1]:
o.append(o[-1])
else:
o.append(o[-1] * vals[i] * 1.0 / vals[i-1])
return o
def dual_threshold_estimator(fac1, fac2, dmul):
o = [1]
derivs = [0] * 14
for i in range(14, len(diffs)):
derivs.append(diffs[i] - diffs[i - 14])
for i in range(0, 14):
derivs[i] = derivs[14]
for i in range(1, len(diffs)):
if diffs[i] * 1.0 / diffs[i-1] > fac1 and derivs[i] * 1.0 / derivs[i-1] > fac2:
o.append(o[-1] * diffs[i] * 1.0 / diffs[i-1] / fac1 * (1 + (derivs[i] / derivs[i-1] - fac2) * dmul))
elif diffs[i] > diffs[i-1]:
o.append(o[-1])
else:
o.append(o[-1] * diffs[i] * 1.0 / diffs[i-1])
return o
infinity = 2.**1023
infinity *= 2
def evaluate_estimates(estimates, crossvalidate=False):
sz = len(prices) if crossvalidate else 780
sqdiffsum = 0
# compute average
tot = 0
for i in range(sz):
if estimates[i] == infinity or estimates[i] <= 0:
return 10**20
tot += math.log(prices[i] / estimates[i])
avg = 2.718281828459 ** (tot * 1.0 / sz)
if avg <= 0:
return 10**20
for i in range(1, sz):
sqdiffsum += math.log(prices[i] / estimates[i] / avg) ** 2
return sqdiffsum
# Simulated annealing optimizer
def optimize(producer, floors, ceilings, rate=0.7, rounds=5000, tries=1):
bestvals, besty = None, 10**21
for t in range(tries):
print 'Starting test %d of %d' % (t + 1, tries)
vals = [f*0.5+c*0.5 for f, c in zip(floors, ceilings)]
y = evaluate_estimates(producer(*vals))
for i in range(1, rounds):
stepsizes = [(f*0.5-c*0.5) / i**rate for f, c in zip(floors, ceilings)]
steps = [(random.random() * 2 - 1) * s for s in stepsizes]
newvals = [max(mi, min(ma, v+s)) for v, s, mi, ma in zip(vals, steps, floors, ceilings)]
newy = evaluate_estimates(producer(*newvals))
if newy < y:
vals = newvals
y = newy
if not i % 1000:
print i, vals, y
if y < besty:
bestvals, besty = vals, y
return bestvals
def score(producer, *vals):
return evaluate_estimates(producer(*vals), True)
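# Usage sketch (the search bounds below are assumptions, not tuned values):
#   best = optimize(diff_estimator, floors=[1.0, 0.0, 0.0], ceilings=[1.2, 40.0, 1.0])
#   print score(diff_estimator, *best)   # cross-validated fit error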
|
codigo/Live172/chalice-lambdas/app.py | BrunoPontesLira/live-de-python | 572 | 12723948 | from chalice import Chalice, Rate
import logging
app = Chalice(app_name='chalice-lambdas')
app.log.setLevel(logging.DEBUG)
@app.route('/')
def index():
return {'message': 'Olar Chalice!'}
@app.route('/batatinhas')
def batatinhas():
return {'message': 'Olar batatinhas!'}
@app.route('/query')
def query():
return {
'message': 'Olar Query!',
'params': app.current_request.query_params
}
@app.route('/meu-post', methods=['POST'])
def post_func():
return {
'message': 'Olar Query!',
'params': app.current_request.json_body
}
@app.lambda_function(name='batata-function')
def my_lambda(request, context):
return {}
@app.schedule(Rate(1, unit=Rate.MINUTES))
def scheduler(event):
app.log.info('Executei o scheeeeedddddddd!!!!')
@app.on_s3_event(bucket='live-de-bucket')
def s3_event(event):
app.log.info(f'{event.bucket}, {event.key}')
|
src/lib/_typeAliases.py | t3kt/raytk | 108 | 12724015 | from typing import Union, Optional
from _stubs import *
class StrParamT(Par, Union[Par, str]):
def eval(self) -> str: pass
class IntParamT(Par, Union[Par, str, int]):
def eval(self) -> int: pass
class FloatParamT(Par, Union[Par, str, float, int]):
def eval(self) -> float: pass
class DatParamT(Par, Union[Par, str, DAT]):
def eval(self) -> Optional[DAT]: pass
class CompParamT(Par, Union[Par, str, COMP]):
def eval(self) -> Optional[COMP]: pass
class OPParamT(Par, Union[Par, str, OP, COMP, DAT, SOP, TOP, CHOP, MAT]):
def eval(self) -> Optional[Union[OP, COMP, DAT, SOP, TOP, CHOP, MAT]]: pass
class BoolParamT(Par, Union[Par, bool, int]):
def eval(self) -> bool: pass
|
scripts/data/kitti2bb3txt.py | wuzzh/master_thesis_code | 206 | 12724017 | """
Script for translating the KITTI 3D bounding box annotation format into the BB3TXT data format.
A BB3TXT file is formatted like this:
filename label confidence xmin ymin xmax ymax fblx fbly fbrx fbry rblx rbly ftly
filename label confidence xmin ymin xmax ymax fblx fbly fbrx fbry rblx rbly ftly
filename label confidence xmin ymin xmax ymax fblx fbly fbrx fbry rblx rbly ftly
...
----------------------------------------------------------------------------------------------------
python kitti2bb3txt.py path_labels path_images outfile.bb3txt
----------------------------------------------------------------------------------------------------
"""
__date__ = '03/17/2017'
__author__ = '<NAME>'
__email__ = '<EMAIL>'
import argparse
import os
import numpy as np
import cv2
from mappings.utils import LabelMappingManager
from mappings.utils import available_categories
from shared.geometry import R3x3_y, t3x1, Rt4x4
####################################################################################################
# DEFINITIONS #
####################################################################################################
# IMPORTANT !!
# The labels must translate precisely into the numbers in the kitti.yaml mapping file!
LABELS = {
'Car': 1,
'Van': 2,
'Truck': 3,
'Pedestrian': 4,
'Person_sitting': 5,
'Cyclist': 6,
'Tram': 7,
# Throw away 'Misc' and 'DontCare'
}
# Initialize the LabelMappingManager
LMM = LabelMappingManager()
MAPPING = LMM.get_mapping('kitti')
####################################################################################################
# FUNCTIONS #
####################################################################################################
def read_camera_matrix(line):
"""
Reads a camera matrix P (3x4) stored in the row-major scheme.
Input:
line: Row-major stored matrix separated by spaces, first element is the matrix name
Returns:
camera matrix P 4x4
"""
data = line.split(' ')
if data[0] != 'P2:':
print('ERROR: We need left camera matrix (P2)!')
exit(1)
P = np.asmatrix([[float(data[1]), float(data[2]), float(data[3]), float(data[4])],
[float(data[5]), float(data[6]), float(data[7]), float(data[8])],
[float(data[9]), float(data[10]), float(data[11]), float(data[12])]])
return P
def extract_3D_bb(data, P):
"""
Extract 3D bounding box coordinates in the image from the KITTI labels.
Input:
data: One split line of the label file (line.split(' '))
P: 3x4 camera projection matrix
Returns:
matrix of corners: fbr, rbr, fbl, rbl, ftr, rtr, ftl, rtl
"""
# Object dimensions
h = float(data[8])
w = float(data[9])
l = float(data[10])
# Position of the center point on the ground plane (xz plane)
cx = float(data[11])
cy = float(data[12])
cz = float(data[13])
# Rotation of the object around y
ry = float(data[14])
# 3D box corners - careful, the coordinate system of the car is that x points
# forward, not z! (It is rotated by 90deg with respect to the camera one)
# fbr, rbr, fbl, rbl, ftr, rtr, ftl, rtl
X = np.asmatrix([[l/2, -l/2, l/2, -l/2, l/2, -l/2, l/2, -l/2],
[0, 0, 0, 0, -h, -h, -h, -h],
[-w/2, -w/2, w/2, w/2, -w/2, -w/2, w/2, w/2],
[1, 1, 1, 1, 1, 1, 1, 1]])
# Rotate the 3D box around y axis and translate it to the correct position in the camera frame
X = Rt4x4(R3x3_y(ry), t3x1(cx, cy, cz)) * X
x = P * X
# x is in homogeneous coordinates -> get u, v
x = x / x[2,:]
x = x[0:2,:]
# image = cv2.imread(path_image)
# # Front
# cv2.line(image, (int(x[0,0]), int(x[1,0])), (int(x[0,2]), int(x[1,2])), (0,255,0), 3)
# cv2.line(image, (int(x[0,4]), int(x[1,4])), (int(x[0,6]), int(x[1,6])), (0,255,0))
# cv2.line(image, (int(x[0,0]), int(x[1,0])), (int(x[0,4]), int(x[1,4])), (0,255,0))
# cv2.line(image, (int(x[0,2]), int(x[1,2])), (int(x[0,6]), int(x[1,6])), (0,255,0), 3)
# # Rear
# cv2.line(image, (int(x[0,1]), int(x[1,1])), (int(x[0,3]), int(x[1,3])), (0,0,255))
# cv2.line(image, (int(x[0,5]), int(x[1,5])), (int(x[0,7]), int(x[1,7])), (0,0,255))
# cv2.line(image, (int(x[0,1]), int(x[1,1])), (int(x[0,5]), int(x[1,5])), (0,0,255))
# cv2.line(image, (int(x[0,3]), int(x[1,3])), (int(x[0,7]), int(x[1,7])), (0,0,255))
# # Connections
# cv2.line(image, (int(x[0,0]), int(x[1,0])), (int(x[0,1]), int(x[1,1])), (255,0,0))
# cv2.line(image, (int(x[0,2]), int(x[1,2])), (int(x[0,3]), int(x[1,3])), (255,0,0), 3)
# cv2.line(image, (int(x[0,4]), int(x[1,4])), (int(x[0,5]), int(x[1,5])), (255,0,0))
# cv2.line(image, (int(x[0,6]), int(x[1,6])), (int(x[0,7]), int(x[1,7])), (255,0,0))
# # Show image
# cv2.imshow('img', image)
# cv2.waitKey()
return x
def flip_3D_bb(x, image_width):
"""
Flips the annotation of the image around y axis.
Input:
x: coordinates of points fbr, rbr, fbl, rbl, ftr, rtr, ftl, rtl
image_width: width of the flipped image
Return:
x - flipped coordinates
"""
# First flip the x coordinates of the points
x[0,:] = image_width - x[0,:]
# Now switch left and right points
x_out = np.matrix(np.copy(x))
x_out[:,0] = x[:,2]
x_out[:,1] = x[:,3]
x_out[:,2] = x[:,0]
x_out[:,3] = x[:,1]
x_out[:,4] = x[:,6]
x_out[:,5] = x[:,7]
x_out[:,6] = x[:,4]
x_out[:,7] = x[:,5]
return x_out
def process_image(path_image, path_label_file, path_calib_file, label, flip, filter, outfile):
"""
Processes one image from the dataset and writes it out to the outfile.
Input:
path_image: Path to the image file
path_label_file: Path to the label file with KITTI labels
path_calib_file: Path to the calibration file for this image
label: Which class label should be extracted from the dataset (default None)
flip: True/False whether the images should also be flipped by this script
filter: True/False whether we should filter out very occluded and truncated boxes
outfile: File handle of the open output BBTXT file
"""
if flip:
# We have to flip the image and save it
image = cv2.imread(path_image)
image_width = image.shape[1]
filename = os.path.basename(path_image)
directory = os.path.dirname(path_image).rstrip('/') + '_flip'
path_image = os.path.join(directory, filename)
if not os.path.exists(directory):
os.makedirs(directory)
if not os.path.exists(path_image):
image = cv2.flip(image, 1)
cv2.imwrite(path_image, image)
with open(path_label_file, 'r') as infile_label, open(path_calib_file, 'r') as infile_calib:
# Read camera calibration matrices
for line in infile_calib:
if line[:2] == 'P2':
P = read_camera_matrix(line.rstrip('\n'))
# Read the objects
for line in infile_label:
line = line.rstrip('\n')
data = line.split(' ')
# First element of the data is the label. We don't want to process 'Misc' and
# 'DontCare' labels
if data[0] == 'Misc' or data[0] == 'DontCare': continue
# Check label, if required
if label is not None and MAPPING[LABELS[data[0]]] != label: continue
# We do not want to include objects, which are occluded or truncated too much
if filter and (int(data[2]) >= 2 or float(data[1]) > 0.75): continue
# Extract image coordinates (positions) of 3D bounding box corners, the corners are
# in the following order: fbr, rbr, fbl, rbl, ftr, rtr, ftl, rtl
x = extract_3D_bb(data, P)
if flip:
x = flip_3D_bb(x, image_width)
min_uv = np.min(x, axis=1) # xmin, ymin
max_uv = np.max(x, axis=1) # xmax, ymax
# The size of an image in KITTI is 1250x375. If the bounding box is significantly
# larger, discard it - probably just some large distortion from camera
if max_uv[1,0]-min_uv[1,0] > 700 or max_uv[0,0]-min_uv[0,0] > 1500:
continue
line_out = path_image + ' '
line_out += str(LABELS[data[0]]) + ' '
# For confidence we put one - just to have something
line_out += '1 '
# 3D bounding box is specified by the image coordinates of the front bottom left and
# right corners, rear bottom left corner and y coordinate of the front top left
# corner
line_out += str(min_uv[0,0]) + ' ' + str(min_uv[1,0]) + ' ' \
+ str(max_uv[0,0]) + ' ' + str(max_uv[1,0]) + ' ' \
+ str(x[0,2]) + ' ' + str(x[1,2]) + ' ' + str(x[0,0]) + ' ' \
+ str(x[1,0]) + ' ' + str(x[0,3]) + ' ' + str(x[1,3]) + ' ' \
+ str(x[1,6]) + '\n'
outfile.write(line_out)
def translate_file(path_labels, path_images, outfile, label, flip, filter):
"""
Runs the translation of the KITTI 3d bounding box label format into the BB3TXT format.
Input:
path_labels: Path to the "label_2" folder of the KITTI dataset
path_images: Path to the "image_2" folder with images from the KITTI dataset
outfile: File handle of the open output BBTXT file
label: Which class label should be extracted from the dataset (default None)
flip: True/False whether the images should also be flipped by this script
filter: True/False whether we should filter out very occluded and truncated boxes
"""
print('-- TRANSLATING KITTI TO BB3TXT')
# Get the list of all label files in the directory
filenames = [f for f in os.listdir(path_labels) if os.path.isfile(os.path.join(path_labels, f))]
if len(filenames) != 7481:
print('Wrong number (%d) of files in the KITTI dataset! Should be 7481.'%(len(filenames)))
return
# Read each file and write the labels from it
for f in filenames:
path_label_file = os.path.join(path_labels, f)
path_calib_file = os.path.join(path_labels.rstrip('/').rstrip('label_2'), 'calib', f)
if not os.path.exists(path_calib_file):
print('ERROR: We need camera calibration matrices "%s"'%(path_calib_file))
exit(1)
path_image = os.path.join(path_images, os.path.splitext(f)[0]) + '.png'
if not os.path.isfile(path_image):
print('WARNING: Image "%s" does not exist!'%(path_image))
process_image(path_image, path_label_file, path_calib_file, label, False, filter, outfile)
if flip:
# Add also the flipped image
process_image(path_image, path_label_file, path_calib_file, label, True, filter, outfile)
print('-- TRANSLATION DONE')
####################################################################################################
# MAIN #
####################################################################################################
def parse_arguments():
"""
Parse input options of the script.
"""
parser = argparse.ArgumentParser(description='Convert KITTI label files into BBTXT.')
parser.add_argument('path_labels', metavar='path_labels', type=str,
help='Path to the "label_2" folder of the KITTI dataset')
parser.add_argument('path_images', metavar='path_images', type=str,
help='Path to the "image_2" folder of the KITTI dataset')
parser.add_argument('outfile', metavar='path_outfile', type=argparse.FileType('w'),
help='Path to the output BBTXT file (including the extension)')
parser.add_argument('--label', metavar='label', type=str, default=None,
help='Single class of objects that should be separated from the dataset. ' \
'One from ' + str(available_categories(MAPPING)))
parser.add_argument('--flip', dest='flip', action='store_true', default=False,
help='If provided, the images will also be flipped')
parser.add_argument('--filter', dest='filter', action='store_true', default=False,
help='If provided, very occluded and truncated bounding boxes will be ' \
'filtered out')
args = parser.parse_args()
if not os.path.exists(args.path_labels):
print('Input path "%s" does not exist!'%(args.path_labels))
parser.print_help()
exit(1)
if not os.path.exists(args.path_images):
print('Input path "%s" does not exist!'%(args.path_images))
parser.print_help()
exit(1)
if args.label is not None and args.label not in available_categories(MAPPING):
print('Unknown class label "%s"!'%(args.label))
exit(1)
return args
def main():
args = parse_arguments()
translate_file(args.path_labels, args.path_images, args.outfile, args.label, args.flip,
args.filter)
args.outfile.close()
if __name__ == '__main__':
main()
|
roboticstoolbox/models/ETS/__init__.py | tassos/robotics-toolbox-python | 749 | 12724045 | from roboticstoolbox.models.ETS.Panda import Panda
from roboticstoolbox.models.ETS.Frankie import Frankie
from roboticstoolbox.models.ETS.Puma560 import Puma560
from roboticstoolbox.models.ETS.Planar_Y import Planar_Y
from roboticstoolbox.models.ETS.Planar2 import Planar2
from roboticstoolbox.models.ETS.GenericSeven import GenericSeven
from roboticstoolbox.models.ETS.Omni import Omni
__all__ = ["Panda", "Frankie", "Puma560", "Planar_Y", "Planar2", "GenericSeven", "Omni"]
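# Usage sketch (assumes the roboticstoolbox package and its dependencies are installed):
#   from roboticstoolbox.models.ETS import Panda
#   robot = Panda()
#   print(robot)   # prints the ETS description of the robot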
|
mentalist/view/adder.py | qkum/mentalist | 1,293 | 12724050 |
import tkinter as Tk
from functools import partial
import datetime
import tkinter.messagebox
import locale
from .base_words import BaseWordsNode, center_window
from .const import NUMBER_LIST, DATE_FORMATS, SPECIAL_CHARACTERS
from .. import model
class AdderNode(BaseWordsNode):
'''Append and Prepend nodes. Inherits the file menu from BaseWordsNode.
'''
def __init__(self, controller, master=None, main=None, type_='Append', allow_remove=True, **kwargs):
BaseWordsNode.__init__(self, controller, master=master, main=main, title=type_, allow_remove=allow_remove, **kwargs)
self.sp_from = None
self.sp_to = None
self.custom_num_window = None
self.entry_string = None
self.date_format = Tk.StringVar()
self.special_dlg = None
self.chk_special = []
def add_upper_button(self):
mb = Tk.Menubutton(self.upper_frame, text=" + ", relief="raised", font=("Helvetica", "14"))
mb.menu = Tk.Menu(mb, tearoff=0)
mb["menu"] = mb.menu
label = 'No %s' % self.title
mb.menu.add_command(label=label, command=partial(self.controller.add_attr, label=label, node_view=self, attr_class=model.NothingAdderAttr))
# The first few attributes are the same as BaseFile
m_words = Tk.Menu(mb, tearoff=0)
mb.menu.add_cascade(label='Words', menu=m_words, underline=0)
m_words.add_command(label='Custom File...', command=partial(self.open_file_dlg, partial(self.controller.add_attr, label='File:', right_label_text='Calculating...', node_view=self, attr_class=model.FileAttr, controller=self.controller)))
m_words.add_command(label='Custom String...', command=partial(self.open_string_popup, 'String'))
self.add_file_menu(m_words, m_words)
# In addition to BaseFile's attributes, we have numbers, dates,
# and special characters
m_numbers = Tk.Menu(mb, tearoff=0)
mb.menu.add_cascade(label='Numbers', menu=m_numbers, underline=0)
m_numbers.add_command(label='User Defined...', command=self.open_custom_number_dlg)
for type_, range_str in NUMBER_LIST:
range_ = list(map(int, range_str.split('-')))
if type_ != 'years':
range_str = '-'.join(['0', locale.format('%d', range_[1], grouping=True)])
range_[1] += 1
m_numbers.add_command(label='{}: {}'.format(type_.capitalize(), range_str),
command=partial(
self.controller.add_attr, label='Numbers: {} {}'.format(type_.capitalize(), range_str), node_view=self, attr_class=model.RangeAttr, start=range_[0], end=range_[1]))
m_numbers.add_command(label='Dates...', command=self.open_date_dlg)
mb.menu.add_command(label="Special Characters...", command=self.open_special_dlg)
# Area and zip codes from lookup tables
for code_type in ['Area', 'Zip']:
m_area_zip = Tk.Menu(mb, tearoff=0)
mb.menu.add_cascade(label='{} Codes (US)'.format(code_type), menu=m_area_zip, underline=0)
for location_type in ['State', 'City']:
m_sub = Tk.Menu(m_area_zip, tearoff=0)
m_area_zip.add_cascade(label='By {}'.format(location_type), menu=m_sub, underline=0)
target_list = sorted(model.location_codes[location_type][code_type].keys())
for st in target_list:
label = '{} Codes: {} {}'.format(code_type, st, location_type if location_type == 'State' else '')
m_sub.add_command(label=st, command=partial(
self.controller.add_attr, label=label, node_view=self, attr_class=model.LocationCodeAttr, code_type=code_type, location=st, location_type=location_type))
mb.pack(side="left", fill="x", padx=10, pady=5)
def open_custom_number_dlg(self):
'''Opens a popup for defining a custom number range
'''
self.custom_num_window = Tk.Toplevel()
self.custom_num_window.withdraw()
self.custom_num_window.title('{}: Number Selection'.format(self.title))
self.custom_num_window.resizable(width=False, height=False)
frame = Tk.Frame(self.custom_num_window)
lb = Tk.Label(frame, text='Select Numbers to {}'.format(self.title))
lb.pack(fill='both', side='top')
# Boxes for inputting the start and endpoints
sp_box = Tk.Frame(frame)
lb1 = Tk.Label(sp_box, text='From')
lb1.grid(column=0, row=0, padx=5, sticky='E')
self.sp_from = Tk.Spinbox(sp_box, width=12, from_=0, to=10000)
self.sp_from.grid(column=1, row=0)
lb2 = Tk.Label(sp_box, text='To')
lb2.grid(column=0, row=1, padx=5, sticky='E')
self.sp_to = Tk.Spinbox(sp_box, width=12, from_=0, to=10000)
self.sp_to.grid(column=1, row=1)
# Optional zero padding
lb_zeros = Tk.Label(sp_box, text='Pad with zeros to width:')
lb_zeros.grid(column=0, row=2, sticky='E')
self.sp_zfill = Tk.Spinbox(sp_box, width=12, from_=0, to=10)
self.sp_zfill.grid(column=1, row=2)
sp_box.pack(fill='both', side='top', padx=30, pady=20)
# Cancel and Ok buttons
btn_box = Tk.Frame(frame)
btn_cancel = Tk.Button(btn_box, text='Cancel', command=self.cancel_custom_num_window)
btn_cancel.pack(side='right', padx=10, pady=20)
btn_ok = Tk.Button(btn_box, text='Ok', command=self.on_ok_custom_num_window)
btn_ok.pack(side='left', padx=10, pady=20)
btn_box.pack()
frame.pack(fill='both', padx=10, pady=10)
center_window(self.custom_num_window, self.main.master)
self.custom_num_window.focus_set()
def cancel_custom_num_window(self, *args):
'''Cancel was pressed
'''
if self.custom_num_window:
self.custom_num_window.destroy()
self.custom_num_window = None
def on_ok_custom_num_window(self, *args):
'''Ok was pressed, create the attribute
'''
try:
val_from = int(self.sp_from.get())
val_to = int(self.sp_to.get())
zfill = int(self.sp_zfill.get())
except ValueError:
tkinter.messagebox.showerror('Invalid Number', '"From", "To", and "Pad with zeros to width" must all be integers', parent=self.main)
return
if val_from > val_to:
tkinter.messagebox.showerror('Invalid Range', '"From" value must be less than or equal to "To"', parent=self.main)
elif val_to - val_from > 3000000:
tkinter.messagebox.showerror('Invalid Range', 'The range must be smaller than 3 million', parent=self.main)
else:
if zfill == 0:
label = 'Numbers: {} - {}'.format(val_from, val_to)
else:
label = 'Numbers: {} - {}, zero padding width: {}'.format(val_from, val_to, zfill)
self.controller.add_attr(label=label,
node_view=self,
attr_class=model.RangeAttr,
start=val_from, end=val_to+1,
zfill=zfill)
self.cancel_custom_num_window()
def open_date_dlg(self):
'''Open a popup for defining a range of dates
'''
self.custom_num_window = Tk.Toplevel()
self.custom_num_window.withdraw()
self.custom_num_window.title('{}: Date Selection'.format(self.title))
self.custom_num_window.resizable(width=False, height=False)
frame = Tk.Frame(self.custom_num_window)
lb = Tk.Label(frame, text='Select Dates to {}'.format(self.title))
lb.pack(fill='both', side='top')
# Boxes for inputting the start and endpoints
sp_box = Tk.Frame(frame)
lb1 = Tk.Label(sp_box, text='From')
lb1.pack(side='left', padx=5)
cur_year = datetime.date.today().year
self.sp_from = Tk.Spinbox(sp_box, width=12, from_=1950, to=cur_year)
self.sp_from.pack(side='left')
lb2 = Tk.Label(sp_box, text='To')
lb2.pack(side='left', padx=(50, 5))
var = Tk.IntVar()
var.set(str(cur_year))
self.sp_to = Tk.Spinbox(sp_box, width=12, from_=1950, to=cur_year, textvariable=var)
self.sp_to.pack(side='right')
sp_box.pack(fill='both', side='top', padx=30, pady=20)
# Choose how the dates are formatted (mmddyyyy etc.)
drop_down = Tk.OptionMenu(frame, self.date_format, *DATE_FORMATS)
drop_down.configure(width=max(map(len, DATE_FORMATS)) + 4)
self.date_format.set('mmddyy')
drop_down.pack(side='top')
self.date_zero_padding = Tk.IntVar()
checkbutton = Tk.Checkbutton(frame, text='Leading zero on single-digit d or m', relief=Tk.FLAT, variable=self.date_zero_padding)
checkbutton.pack()
# Ok and cancel buttons
btn_box = Tk.Frame(frame)
btn_cancel = Tk.Button(btn_box, text='Cancel', command=self.cancel_custom_num_window)
btn_cancel.pack(side='right', padx=10, pady=20)
btn_ok = Tk.Button(btn_box, text='Ok', command=self.on_ok_date_window)
btn_ok.pack(side='left', padx=10, pady=20)
btn_box.pack()
frame.pack(fill='both', padx=10, pady=10)
center_window(self.custom_num_window, self.main.master)
self.custom_num_window.focus_set()
def on_ok_date_window(self):
'''Ok was pressed, add the date range attribute
'''
year_limits = [1, 3000]
try:
val_from = int(self.sp_from.get())
val_to = int(self.sp_to.get())
except ValueError:
tkinter.messagebox.showerror('Invalid Value', '"From" year and "To" year must both be integers', parent=self.main)
return
if val_from > val_to:
tkinter.messagebox.showerror('Invalid Value', '"From" year must be less than or equal to "To" year', parent=self.main)
elif val_to - val_from > 200:
tkinter.messagebox.showerror('Invalid Value', 'Distance between "From" year and "To" year must be 200 or less', parent=self.main)
elif val_from < year_limits[0] or val_to > year_limits[1]:
tkinter.messagebox.showerror('Invalid Range', 'The year must be between {} and {}'.format(*year_limits), parent=self.main)
else:
label = 'Date: {} - {}, format: {}, {}'.format(val_from, val_to, self.date_format.get(), ['no leading zero', 'with leading zero'][self.date_zero_padding.get()==1])
self.controller.add_attr(label=label, node_view=self, attr_class=model.DateRangeAttr, start_year=val_from, end_year=val_to+1, format=self.date_format.get(), zero_padding=self.date_zero_padding.get()==1, controller=self.controller)
self.cancel_custom_num_window()
def open_special_dlg(self):
'''Open a popup for selecting special characters
'''
self.special_dlg = Tk.Toplevel()
self.special_dlg.withdraw()
self.special_dlg.title('Select Special Characters')
self.special_dlg.resizable(width=False, height=False)
frame = Tk.Frame(self.special_dlg)
lb = Tk.Label(frame, text='Select Special Characters'.format(self.title))
lb.pack(fill='both', side='top')
box = Tk.Frame(frame)
self.chk_special = []
max_column_checks = 15
for v, val in enumerate(SPECIAL_CHARACTERS):
var = Tk.IntVar()
tmp = Tk.Checkbutton(box, text=val, relief=Tk.FLAT, variable=var)
self.chk_special.append(var)
tmp.grid(row=v % max_column_checks, column=v // max_column_checks,
sticky='W', padx=10)
box.pack(fill='both', side='top', padx=30, pady=20)
# Ok and Cancel buttons
btn_box = Tk.Frame(frame)
btn_cancel = Tk.Button(btn_box, text='Cancel', command=self.cancel_special)
btn_cancel.pack(side='right', padx=10, pady=20)
btn_ok = Tk.Button(btn_box, text='Ok', command=self.on_ok_special_dlg)
btn_ok.pack(side='left', padx=10, pady=20)
btn_box.pack()
frame.pack(fill='both', padx=60, pady=10)
center_window(self.special_dlg, self.main.master)
self.special_dlg.focus_set()
def cancel_special(self, *args):
if self.special_dlg:
self.special_dlg.destroy()
self.special_dlg = None
def on_ok_special_dlg(self, *args):
'''Ok was pressed, add the special character attribute
'''
checked_vals = [SPECIAL_CHARACTERS[i] for i in range(len(SPECIAL_CHARACTERS)) if self.chk_special[i].get() == 1]
if len(checked_vals) > 0:
label = 'Special Characters: {}'.format(' '.join(checked_vals))
self.controller.add_attr(label=label, node_view=self, attr_class=model.StringListAttr, strings=checked_vals)
self.cancel_special()
|
Logistic Regression with StatsModels/logistic.py | joao-r-santos/DataSciencePython | 5,070 | 12724074 |
"""
Created on Wed Sep 09 12:38:16 2015
@author: ujjwal.karn
"""
import pandas as pd #for handling datasets
import statsmodels.api as sm #for statistical modeling
import pylab as pl #for plotting
import numpy as np #for numerical computation
# read the data in
dfTrain = pd.read_csv("C:\\Users\\ujjwal.karn\\Desktop\\Python\\train.csv")
dfTest = pd.read_csv("C:\\Users\\ujjwal.karn\\Desktop\\Python\\test.csv")
# take a look at the dataset
print dfTrain.head()
# admit gre gpa prestige
#0 0 380 3.61 good
#1 1 660 3.67 good
#2 1 800 4.00 best
#3 1 640 3.19 ok
#4 0 520 2.93 ok
print dfTest.head()
# gre gpa prestige
#0 640 3.30 veryGood
#1 660 3.60 good
#2 400 3.15 veryGood
#3 680 3.98 veryGood
#4 220 2.83 good
# summarize the data
print dfTrain.describe()
# admit gre gpa
#count 300.000000 300.000000 300.000000
#mean 0.306667 590.866667 3.386233
#std 0.461880 117.717630 0.374880
#min 0.000000 300.000000 2.260000
#25% 0.000000 515.000000 3.130000
#50% 0.000000 600.000000 3.390000
#75% 1.000000 680.000000 3.642500
#max 1.000000 800.000000 4.000000
# take a look at the standard deviation of each column
print dfTrain.std()
#admit 0.46188
#gre 117.71763
#gpa 0.37488
# frequency table cutting prestige and whether or not someone was admitted
print pd.crosstab(dfTrain['admit'], dfTrain['prestige'], rownames=['admit'])
#prestige best good ok veryGood
#admit
#0 20 73 47 68
#1 25 19 9 39
#explore data
dfTrain.groupby('admit').mean()
# gre gpa
#admit
#0 573.461538 3.336587
#1 630.217391 3.498478
# plot one column
dfTrain['gpa'].hist()
pl.title('Histogram of GPA')
pl.xlabel('GPA')
pl.ylabel('Frequency')
pl.show()
# barplot of gre score grouped by admission status (True or False)
pd.crosstab(dfTrain.gre, dfTrain.admit.astype(bool)).plot(kind='bar')
pl.title('GRE score by Admission Status')
pl.xlabel('GRE score')
pl.ylabel('Frequency')
pl.show()
# dummify prestige
dummy_ranks = pd.get_dummies(dfTrain['prestige'], prefix='prestige')
print dummy_ranks.head()
# prestige_best prestige_good prestige_ok prestige_veryGood
#0 0 1 0 0
#1 0 1 0 0
#2 1 0 0 0
#3 0 0 1 0
#4 0 0 1 0
# create a clean data frame for the regression
cols_to_keep = ['admit', 'gre', 'gpa']
data = dfTrain[cols_to_keep].join(dummy_ranks.ix[:, 'prestige_good':])
print data.head()
# admit gre gpa prestige_good prestige_ok prestige_veryGood
#0 0 380 3.61 1 0 0
#1 1 660 3.67 1 0 0
#2 1 800 4.00 0 0 0
#3 1 640 3.19 0 1 0
#4 0 520 2.93 0 1 0
# manually add the intercept
data['intercept'] = 1.0
print data.head()
train_cols = data.columns[1:]
print data.columns[1:]
# Index([u'gre', u'gpa', u'prestige_good', u'prestige_ok', u'prestige_veryGood', u'intercept'], dtype='object')
#Logistic Regression
logit = sm.Logit(data['admit'], data[train_cols])
# fit the model
result = logit.fit()
print result.summary()
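# Optional interpretation step (a sketch, not part of the original analysis): exponentiating
# the fitted coefficients turns them into odds ratios.
#print np.exp(result.params)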
# recreate the dummy variables
dummy_ranks_test = pd.get_dummies(dfTest['prestige'], prefix='prestige')
print dummy_ranks_test
#create intercept column
dfTest['intercept'] = 1.0
# keep only what we need for making predictions
cols_to_keep = ['gre', 'gpa', 'prestige', 'intercept']
dfTest = dfTest[cols_to_keep].join(dummy_ranks_test.ix[:, 'prestige_good':])
dfTest.head()
# make predictions on the enumerated dataset
dfTest['admit_pred'] = result.predict(dfTest[train_cols])
#see probabilities
print dfTest.head()
#convert probabilities to 'yes' 'no'
dfTest['admit_yn']= np.where(dfTest['admit_pred'] > 0.5,'yes','no')
print dfTest.head()
cols= ['gre', 'gpa', 'admit_yn']
dfTest[cols].groupby('admit_yn').mean()
# gre gpa
#admit_yn
#no 556.585366 3.324268
#yes 676.666667 3.750000
dfTest.to_csv('C:\\Users\\ujjwal.karn\\Desktop\\Python\\output.csv', sep=',')
|
macropy/experimental/test/pyxl_snippets.py | CyberFlameGO/macropy | 2,061 | 12724087 | # -*- coding: utf-8 -*-
import re
import unittest
from xml.etree import ElementTree
from macropy.case_classes import macros, case
from macropy.experimental.pyxl_strings import macros, p # noqa: F811
from macropy.tracing import macros, require # noqa: F811, F401
from pyxl import html # noqa: F401
def normalize(string):
return ElementTree.tostring(
ElementTree.fromstring(
re.sub("\n *", "", string)
),
encoding='utf8', method='xml')
class Tests(unittest.TestCase):
def test_inline_python(self):
image_name = "bolton.png"
image = p['<img src="/static/images/{image_name}" />']
text = "<NAME>"
block = p['<div>{image}{text}</div>']
element_list = [image, text]
block2 = p['<div>{element_list}</div>']
with require:
block2.to_string() == '<div><img src="/static/images/bolton.png" /><NAME></div>'
def test_dynamic(self):
items = ['Puppies', 'Dragons']
nav = p['<ul />']
for text in items:
nav.append(p['<li>{text}</li>'])
with require:
str(nav) == "<ul><li>Puppies</li><li>Dragons</li></ul>"
def test_attributes(self):
fruit = p['<div data-text="tangerine" />']
with require:
fruit.data_text == "tangerine"
fruit.set_attr('data-text', 'clementine')
with require:
fruit.attr('data-text') == "clementine"
def test_interpreter(self):
safe_value = "<b>Puppies!</b>"
unsafe_value = "<script>bad();</script>"
unsafe_attr = '">'
pyxl_blob = p["""<div class="{unsafe_attr}">
{unsafe_value}
{rawhtml(safe_value)}
</div>"""]
target_blob = '<div class="">"><script>bad();</script> <b>Puppies!</b></div>'
with require:
normalize(pyxl_blob.to_string()) == normalize(target_blob)
def test_modules(self):
from pyxl.element import x_element
@case
class User(name, profile_picture):
pass
class x_user_badge(x_element):
__attrs__ = {
'user': object,
}
def render(self):
return p["""
<div>
<img src="{self.user.profile_picture}" style="float: left; margin-right: 10px;"/>
<div style="display: table-cell;">
<div>{self.user.name}</div>
{self.children()}
</div>
</div>"""]
user = User("cowman", "http:/www.google.com")
content = p['<div>Any arbitrary content...</div>']
pyxl_blob = p['<user_badge user="{user}">{content}</user_badge>']
target_blob = """
<div>
<img src="http:/www.google.com" style="float: left; margin-right: 10px;" />
<div style="display: table-cell;"><div>cowman</div>
<div>Any arbitrary content...</div></div>
</div>"""
with require:
normalize(pyxl_blob.to_string()) == normalize(target_blob)
|
benchmarks/lucasb-eyer-heatmap/examples/customstamps.py | pointhi/benchmarks | 206 | 12724088 | #!/usr/bin/env python
# heatmap - High performance heatmap creation in C.
#
# The MIT License (MIT)
#
# Copyright (c) 2013 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from os.path import join as pjoin, dirname
from ctypes import CDLL, CFUNCTYPE, c_float, c_ulong, c_ubyte
import Image
# The stamp radius. The stamp will be a 2r+1 x 2r+1 square.
r = 15
# Load the heatmap library using ctypes
libhm = CDLL(pjoin(dirname(__file__), '..', 'libheatmap.so'))
# Create the default (round) stamp of given radius.
s_def = libhm.heatmap_stamp_gen(c_ulong(r))
# Create a custom stamp of given radius using a callback to set the stamp's content.
# The callback will be called for every pixel of the stamp, and should return the
# stamp's value at given distance to the stamp center.
# This is a convenient method to create rotationally-symmetric stamps.
HM_CB_FUNC = CFUNCTYPE(c_float, c_float)
s_fat = libhm.heatmap_stamp_gen_nonlinear(c_ulong(r), HM_CB_FUNC(lambda d: d**4))
s_pty = libhm.heatmap_stamp_gen_nonlinear(c_ulong(r), HM_CB_FUNC(lambda d: d**0.125))
# Create a custom stamp from a raw data array. The data needs to be
# laid out linearly in row-major (i.e. C) order. That means that the values
# for the pixels are ordered like:
# (x0, y0), (x1, y0), ..., (xN, y0), (x0, y1), ..., (xN, y1), ..., (xN, yM)
#
# Here, I create a "soft rectangle" stamp of fixed 10x5 size.
sw, sh = 10, 5
stampbuf = (c_float*(sw*sh))(
0.00, 0.16, 0.33, 0.33, 0.33, 0.33, 0.33, 0.33, 0.16, 0.00,
0.16, 0.33, 0.66, 0.66, 0.66, 0.66, 0.66, 0.66, 0.33, 0.16,
0.33, 0.66, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 0.66, 0.33,
0.16, 0.33, 0.66, 0.66, 0.66, 0.66, 0.66, 0.66, 0.33, 0.16,
0.00, 0.16, 0.33, 0.33, 0.33, 0.33, 0.33, 0.33, 0.16, 0.00,
)
s_rct = libhm.heatmap_stamp_load(c_ulong(sw), c_ulong(sh), stampbuf)
# Create a heatmap object large enough to hold one occurrence of each stamp.
d = 2*r+1
w, h = 3*d + 10, d
hm = libhm.heatmap_new(w, h)
# Add one point with each stamp next to each other; this way we can
# see what the stamps look like.
libhm.heatmap_add_point_with_stamp(hm, c_ulong( r), c_ulong(r), s_def)
libhm.heatmap_add_point_with_stamp(hm, c_ulong( d + r), c_ulong(r), s_fat)
libhm.heatmap_add_point_with_stamp(hm, c_ulong(2*d + r), c_ulong(r), s_pty)
libhm.heatmap_add_point_with_stamp(hm, c_ulong(3*d + 5), c_ulong(r), s_rct)
# As soon as we're done drawing, we can free the stamps.
# (Of course, we might as well do that later.)
libhm.heatmap_stamp_free(s_def)
libhm.heatmap_stamp_free(s_fat)
libhm.heatmap_stamp_free(s_pty)
libhm.heatmap_stamp_free(s_rct)
# This creates an image out of the heatmap.
# `rawimg` now contains the image data in 32-bit RGBA.
rawimg = (c_ubyte*(w*h*4))()
libhm.heatmap_render_default_to(hm, rawimg)
# Now that we've got a finished heatmap picture, we don't need the map anymore.
libhm.heatmap_free(hm)
# Use the PIL (for example) to make a png file out of that.
img = Image.frombuffer('RGBA', (w, h), rawimg, 'raw', 'RGBA', 0, 1)
img.save('stamps.png')
|
RecoLocalCalo/Castor/test/castor_cfg.py | ckamtsikis/cmssw | 852 | 12724092 |
import FWCore.ParameterSet.Config as cms
process = cms.Process("CastorProducts")
process.load("FWCore.MessageLogger.MessageLogger_cfi")
# specify the correct database tags which contain the updated gains and channelquality flags
process.load("CondCore.DBCommon.CondDBSetup_cfi")
process.CastorDbProducer = cms.ESProducer("CastorDbProducer")
process.es_pool = cms.ESSource(
"PoolDBESSource",
process.CondDBSetup,
timetype = cms.string('runnumber'),
connect = cms.string('frontier://cmsfrontier.cern.ch:8000/FrontierProd/CMS_COND_31X_HCAL'),
authenticationMethod = cms.untracked.uint32(0),
toGet = cms.VPSet(
cms.PSet(
record = cms.string('CastorPedestalsRcd'),
tag = cms.string('CastorPedestals_v2.0_offline')
),
cms.PSet(
record = cms.string('CastorPedestalWidthsRcd'),
tag = cms.string('CastorPedestalWidths_v2.0_offline')
),
cms.PSet(
record = cms.string('CastorGainsRcd'),
tag = cms.string('CastorGains_v2.0_offline')
),
cms.PSet(
record = cms.string('CastorGainWidthsRcd'),
tag = cms.string('CastorGainWidths_v2.0_offline')
),
cms.PSet(
record = cms.string('CastorQIEDataRcd'),
tag = cms.string('CastorQIEData_v2.0_offline')
),
cms.PSet(
record = cms.string('CastorChannelQualityRcd'),
tag = cms.string('CastorChannelQuality_v2.0_offline')
),
cms.PSet(
record = cms.string('CastorElectronicsMapRcd'),
tag = cms.string('CastorElectronicsMap_v2.0_offline')
)
)
)
# end of Db configuration
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
process.source = cms.Source("PoolSource",
duplicateCheckMode = cms.untracked.string("noDuplicateCheck"),
fileNames = cms.untracked.vstring(
'file:data_RAW2DIGI_L1Reco_RECO.root' # choose your input file here
)
)
# load CASTOR default reco chain (from towers on)
process.load('RecoLocalCalo.Castor.Castor_cff')
# construct the module which executes the RechitCorrector for data reconstructed in releases < 4.2.X
process.rechitcorrector = cms.EDProducer("RecHitCorrector",
rechitLabel = cms.InputTag("castorreco","","RECO"), # choose the original RecHit collection
revertFactor = cms.double(62.5), # this is the factor to go back to the original fC: 1/0.016
doInterCalib = cms.bool(True) # do intercalibration
)
# construct the module which executes the RechitCorrector for data reconstructed in releases >= 4.2.X
process.rechitcorrector42 = cms.EDProducer("RecHitCorrector",
rechitLabel = cms.InputTag("castorreco","","RECO"), # choose the original RecHit collection
revertFactor = cms.double(1), # this is the factor to go back to the original fC - not needed when data is already intercalibrated
doInterCalib = cms.bool(False) # don't do intercalibration, RecHitCorrector will only correct the EM response and remove BAD channels
)
# tell to the CastorCell reconstruction that he should use the new corrected rechits for releases < 4.2.X
#process.CastorCellReco.inputprocess = "rechitcorrector"
# tell to the CastorTower reconstruction that he should use the new corrected rechits for releases >= 4.2.X
process.CastorTowerReco.inputprocess = "rechitcorrector"
process.MyOutputModule = cms.OutputModule("PoolOutputModule",
fileName = cms.untracked.string('rechitcorrector_output.root') # choose your output file
)
# execute the rechitcorrector and afterwards do the reco chain again (towers -> jets)
process.producer = cms.Path(process.rechitcorrector*process.CastorFullReco)
process.end = cms.EndPath(process.MyOutputModule)
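# This configuration is typically executed with the CMSSW driver, e.g. (sketch):
#   cmsRun castor_cfg.py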
|
river/metrics/multioutput/micro.py | online-ml/creme | 1,105 | 12724096 | from river import metrics, utils
from river.metrics.multioutput.base import MultiOutputMetric
__all__ = ["MicroAverage"]
class MicroAverage(MultiOutputMetric, metrics.base.WrapperMetric):
"""Micro-average wrapper.
The provided metric is updated with the value of each output.
Parameters
----------
metric
A classification or a regression metric.
"""
def __init__(self, metric):
self._metric = metric
@property
def metric(self):
return self._metric
def works_with(self, model) -> bool:
if isinstance(self.metric, metrics.base.ClassificationMetric):
return utils.inspect.ismoclassifier(model)
return utils.inspect.ismoregressor(model)
def update(self, y_true, y_pred, sample_weight=1.0):
for i in y_pred:
self.metric.update(y_true[i], y_pred[i], sample_weight)
return self
def revert(self, y_true, y_pred, sample_weight=1.0):
for i in y_pred:
self.metric.revert(y_true[i], y_pred[i], sample_weight)
return self
def get(self):
return self.metric.get()
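# Usage sketch (illustrative values; assumes a per-output regression metric such as MAE):
# micro = MicroAverage(metrics.MAE())
# micro.update({'y0': 3.0, 'y1': 1.0}, {'y0': 2.5, 'y1': 1.5})
# micro.get()  # 0.5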
|
taskwiki/completion.py | Jasha10/taskwiki | 465 | 12724107 |
from functools import reduce, wraps
import re
from tasklib import TaskWarrior
from taskwiki import constants
from taskwiki import regexp
def complete_last_word(f):
@wraps(f)
def wrapper(self, arglead):
before, sep, after = arglead.rpartition(' ')
comps = f(self, after)
if comps:
return [before + sep + comp for comp in comps]
else:
return []
return wrapper
# TODO(2023-06-27): use functools once python 3.7 is EOL
def cached_property(f):
@wraps(f)
def wrapper(self):
k = '_cache_' + f.__name__
if k in self.__dict__:
return self.__dict__[k]
else:
v = f(self)
self.__dict__[k] = v
return v
return wrapper
# "must*opt" -> "must(o(p(t)?)?)?"
def prefix_regex(s):
must, _, opt = s.partition('*')
return must + reduce(lambda y, x: f"({x}{y})?", reversed(opt), '')
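# For example, prefix_regex('pro*ject') yields 'pro(j(e(c(t)?)?)?)?', so with re.fullmatch it
# accepts any prefix from 'pro' up to the full word 'project'.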
RE_PROJECT = re.compile(prefix_regex('pro*ject'))
RE_DATE = re.compile('|'.join(
[prefix_regex(r)
for r in "du*e un*til wa*it ent*ry end st*art sc*heduled".split()]))
RE_RECUR = re.compile(prefix_regex('re*cur'))
class Completion():
def __init__(self, tw):
self.tw = tw
@cached_property
def _attributes(self):
return sorted(self.tw.execute_command(['_columns']))
@cached_property
def _tags(self):
if self.tw.version < TaskWarrior.VERSION_2_4_5:
return sorted(self.tw.execute_command(['_tags']))
else:
return sorted(set(
tag
for tags in self.tw.execute_command(['_unique', 'tag'])
for tag in tags.split(',')))
@cached_property
def _projects(self):
if self.tw.version < TaskWarrior.VERSION_2_4_5:
return sorted(self.tw.execute_command(['_projects']))
else:
return sorted(self.tw.execute_command(['_unique', 'project']))
def _complete_any(self, w):
if w:
return []
return ['+', '-'] + [attr + ':' for attr in self._attributes()]
def _complete_attributes(self, w):
if not w.isalpha():
return []
return [attr + ':'
for attr in self._attributes()
if attr.startswith(w)]
def _complete_tags(self, w):
if not w or w[0] not in ['+', '-']:
return []
t = w[1:]
return [w[0] + tag
for tag in self._tags()
if tag.startswith(t)]
def _comp_words(self, w, pattern, words):
before, sep, after = w.partition(':')
if not sep or not re.fullmatch(pattern, before):
return []
return [before + sep + word
for word in words()
if word.startswith(after)]
def _complete_projects(self, w):
return self._comp_words(w, RE_PROJECT, self._projects)
def _complete_dates(self, w):
return self._comp_words(w, RE_DATE, lambda: constants.COMPLETION_DATE)
def _complete_recur(self, w):
return self._comp_words(w, RE_RECUR, lambda: constants.COMPLETION_RECUR)
@complete_last_word
def modify(self, w):
return \
self._complete_any(w) or \
self._complete_attributes(w) or \
self._complete_projects(w) or \
self._complete_tags(w) or \
self._complete_dates(w) or \
self._complete_recur(w) or \
[]
def omni_modstring_findstart(self, line):
m = re.search(regexp.GENERIC_TASK, line)
bline = line.encode("utf-8") # omni findstart needs byte offset
if m and not m.group('uuid') and b' -- ' in bline:
return bline.rfind(b' ') + 1
else:
return -1
def omni_modstring(self, w):
return \
self._complete_any(w) or \
self._complete_attributes(w) or \
self._complete_projects(w) or \
self._complete_tags(w) or \
self._complete_dates(w) or \
self._complete_recur(w) or \
[]
|
{{cookiecutter.project_slug}}/backend/app/app/api/api_v1/api.py | abnerjacobsen/full-stack | 516 | 12724110 |
# Import installed packages
# Import app code
from app.main import app
from app.core import config
from app.db.flask_session import db_session
from .api_docs import docs
from .endpoints import role
from .endpoints import token
from .endpoints import user
from .endpoints import utils
|
00Python/day12/PoliceVsTheif.py | HaoZhang95/PythonAndMachineLearning | 937 | 12724184 | """
Police vs Bandit
"""
class Gun(object):
def __init__(self, model, damage):
        # Gun model
self.model = model
        # Damage per shot
self.damage = damage
        # Bullet count, defaults to 0
self.bullet_count = 0
    # Override __str__
def __str__(self):
return "型号:%s, 杀伤力:%s, 子弹数量:%s" % (
self.model, self.damage, self.bullet_count
)
    # Reload bullets
def add_bullets(self, bullet_count):
self.bullet_count += bullet_count
print("填充子弹完成,当前数量为:%s" % bullet_count)
    # The gun fires a bullet at the bandit
def shoot(self, enemy):
        # Check whether the gun currently has bullets
if self.bullet_count <= 0:
print("%s 没有子弹, 请填充子弹" % self.model)
else:
            # If there are bullets, update the bullet count
self.bullet_count -= 1
            # Check whether the bandit was hit
if enemy is not None:
enemy.hurt(self)
print("发射了一颗子弹 %s 剩余子弹:%d" % (self.model, self.bullet_count))
class Player(object):
def __init__(self, name, hp=100):
        # Player name
self.name = name
        # Health points (HP)
self.hp = hp
        # Equipped gun
self.gun = None
def __str__(self):
        # If the bandit's HP is 0 or below
if self.hp <= 0:
return "%s 已经挂掉了..." % self.name
else:
            # No gun means a bandit; only the police carry a gun
if self.gun is None:
return "%s [%d]没有佩戴枪" % (self.name, self.hp)
else:
return "%s [%d] 枪:%s" % (self.name, self.hp, self.gun)
    # Method called when the bandit is hurt
def hurt(self, enemy_gun):
        # Hit: update the HP
self.hp -= enemy_gun.damage
        # Check the remaining HP
if self.hp <= 0:
print("%s 已经挂掉了..." % self.name)
else:
print("%s 被 %s 击中,剩余血量: %d" % (self.name, enemy_gun.model, self.hp))
    # The police opens fire
def fire(self, enemy):
        # The police checks whether they have a weapon
if self.gun is None:
print("%s 没有佩戴枪, 请佩戴枪" % self.name)
return
        # Check whether there are bullets left
if self.gun.bullet_count <= 0:
            # Automatically reload
self.gun.add_bullets(10)
        # Shoot the bandit
self.gun.shoot(enemy)
print("%s 正在向 %s 开火..." % (self.name, enemy.name))
# Test the main() function
def main():
    # Create a police officer
police_man = Player("警察")
    # Create a bandit
bad_man = Player("土匪", 70)
    # Shoot at the bandit (no gun equipped yet)
police_man.fire(bad_man)
    # Create an AK47 from the Gun class
ak47 = Gun("AK47", 50)
    # Arm the police with the gun
police_man.gun = ak47
    # Shoot at the bandit (gun equipped)
police_man.fire(bad_man)
police_man.fire(bad_man)
    # # Reload bullets
# ak47.add_bullets(50)
main()
|
projects/causal_scene_generation/causal_model/game_characters/procedural_generation/game_character_scene.py | amoskowitz14/causalML | 354 | 12724209 |
from PIL import ImageOps, Image
import os
image_dict = {
"Satyr": {
"base_path": "../images/satyr/PNG/",
"Attacking": "/reference/Attacking/attack.png",
"Taunt": "/reference/Taunt/taunt.png",
"Walking": "/reference/Walking/walking.png",
"Dying": "/reference/Dying/dying.png",
"Hurt": "/reference/Hurt/hurt.png",
"Idle": "/reference/Idle/idle.png"
},
"Golem": {
"base_path": "../images/golem/PNG/",
"Attacking": "/reference/Attacking/attack.png",
"Taunt": "/reference/Taunt/taunt.png",
"Walking": "/reference/Walking/walking.png",
"Dying": "/reference/Dying/dying.png",
"Hurt": "/reference/Hurt/hurt.png",
"Idle": "/reference/Idle/idle.png"
}
}
def get_concat_h(im1, im2):
dst = Image.new('RGB', (im1.width + im2.width, im1.height))
dst.paste(im1, (0, 0))
dst.paste(im2, (im1.width, 0))
return dst
def draw_duel(actor, reactor):
'''
Loading variables.
'''
act_name = actor["name"]
rct_name = reactor["name"]
action = actor["action"]
reaction = reactor["reaction"]
act_type = actor["type"]
rct_type = reactor["type"]
img1 = Image.open(image_dict[act_name]["base_path"]+act_type+image_dict[act_name][action])
img2 = Image.open(image_dict[rct_name]["base_path"]+rct_type+image_dict[rct_name][reaction])
#Flipping the reactor to give the feel of a duel.
img2 = ImageOps.mirror(img2)
return get_concat_h(img1, img2), img1, img2 |
flaskblog/auth/models.py | davshen/Flog | 202 | 12724247 |
from ..models import db
class OAuth2Token(db.Model):
id = db.Column(db.Integer(), primary_key=True)
name = db.Column(db.String(40))
token_type = db.Column(db.String(40))
access_token = db.Column(db.String(200))
refresh_token = db.Column(db.String(200))
expires_at = db.Column(db.Integer())
user_id = db.Column(db.Integer(), db.ForeignKey("user.id"))
user = db.relationship("User", backref=db.backref("tokens", lazy="dynamic"))
def to_token(self):
return dict(
access_token=self.access_token,
token_type=self.token_type,
refresh_token=self.refresh_token,
expires_at=self.expires_at,
)
|
third_party/chromite/cbuildbot/stages/handle_changes_stages_unittest.py | zipated/src | 2,151 | 12724263 | # Copyright 2017 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module containing the unit tests for handle_changes_stages."""
from __future__ import print_function
import itertools
import mock
from chromite.cbuildbot import relevant_changes
from chromite.cbuildbot.stages import handle_changes_stages
from chromite.cbuildbot.stages import generic_stages
from chromite.cbuildbot.stages import generic_stages_unittest
from chromite.cbuildbot.stages import sync_stages
from chromite.lib import builder_status_lib
from chromite.lib import cidb
from chromite.lib import clactions
from chromite.lib import config_lib
from chromite.lib import constants
from chromite.lib import fake_cidb
from chromite.lib import hwtest_results
from chromite.lib import timeout_util
from chromite.lib import tree_status
from chromite.lib.const import waterfall
# pylint: disable=protected-access
class CommitQueueHandleChangesStageTests(
    generic_stages_unittest.AbstractStageTestCase):
  """Tests for CommitQueueHandleChangesStage."""
BOT_ID = 'master-paladin'
def setUp(self):
self._Prepare()
self.partial_submit_changes = ['A', 'B']
self.other_changes = ['C', 'D']
self.changes = self.other_changes + self.partial_submit_changes
self.PatchObject(builder_status_lib, 'GetFailedMessages')
self.PatchObject(relevant_changes.RelevantChanges,
'_GetSlaveMappingAndCLActions',
return_value=(dict(), []))
self.PatchObject(clactions, 'GetRelevantChangesForBuilds')
self.PatchObject(tree_status, 'WaitForTreeStatus',
return_value=constants.TREE_OPEN)
self.PatchObject(relevant_changes.RelevantChanges,
'GetPreviouslyPassedSlavesForChanges')
self.mock_record_metrics = self.PatchObject(
handle_changes_stages.CommitQueueHandleChangesStage,
'_RecordSubmissionMetrics')
self.sync_stage = self._MockSyncStage()
self.completion_stage = mock.Mock()
def tearDown(self):
cidb.CIDBConnectionFactory.ClearMock()
def _MockSyncStage(self, tree_was_open=True):
sync_stage = sync_stages.CommitQueueSyncStage(self._run)
sync_stage.pool = mock.MagicMock()
sync_stage.pool.applied = self.changes
sync_stage.pool.tree_was_open = tree_was_open
sync_stage.pool.handle_failure_mock = self.PatchObject(
sync_stage.pool, 'HandleValidationFailure')
sync_stage.pool.handle_timeout_mock = self.PatchObject(
sync_stage.pool, 'HandleValidationTimeout')
sync_stage.pool.submit_pool_mock = self.PatchObject(
sync_stage.pool, 'SubmitPool')
return sync_stage
# pylint: disable=W0221
def ConstructStage(self, sync_stage=None, completion_stage=None):
sync_stage = sync_stage or self.sync_stage
completion_stage = completion_stage or self.completion_stage
return handle_changes_stages.CommitQueueHandleChangesStage(
self._run, sync_stage, completion_stage)
def test_GetBuildsPassedSyncStage(self):
"""Test _GetBuildsPassedSyncStage."""
stage = self.ConstructStage()
mock_cidb = mock.Mock()
mock_cidb.GetSlaveStages.return_value = [
{'build_config': 's_1', 'status': 'pass', 'name': 'CommitQueueSync'},
{'build_config': 's_2', 'status': 'pass', 'name': 'CommitQueueSync'},
{'build_config': 's_3', 'status': 'fail', 'name': 'CommitQueueSync'}]
mock_cidb.GetBuildStages.return_value = [
{'status': 'pass', 'name': 'CommitQueueSync'}]
builds = stage._GetBuildsPassedSyncStage(
'build_id', mock_cidb, ['id_1', 'id_2'])
self.assertItemsEqual(builds, ['s_1', 's_2', 'master-paladin'])
def _MockPartialSubmit(self, stage):
self.PatchObject(relevant_changes.RelevantChanges,
'GetRelevantChangesForSlaves',
return_value={'master-paladin': {mock.Mock()}})
self.PatchObject(relevant_changes.RelevantChanges,
'GetSubsysResultForSlaves')
self.PatchObject(handle_changes_stages.CommitQueueHandleChangesStage,
'_GetBuildsPassedSyncStage')
stage.sync_stage.pool.SubmitPartialPool.return_value = self.changes
def testHandleCommitQueueFailureWithOpenTree(self):
"""Test _HandleCommitQueueFailure with open tree."""
stage = self.ConstructStage()
self._MockPartialSubmit(stage)
self.PatchObject(tree_status, 'WaitForTreeStatus',
return_value=constants.TREE_OPEN)
self.PatchObject(generic_stages.BuilderStage,
'GetScheduledSlaveBuildbucketIds', return_value=[])
stage._HandleCommitQueueFailure(set(['test1']), set(), set(), False)
stage.sync_stage.pool.handle_failure_mock.assert_called_once_with(
mock.ANY, sanity=True, no_stat=set(), changes=self.changes,
failed_hwtests=None)
def testHandleCommitQueueFailureWithThrottledTree(self):
"""Test _HandleCommitQueueFailure with throttled tree."""
stage = self.ConstructStage()
self._MockPartialSubmit(stage)
self.PatchObject(tree_status, 'WaitForTreeStatus',
return_value=constants.TREE_THROTTLED)
self.PatchObject(generic_stages.BuilderStage,
'GetScheduledSlaveBuildbucketIds', return_value=[])
stage._HandleCommitQueueFailure(set(['test1']), set(), set(), False)
stage.sync_stage.pool.handle_failure_mock.assert_called_once_with(
mock.ANY, sanity=False, no_stat=set(), changes=self.changes,
failed_hwtests=None)
def testHandleCommitQueueFailureWithClosedTree(self):
"""Test _HandleCommitQueueFailure with closed tree."""
stage = self.ConstructStage()
self._MockPartialSubmit(stage)
self.PatchObject(tree_status, 'WaitForTreeStatus',
side_effect=timeout_util.TimeoutError())
self.PatchObject(generic_stages.BuilderStage,
'GetScheduledSlaveBuildbucketIds', return_value=[])
stage._HandleCommitQueueFailure(set(['test1']), set(), set(), False)
stage.sync_stage.pool.handle_failure_mock.assert_called_once_with(
mock.ANY, sanity=False, no_stat=set(), changes=self.changes,
failed_hwtests=None)
def testHandleCommitQueueFailureWithFailedHWtests(self):
"""Test _HandleCommitQueueFailure with failed HWtests."""
stage = self.ConstructStage()
self._MockPartialSubmit(stage)
master_build_id = stage._run.attrs.metadata.GetValue('build_id')
db = fake_cidb.FakeCIDBConnection()
slave_build_id = db.InsertBuild(
'slave_1', waterfall.WATERFALL_INTERNAL, 1, 'slave_1', 'bot_hostname',
master_build_id=master_build_id, buildbucket_id='123')
cidb.CIDBConnectionFactory.SetupMockCidb(db)
mock_failed_hwtests = mock.Mock()
mock_get_hwtests = self.PatchObject(
hwtest_results.HWTestResultManager,
'GetFailedHWTestsFromCIDB', return_value=mock_failed_hwtests)
self.PatchObject(tree_status, 'WaitForTreeStatus',
return_value=constants.TREE_OPEN)
self.PatchObject(generic_stages.BuilderStage,
'GetScheduledSlaveBuildbucketIds', return_value=['123'])
stage._HandleCommitQueueFailure(set(['test1']), set(), set(), False)
stage.sync_stage.pool.handle_failure_mock.assert_called_once_with(
mock.ANY, sanity=True, no_stat=set(), changes=self.changes,
failed_hwtests=mock_failed_hwtests)
mock_get_hwtests.assert_called_once_with(db, [slave_build_id])
def VerifyStage(self, failing, inflight, no_stat, handle_failure=False,
handle_timeout=False, sane_tot=True, stage=None,
all_slaves=None, slave_stages=None, fatal=True,
self_destructed=False):
"""Runs and Verifies PerformStage.
Args:
failing: The names of the builders that failed.
      inflight: The names of the builders that timed out.
no_stat: The names of the builders that had no status.
handle_failure: If True, calls HandleValidationFailure.
handle_timeout: If True, calls HandleValidationTimeout.
sane_tot: If not true, assumes TOT is not sane.
stage: If set, use this constructed stage, otherwise create own.
all_slaves: Optional set of all slave configs.
slave_stages: Optional list of slave stages.
fatal: Optional boolean indicating whether the completion_stage failed
with fatal. Default to True.
self_destructed: Optional boolean indicating whether the completion_stage
self_destructed. Default to False.
"""
if not stage:
stage = self.ConstructStage()
stage._run.attrs.metadata.UpdateWithDict(
{constants.SELF_DESTRUCTED_BUILD: self_destructed})
# Setup the stage to look at the specified configs.
all_slaves = list(all_slaves or set(failing + inflight + no_stat))
all_started_slaves = list(all_slaves or set(failing + inflight))
configs = [config_lib.BuildConfig(name=x) for x in all_slaves]
self.PatchObject(stage, '_GetSlaveConfigs', return_value=configs)
statuses = {}
for x in failing:
statuses[x] = builder_status_lib.BuilderStatus(
constants.BUILDER_STATUS_FAILED, message=None)
for x in inflight:
statuses[x] = builder_status_lib.BuilderStatus(
constants.BUILDER_STATUS_INFLIGHT, message=None)
for x in no_stat:
statuses[x] = builder_status_lib.BuilderStatus(
constants.BUILDER_STATUS_MISSING, message=None)
self.completion_stage.GetSlaveStatuses.return_value = statuses
self.completion_stage.GetFatal.return_value = fatal
# Setup DB and provide list of slave stages.
mock_cidb = mock.MagicMock()
cidb.CIDBConnectionFactory.SetupMockCidb(mock_cidb)
if slave_stages is None:
slave_stages = []
critical_stages = (
relevant_changes.TriageRelevantChanges.STAGE_SYNC)
for stage_name, slave in itertools.product(
critical_stages, all_started_slaves):
slave_stages.append({'name': stage_name,
'build_config': slave,
'status': constants.BUILDER_STATUS_PASSED})
self.PatchObject(mock_cidb, 'GetSlaveStages', return_value=slave_stages)
# Set up SubmitPartialPool to provide a list of changes to look at.
self.PatchObject(stage.sync_stage.pool, 'SubmitPartialPool',
return_value=self.other_changes)
# Actually run the stage.
stage.PerformStage()
if fatal:
stage.sync_stage.pool.submit_pool_mock.assert_not_called()
self.mock_record_metrics.assert_called_once_with(False)
else:
stage.sync_stage.pool.submit_pool_mock.assert_called_once_with(
reason=constants.STRATEGY_CQ_SUCCESS)
self.mock_record_metrics.assert_called_once_with(True)
if handle_failure:
stage.sync_stage.pool.handle_failure_mock.assert_called_once_with(
mock.ANY, no_stat=set(no_stat), sanity=sane_tot,
changes=self.other_changes, failed_hwtests=mock.ANY)
if handle_timeout:
stage.sync_stage.pool.handle_timeout_mock.assert_called_once_with(
sanity=mock.ANY, changes=self.other_changes)
def testCompletionSuccess(self):
"""Verify stage when the completion_stage succeeded."""
self.VerifyStage([], [], [], fatal=False)
def testCompletionWithInflightSlaves(self):
"""Verify stage when the completion_stage failed with inflight slaves."""
self.VerifyStage([], ['foo'], [], handle_timeout=True)
def testCompletionSelfDestructedWithInflightSlaves(self):
"""Verify stage when the completion_stage self_destructed with inflight."""
self.VerifyStage([], ['foo'], [], self_destructed=True, handle_failure=True)
def testCompletionSelfDestructedWithFailingSlaves(self):
"""Verify stage when the completion_stage self_destructed with failing."""
self.VerifyStage(['foo'], [], [], self_destructed=True, handle_failure=True)
def testCompletionSelfDestructedWithdNoStatSlaves(self):
"""Verify stage when the completion_stage self_destructed with no_stat."""
self.VerifyStage([], [], ['foo'], self_destructed=True, handle_failure=True)
|
web/migrations/0014_auto_20200115_2239.py | nonomal/oh-my-rss | 270 | 12724265 | # Generated by Django 2.2.7 on 2020-01-15 14:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('web', '0013_auto_20200108_2257'),
]
operations = [
migrations.AlterField(
model_name='article',
name='src_url',
field=models.CharField(max_length=1024, unique=True, verbose_name='原始链接'),
),
migrations.AlterField(
model_name='article',
name='title',
field=models.CharField(max_length=200, verbose_name='标题'),
),
migrations.AlterField(
model_name='site',
name='creator',
field=models.CharField(blank=True, choices=[('system', '系统录入'), ('user', '用户提交'), ('wemp', '微信公众号')], db_index=True, default='system', max_length=20, null=True, verbose_name='创建人'),
),
migrations.AlterField(
model_name='site',
name='link',
field=models.CharField(max_length=1024, verbose_name='主页'),
),
migrations.AlterField(
model_name='site',
name='rss',
field=models.CharField(blank=True, max_length=1024, null=True, verbose_name='RSS地址'),
),
]
|
leetcode/138.copy-list-with-random-pointer.py | geemaple/algorithm | 177 | 12724282 |
# Definition for singly-linked list with a random pointer.
# class RandomListNode(object):
# def __init__(self, x):
# self.label = x
# self.next = None
# self.random = None
class Solution(object):
def copyRandomList(self, head):
"""
:type head: RandomListNode
:rtype: RandomListNode
"""
if head is None:
return None
        # append each copy node right behind its original one
# 1 -> 1' -> 2 -> 2' -> .... -> n -> n' -> None
current = head
while (current is not None):
node = RandomListNode(current.label)
node.next = current.next
current.next = node
current = current.next.next
# copy random pointers
current = head
while(current is not None):
if current.random is not None:
current.next.random = current.random.next
current = current.next.next
# construct new linked list
new_head = head.next
new_cur = new_head
old_cur = head
while(new_cur is not None):
old_cur.next = new_cur.next
if new_cur.next is not None:
new_cur.next = new_cur.next.next
new_cur = new_cur.next
old_cur = old_cur.next
return new_head |
tests/test_structs.py | avivazran/UnrealEnginePython | 2,350 | 12724320 |
import unittest
import unreal_engine as ue
from unreal_engine.structs import ColorMaterialInput, Key
from unreal_engine.structs import StaticMeshSourceModel, MeshBuildSettings
class TestStructs(unittest.TestCase):
def test_new_struct(self):
material_input = ColorMaterialInput()
self.assertTrue('MaskR' in material_input.fields())
def test_new_struct_with_kwargs(self):
material_input = ColorMaterialInput(Mask=1, MaskR=1, MaskG=1, MaskB=0, MaskA=1)
self.assertEqual(material_input.Mask, 1)
self.assertEqual(material_input.MaskR, 1)
self.assertEqual(material_input.MaskG, 1)
self.assertEqual(material_input.MaskB, 0)
self.assertEqual(material_input.MaskA, 1)
def test_struct_set(self):
material_input = ColorMaterialInput()
material_input.MaskG = 1
self.assertEqual(material_input.MaskG, 1)
def test_struct_clone(self):
material_input = ColorMaterialInput(Mask=1, MaskR=0, MaskG=1, MaskB=0, MaskA=1)
material_input2 = material_input.clone()
self.assertEqual(material_input2.Mask, 1)
self.assertEqual(material_input2.MaskR, 0)
self.assertEqual(material_input2.MaskG, 1)
self.assertEqual(material_input2.MaskB, 0)
self.assertEqual(material_input2.MaskA, 1)
def test_cmp(self):
key1 = Key(KeyName='SpaceBar')
key2 = Key(KeyName='SpaceBar')
self.assertEqual(key1, key2)
def test_ptr(self):
source_model = StaticMeshSourceModel()
source_model.BuildSettings.bRecomputeNormals=False
source_model.BuildSettings.bRecomputeTangents=True
source_model.BuildSettings.bUseMikkTSpace=True
source_model.BuildSettings.bBuildAdjacencyBuffer=True
source_model.BuildSettings.bRemoveDegenerates=True
source_model2 = source_model.clone()
self.assertEqual(source_model2.BuildSettings.bRecomputeNormals, False)
self.assertEqual(source_model2.BuildSettings.bRecomputeTangents, True)
self.assertEqual(source_model2.BuildSettings.bUseMikkTSpace, True)
self.assertEqual(source_model2.BuildSettings.bBuildAdjacencyBuffer, True)
self.assertEqual(source_model2.BuildSettings.bRemoveDegenerates, True)
|
tests/test_utils.py | hoechenberger/pycircstat | 125 | 12724331 | from __future__ import absolute_import
import numpy as np
from numpy.testing import assert_allclose
from pycircstat import utils
|
python/ql/test/experimental/dataflow/pep_328/package/subpackage2/moduleZ.py | timoles/codeql | 4,036 | 12724332 | eggs = "eggs"
|
backend/storage/async_s3.py | xuantan/viewfinder | 645 | 12724335 | # Copyright 2012 Viewfinder Inc. All Rights Reserved.
"""Async version of Amazon S3 access library.
The "boto" open source library supports synchronous operations against
S3, but does not have asynchronous support. In a high-scale server
environment, this is a real problem, because it is not permissible to
block threads waiting on network I/O. This module layers support for non-
blocking async operations over the boto library. It re-uses boto
functionality whenever possible.
"""
__author__ = '<EMAIL> (<NAME>)'
import logging
import socket
import urllib
from tornado.httpclient import AsyncHTTPClient, HTTPRequest, HTTPError
from boto.connection import AWSAuthConnection
from boto.s3.connection import SubdomainCallingFormat
from viewfinder.backend.base.retry import RetryPolicy, CallWithRetryAsync
class S3RetryPolicy(RetryPolicy):
"""Define a retry policy that is adapted to the Amazon S3 service.
Retries will only be attempted for HTTP 500-level errors, or if there
was a basic network failure of some kind. By default, a request
against S3 will be retried three times, with retries starting after
at least 1/2 second, and exponentially backing off from there to a
maximum of 10 seconds.
"""
def __init__(self, max_tries=3, timeout=30, min_delay=.5, max_delay=10):
RetryPolicy.__init__(self, max_tries=max_tries, timeout=timeout, min_delay=min_delay, max_delay=max_delay,
check_result=self._ShouldRetry)
def _ShouldRetry(self, response):
"""Retry on:
1. HTTP error codes 500 (Internal Server Error) and 503 (Service
Unavailable).
2. Tornado HTTP error code 599, which typically indicates some kind
       of general network failure.
3. Socket-related errors.
"""
if response.error:
# Check for socket errors.
if type(response.error) == socket.error or type(response.error) == socket.gaierror:
return True
# Check for HTTP errors.
if isinstance(response.error, HTTPError):
code = response.error.code
if code in (500, 503, 599):
return True
return False
class AsyncS3Connection(AWSAuthConnection):
"""Sub-class that adds support for asynchronous S3 access. Callers provide
their Amazon AWS access key and secret key when an instance of the class
is created. Then, callers can repeatedly call 'make_request' in order to
make asynchronous HTTP calls against the S3 service. Using this API
rather than the standard boto API avoids blocking the calling thread
until the operation is complete.
"""
DefaultHost = 's3.amazonaws.com'
"""By default, connect to this S3 endpoint."""
DefaultCallingFormat = SubdomainCallingFormat()
"""By default, use the S3 sub-domain format for providing bucket name."""
def __init__(self, host=DefaultHost, aws_access_key_id=None, aws_secret_access_key=None,
retry_policy=S3RetryPolicy()):
AWSAuthConnection.__init__(self, host, aws_access_key_id, aws_secret_access_key)
self.retry_policy = retry_policy
def make_request(self, method, bucket='', key='', headers=None, params=None,
body=None, request_timeout=20.0, callback=None):
"""Start an asynchronous HTTP operation against the S3 service. When
the operation is complete, the 'callback' function will be invoked,
with the HTTP response object as its only parameter. If a failure
occurs during execution of the operation, it may be retried, according
to the retry policy with which this instance was initialized.
"""
CallWithRetryAsync(self.retry_policy, self._make_request, method, bucket, key,
headers, params, body, request_timeout,
callback=callback)
def _make_request(self, method, bucket, key, headers, params, body, request_timeout, callback):
"""Wrapped by CallWithRetryAsync in order to support retry."""
# Build the boto HTTP request in order to create the authorization header.
path = AsyncS3Connection.DefaultCallingFormat.build_path_base(bucket, key)
auth_path = AsyncS3Connection.DefaultCallingFormat.build_auth_path(bucket, key)
host = AsyncS3Connection.DefaultCallingFormat.build_host(self.server_name(), bucket)
# Only support byte strings for now.
assert not body or type(body) is str, "Only support byte strings (type=%s)." % type(body)
boto_request = self.build_base_http_request(method, path, auth_path,
{}, headers, body or '', host)
boto_request.authorize(connection=self)
# Log request for debugging.
debug_body = boto_request.body[:256].decode(errors='ignore') if boto_request.body else None
logging.debug('%s "%s://%s%s" headers: %s body: %s', boto_request.method, self.protocol,
boto_request.host, boto_request.path, boto_request.headers, debug_body)
request_url = '%s://%s%s' % (self.protocol, host, path)
if params:
request_url += '?' + urllib.urlencode(params)
# Build the tornado http client request (different version of HTTPRequest class).
tornado_request = HTTPRequest(request_url, method=method,
headers=boto_request.headers, body=body,
request_timeout=request_timeout)
# Start the asynchronous request. When it's complete, invoke 'callback', passing the HTTP response object.
http_client = AsyncHTTPClient()
http_client.fetch(tornado_request, callback=callback)
def _required_auth_capability(self):
"""Called by AWSAuthConnection.__init__ in order to determine which
auth handler to construct. In this case, S3 HMAC signing should be used.
"""
return ['s3']
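# Usage sketch (the credentials, bucket and key below are placeholders, not Viewfinder settings):
# conn = AsyncS3Connection(aws_access_key_id='AKIA...', aws_secret_access_key='...')
# conn.make_request('GET', bucket='my-bucket', key='photos/img.jpg',
#                   callback=lambda response: logging.info(response.code))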
|
examples/application_factory/web.py | aronianm/flask-apscheduler | 942 | 12724339 | """Example web view for application factory."""
from flask import Blueprint
from .extensions import scheduler
from .tasks import task2
web_bp = Blueprint("web_bp", __name__)
@web_bp.route("/")
def index():
"""Say hi!.
:url: /
:returns: hi!
"""
return "hi!"
@web_bp.route("/add")
def add():
"""Add a task.
:url: /add/
:returns: job
"""
job = scheduler.add_job(
func=task2,
trigger="interval",
seconds=10,
id="test job 2",
name="test job 2",
replace_existing=True,
)
return "%s added!" % job.name
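# Registration sketch (the call site is illustrative): this blueprint is attached inside the
# application factory, e.g. app.register_blueprint(web_bp).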
|
test/test_functions.py | codeclimate-testing/falcon | 115 | 12724372 |
from testing_helpers import wrap
@wrap
def nested(x):
def f(y):
return y+y
return f(x)
def test_nested():
nested(3)
nested(3.0)
nested([1])
@wrap
def nested_closure(x):
def f(y):
return x + y
return f(x)
def test_nested_closure():
nested_closure(3)
nested_closure(3.0)
nested_closure([1])
@wrap
def nested_closure_repeat():
for i in xrange(50):
temp = nested_closure(i)
return temp
def test_nested_closure_repeat():
nested_closure_repeat()
if __name__ == '__main__':
import nose
nose.main() |
Python/Algorithms/Dynamic-Programming/0-1_knapsack.py | ThunderZ007/Data-Structures-and-Algorithms | 245 | 12724378 |
# Input Cases
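# Example session (illustrative): for 1 test case with 3 items, capacity 50,
# values "60 100 120" and weights "10 20 30", the script prints "Max Value Picked : 220".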
t = int(input("\nTotal Test Cases : "))
for i in range(1,t+1):
print(f"\n------------ CASE #{i} -------------")
n = int(input("\nTotal Items : "))
m = int(input("Max Capacity : "))
v = [int(i) for i in input("\nValues : ").split(" ")]
w = [int(i) for i in input("Weights : ").split(" ")]
# Tabulation (DP)
dp = [[0 for x in range(m+1)] for x in range(n+1)]
for i in range(n+1):
for j in range(m+1):
if i == 0 or j == 0:
dp[i][j] = 0
elif w[i-1]<=j:
dp[i][j] = max(dp[i-1][j],dp[i-1][j-w[i-1]]+v[i-1])
else:
dp[i][j] = dp[i-1][j]
print(f"\nMax Value Picked : {dp[n][m]}") |
main.py | ssysm/DD_KaoRou2 | 187 | 12724417 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import os, sys, requests
from random import randint
from PySide2.QtWidgets import QApplication, QSplashScreen
from PySide2.QtGui import QFont, QPixmap, QIcon
from PySide2.QtCore import Qt, QThread
from utils.main_ui import MainWindow
class downloadUpdates(QThread):
def __init__(self, parent=None):
super(downloadUpdates, self).__init__(parent)
self.headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) \
Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0'}
def checkUtils(self):
response = requests.get(r'https://github.com/jiafangjun/DD_KaoRou2/tree/master/utils', headers=self.headers)
html = response.text.split('\n')
return html
def downloadSplash(self, html):
for line in html:
if '/splash_' in line and '.png' in line:
splashPage = 'https://github.com/' + line.split('href="')[1].split('"')[0]
localSplashPath = r'utils/%s' % splashPage.split('/')[-1]
if not os.path.exists(localSplashPath):
response = requests.get(splashPage, headers=self.headers)
html = response.text.split('\n')
for l in html:
if localSplashPath + '?raw=true' in l:
splashLink = 'https://github.com' + l.split('src="')[1].split('"')[0]
response = requests.get(splashLink)
img = response.content
with open(localSplashPath, 'wb') as f:
f.write(img)  # write the downloaded image bytes to the local file
def run(self):
utilsHtml = self.checkUtils()
self.downloadSplash(utilsHtml)
if __name__ == '__main__':
QApplication.setAttribute(Qt.AA_UseHighDpiPixmaps, True)
QApplication.setAttribute(Qt.AA_EnableHighDpiScaling, True)
app = QApplication(sys.argv)
splashList = []
for f in os.listdir('utils'):
if f.endswith('.png') and 'splash_' in f:
splashList.append(r'utils\%s' % f)
if splashList:
splashPath = splashList[randint(0, len(splashList) - 1)] # pick a random splash screen image
else:
splashPath = ''
splash = QSplashScreen(QPixmap(splashPath))
splash.show()
qss = ''
try:
with open('utils/qdark.qss', 'r') as f:
qss = f.read()
except:
print('警告!找不到QSS文件!请从github项目地址下载完整文件。')  # warning: QSS file not found! Download the complete files from the GitHub project page.
app.setStyleSheet(qss)
app.setFont(QFont('微软雅黑', 9))
desktop = app.desktop()
mainWindow = MainWindow()
mainWindow.setWindowIcon(QIcon(r'utils\favicon.ico'))
screen = app.primaryScreen().geometry()
mainWindow.resize(screen.width() * 0.75, screen.height() * 0.75)
size = mainWindow.geometry()
mainWindow.move((screen.width() - size.width()) / 2,
(screen.height() - size.height()) / 2)
mainWindow.showMaximized()
mainWindow.show()
splash.finish(mainWindow)
downloads = downloadUpdates()
downloads.start()
sys.exit(app.exec_())
|
brainstorm/layers/mask_layer.py | PyCN/brainstorm | 1,473 | 12724426 | #!/usr/bin/env python
# coding=utf-8
from __future__ import division, print_function, unicode_literals
from collections import OrderedDict
from brainstorm.layers.base_layer import Layer
from brainstorm.structure.buffer_structure import StructureTemplate
from brainstorm.structure.construction import ConstructionWrapper
from brainstorm.utils import LayerValidationError, product
def Mask(name=None):
"""Create a Mask layer."""
return ConstructionWrapper.create(MaskLayerImpl, name=name)
class MaskLayerImpl(Layer):
expected_inputs = {'default': StructureTemplate('T', 'B', '...'),
'mask': StructureTemplate('T', 'B', '...')}
computes_no_input_deltas_for = ['mask']
def setup(self, kwargs, in_shapes):
in_shape = in_shapes['default'].feature_shape
expected_shape = in_shape[:-1] + (1,)
if in_shapes['mask'].feature_shape == (1,):
self.flatten_dim = 2
elif in_shapes['mask'].feature_shape in [expected_shape, in_shape]:
self.flatten_dim = len(in_shape) + 1
else:
raise LayerValidationError(
"Shape of the mask did not match shape of the default inputs. "
"Should be either ('T', 'B', 1) or {} or {}, but was {}"
.format(('T', 'B') + expected_shape,
in_shapes['default'].shape,
in_shapes['mask']))
outputs = OrderedDict()
outputs['default'] = in_shapes['default']
return outputs, OrderedDict(), OrderedDict()
def flatten_buffer(self, buffer):
pre = buffer.shape[:self.flatten_dim]
post = buffer.shape[self.flatten_dim:]
return buffer.reshape((int(product(pre)), int(product(post))))
def forward_pass(self, buffers, training_pass=True):
_h = self.handler
flat_inp = self.flatten_buffer(buffers.inputs.default)
flat_mask = self.flatten_buffer(buffers.inputs.mask)
flat_out = self.flatten_buffer(buffers.outputs.default)
_h.mult_mv(flat_inp, flat_mask, out=flat_out)
def backward_pass(self, buffers):
_h = self.handler
flat_out_deltas = self.flatten_buffer(buffers.output_deltas.default)
tmp = self.handler.allocate(flat_out_deltas.shape)
flat_mask = self.flatten_buffer(buffers.inputs.mask)
flat_in_deltas = self.flatten_buffer(buffers.input_deltas.default)
_h.mult_mv(flat_out_deltas, flat_mask, tmp)
_h.add_tt(tmp, flat_in_deltas, flat_in_deltas)
|
leonardo/module/search/tasks.py | timgates42/django-leonardo | 102 | 12724427 |
from __future__ import absolute_import
import os
from celery import shared_task
from django.core import management
from leonardo.decorators import catch_result
from django.conf import settings
@shared_task
@catch_result
def sync_search_indexes():
management.call_command('rebuild_index', interactive=False)
# patch whoosh backend
haystack = getattr(settings, 'HAYSTACK_CONNECTIONS', None)
if 'default' in haystack and 'whoosh' in haystack['default']['ENGINE']:
try:
os.remove(os.path.join(
haystack['default']['PATH'], 'MAIN_WRITELOCK'))
except:
pass
return {'result': 'Rebuild index OK'}
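# Illustrative invocation (not part of the original module): as a Celery shared task this
# can be queued asynchronously with sync_search_indexes.delay(), or run inline by calling
# sync_search_indexes() directly.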
|
release/stubs.min/System/__init___parts/HttpStyleUriParser.py | htlcnn/ironpython-stubs | 182 | 12724447 | class HttpStyleUriParser(UriParser):
"""
A customizable parser based on the HTTP scheme.
HttpStyleUriParser()
"""
|
exercises/de/solution_03_14_03.py | Jette16/spacy-course | 2,085 | 12724472 |
from spacy.lang.de import German
nlp = German()
people = ["<NAME>", "<NAME>", "<NAME>"]
# Erstelle eine Liste von Patterns für den PhraseMatcher (create a list of patterns for the PhraseMatcher)
patterns = list(nlp.pipe(people))
|
qf_lib/documents_utils/document_exporting/pdf_exporter.py | webclinic017/qf-lib | 198 | 12724473 | # Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from os.path import join, abspath, dirname
from typing import List
from weasyprint import HTML, CSS
from qf_lib.common.utils.logging.qf_parent_logger import qf_logger
from qf_lib.documents_utils.document_exporting.document import Document
from qf_lib.documents_utils.document_exporting.document_exporter import DocumentExporter
from qf_lib.settings import Settings
from qf_lib.starting_dir import get_starting_dir_abs_path
class PDFExporter(DocumentExporter):
"""
Stores elements such as the ParagraphElement and ChartElement in order to build a PDF based on them once they
have all been added. If there is a "document_css_directory" attribute set in the Settings, then CSS files from that
directory will be applied for styling the output page. Otherwise the default styling will be applied.
"""
DEFAULT_CSS_DIR_NAME = 'default_css'
def __init__(self, settings: Settings):
super().__init__(settings)
if hasattr(settings, 'document_css_directory'):
self._document_css_dir = join(get_starting_dir_abs_path(), settings.document_css_directory)
else:
this_dir_abs_path = abspath(dirname(__file__))
self._document_css_dir = join(this_dir_abs_path, self.DEFAULT_CSS_DIR_NAME)
self.logger = qf_logger.getChild(self.__class__.__name__)
def set_default_directory_level_up(self):
"""
Sets the document_css_dir one level above 'default_css', so that CSS files in other folders can be applied.
When calling generate() afterwards, css_file_names must be given as paths relative to this new level,
e.g. 'default_css\main'.
"""
self._document_css_dir = abspath(dirname(__file__))
def generate(self, documents: List[Document], export_dir: str, filename: str,
include_table_of_contents=False, css_file_names: List[str] = None) -> str:
"""
Merges all documents into one and then exports the merged document to a PDF file in the given directory.
Multiple CSS files may be defined: the base CSS files are applied first, followed sequentially
by the files listed in css_file_names.
The CSS files must be placed in the Settings.document_css_directory directory.
CSS files placed in Settings.document_css_directory/base will be applied for all exported PDF documents.
Parameters
----------
documents
list of documents for which files should be generated
export_dir
relative path to the directory (relative to the output root directory) in which the PDF should be saved
filename
filename under which the merged document should be saved
include_table_of_contents
if True then table of contents will be generated at the beginning of the file
css_file_names
names of css files which should be applied for generating the PDF
Returns
-------
the absolute path to the output PDF file that was saved
"""
css_file_paths = []
documents = [self._merge_documents(documents, filename)]
# Find the output directory
output_dir = self.get_output_dir(export_dir)
output_filename = os.path.join(output_dir, filename)
for document in documents:
if include_table_of_contents:
self._add_table_of_contents(document)
# Generate the full document HTML
self.logger.info("Generating HTML for PDF...")
html = document.generate_html()
# Automatically include all the css files in the `document_css/base` directory
base_css = os.listdir(self._document_css_dir)
for name in base_css:
path = os.path.join(self._document_css_dir, name)
if os.path.isfile(path):
css_file_paths.append(CSS(path))
# If we've set custom css files, add them to the pdf
if css_file_names is not None:
for name in css_file_names:
css_file_paths.append(CSS(os.path.join(self._document_css_dir, name + ".css")))
# Parse the HTML.
html = HTML(string=html)
# Write out the PDF.
self.logger.info("Rendering PDF in {}...".format(output_filename))
html.write_pdf(output_filename, css_file_paths)
return output_filename
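# Illustrative usage sketch (the settings path, document and directory names below are
# assumptions, not part of this module):
#
#   settings = Settings(settings_path="settings.json")   # hypothetical qf_lib settings file
#   exporter = PDFExporter(settings)
#   pdf_path = exporter.generate([document], export_dir="reports", filename="report.pdf",
#                                include_table_of_contents=True)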
|
solutions/LeetCode/Python3/22.py | timxor/leetcode-journal | 854 | 12724487 | __________________________________________________________________________________________________
36ms
class Solution:
def generateParenthesis(self, n: 'int') -> 'List[str]':
if n == 0: return ['']
ans = []
def backtrack(S = '',left = 0, right = 0):
if len(S) == 2 * n:
ans.append(S)
return
if left < n:
backtrack(S+'(',left + 1, right)
if right < left:
backtrack(S+')',left,right + 1)
backtrack()
return ans
__________________________________________________________________________________________________
40ms
class Solution:
def generateParenthesis(self, n):
"""
:type n: int
:rtype: List[str]
"""
res = []
def helper(l_num = 0, r_num = 0, s = ''):
if len(s) == 2*n:
res.append(s)
return
if l_num < n:
helper(l_num+1,r_num,s+'(')
if l_num > r_num:
helper(l_num,r_num+1,s+')')
helper()
return res
__________________________________________________________________________________________________
44ms
class Solution:
def generateParenthesis(self, n: int) -> List[str]:
result = []
if n == 0:
return [""]
def gene_par(index, present_sum, strs):
if index == 2*n:
if present_sum == 1:
result.append(strs+")")
return
if present_sum > 0:
gene_par(index + 1, present_sum+1, strs+"(")
gene_par(index + 1, present_sum-1, strs + ")")
else:
gene_par(index + 1, present_sum + 1, strs + "(")
gene_par(1, 0, "")
return result
__________________________________________________________________________________________________
12396 kb
class Solution:
def generateParenthesis(self, n: 'int') -> 'List[str]':
if n <= 0:
return []
if n == 1:
return ["()"]
else:
prev = self.generateParenthesis(n-1)
fresh = set()
for line in prev:
fresh.add("()" + line)
fresh.add(line + "()")
fresh.add("(" + line + ")")
for i in range(1,len(line)):
fresh.add(line[:i] + "()" + line[i:])
return list(fresh)
__________________________________________________________________________________________________
12424 kb
class Solution:
def generateParenthesis(self, n: 'int') -> 'List[str]':
res=[]
def rec(str, iter):
if iter == 0 :
#print(str)
if str not in res:
res.append(str)
return
rec(str+'()', iter-1)
rec('()'+str, iter-1)
rec('('+str+')', iter-1)
#rec('', n)
#return res
def BT(str, left, right):
#print(str,left,right)
if(left<0):
return
if(right<0):
return
if(left>right ):
return
if(left==0 and right==0):
print(str)
res.append(str)
return
BT(str+'(', left-1, right)
BT(str+')', left, right-1)
BT('', n,n)
return res
__________________________________________________________________________________________________
|
pyretri/datasets/folder/folder_base.py | dongan-beta/PyRetri | 1,063 | 12724532 |
# -*- coding: utf-8 -*-
import numpy as np
from PIL import Image
import pickle
import os
from abc import abstractmethod
from ...utils import ModuleBase
from typing import Dict, List
class FolderBase(ModuleBase):
"""
The base class for folder helpers, which load the dataset info file and read images.
"""
default_hyper_params = dict()
def __init__(self, data_json_path: str, transformer: callable or None = None, hps: Dict or None = None):
"""
Args:
data_json_path (str): the path for data json file.
transformer (callable): a list of data augmentation operations.
hps (dict): default hyper parameters in a dict (keys, values).
"""
super(FolderBase, self).__init__(hps)
with open(data_json_path, "rb") as f:
self.data_info = pickle.load(f)
self.data_json_path = data_json_path
self.transformer = transformer
def __len__(self) -> int:
pass
@abstractmethod
def __getitem__(self, idx: int) -> Dict:
pass
def find_classes(self, info_dicts: Dict) -> (List, Dict):
pass
def read_img(self, path: str) -> Image:
"""
Load image.
Args:
path (str): the path of the image.
Returns:
image (Image): shape (H, W, C).
"""
try:
img = Image.open(path)
img = img.convert("RGB")
return img
except Exception as e:
print('[DataSet]: WARNING image can not be loaded: {}'.format(str(e)))
return None
|
code_examples/cython_spring16/geometry_py.py | mikofski/thw-berkeley | 106 | 12724564 |
import math
def sum_circle(data, x, y, r):
"""Sum array values that fall within the given circle.
Parameters
----------
data : numpy.ndarray
The array to sum.
x, y, r : float
The center and radius of circle, in array coordinates.
"""
imin = math.floor((x - r) + 0.5)
imax = math.floor((x + r) + 0.5)
jmin = math.floor((y - r) + 0.5)
jmax = math.floor((y + r) + 0.5)
r2 = r * r
sum = 0.0
for j in range(jmin, jmax+1):
for i in range(imin, imax+1):
if (i - x)**2 + (j - y)**2 < r2:
sum += data[j, i]
return sum
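if __name__ == '__main__':
    # Minimal self-check sketch (not part of the original example). For a uniform array of
    # ones the result should be close to the circle area, pi * r**2.
    import numpy as np
    data = np.ones((100, 100))
    print(sum_circle(data, x=50.0, y=50.0, r=10.0))  # expect roughly 314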
|
example_dialogs.py | timeopochin/picotui | 739 | 12724587 | from picotui.context import Context
from picotui.dialogs import *
with Context():
# Feel free to comment out extra dialogs to play with a particular
# in detail
d = DTextEntry(25, "Hello World", title="Wazzup?")
res = d.result()
d = DMultiEntry(25, 5, "Hello\nWorld".split("\n"), title="Comment:")
res = d.result()
print(res)
|
homura/vision/models/densenet.py | wangjunyan305/homura | 102 | 12724628 |
"""
DenseNet for CIFAR dataset proposed by Huang et al. 2016 ("Densely Connected Convolutional Networks")
https://github.com/liuzhuang13/DenseNet
"""
import torch
from torch import nn
from torch.nn import functional as F
from homura.vision.models import MODEL_REGISTRY
__all__ = ["densenet40", "densenet100", "CIFARDenseNet"]
_padding = {"reflect": nn.ReflectionPad2d,
"zero": nn.ZeroPad2d}
class _DenseLayer(nn.Module):
def __init__(self, in_channels, bn_size, growth_rate, dropout_rate, padding):
super(_DenseLayer, self).__init__()
assert padding in _padding.keys()
self.dropout_rate = dropout_rate
self.layers = nn.Sequential(nn.BatchNorm2d(in_channels),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels, bn_size * growth_rate, kernel_size=1, stride=1,
bias=False),
nn.BatchNorm2d(bn_size * growth_rate),
nn.ReLU(inplace=True),
_padding[padding](1),
nn.Conv2d(bn_size * growth_rate, growth_rate, kernel_size=3, stride=1,
bias=False))
def forward(self, input):
x = self.layers(input)
if self.dropout_rate > 0:
x = F.dropout(x, p=self.dropout_rate, training=self.training)
return torch.cat([input, x], dim=1)
class _DenseBlock(nn.Module):
def __init__(self, num_layers, in_channels, bn_size, growth_rate, dropout_rate, padding):
super(_DenseBlock, self).__init__()
layers = [_DenseLayer(in_channels + i * growth_rate, bn_size, growth_rate, dropout_rate, padding)
for i in range(num_layers)]
self.layers = nn.Sequential(*layers)
def forward(self, input):
return self.layers(input)
class _Transition(nn.Module):
def __init__(self, in_channels, out_channels):
super(_Transition, self).__init__()
self.layers = nn.Sequential(nn.BatchNorm2d(in_channels),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, bias=False),
nn.AvgPool2d(kernel_size=2, stride=2))
def forward(self, input):
return self.layers(input)
@MODEL_REGISTRY.register
class CIFARDenseNet(nn.Module):
"""
DenseNet-BC (bottleneck and compression) for CIFAR dataset. For ImageNet classification, use `torchvision`'s implementation.
:param num_classes: (int) number of output classes
:param init_channels: (int) output channels which is performed on the input. 16 or 2 * growth_rate
:param num_layers: (int) number of layers of each dense block
:param growth_rate: (int) growth rate, which is referred as k in the paper
:param dropout_rate: (float=0) dropout rate
:param bn_size: (int=4) multiplicative factor in bottleneck
:param reduction: (int=2) divisional factor in transition
"""
def __init__(self, num_classes, init_channels, num_layers, growth_rate, dropout_rate=0, bn_size=4, reduction=2,
padding="reflect"):
super(CIFARDenseNet, self).__init__()
# initial conv.
num_channels = init_channels
layers = [_padding[padding](1), nn.Conv2d(3, num_channels, kernel_size=3, bias=False)]
# first and second dense-block+transition
for _ in range(2):
layers.append(_DenseBlock(num_layers, in_channels=num_channels, bn_size=bn_size,
growth_rate=growth_rate, dropout_rate=dropout_rate, padding=padding))
num_channels = num_channels + num_layers * growth_rate
layers.append(_Transition(num_channels, num_channels // reduction))
num_channels = num_channels // reduction
# third denseblock
layers.append(_DenseBlock(num_layers, in_channels=num_channels, bn_size=bn_size, growth_rate=growth_rate,
dropout_rate=dropout_rate, padding="reflect"))
self.features = nn.Sequential(*layers)
self.bn1 = nn.BatchNorm2d(num_channels + num_layers * growth_rate)
self.linear = nn.Linear(num_channels + num_layers * growth_rate, num_classes)
# initialize parameters
self.initialize()
def forward(self, input):
x = self.features(input)
x = F.relu(self.bn1(x), inplace=True)
x = F.adaptive_avg_pool2d(x, 1)
x = x.view(x.size(0), -1)
return self.linear(x)
def initialize(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight.data)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def _cifar_densenet(depth, num_classes, growth_rate=12, **kwargs):
n = (depth - 4) // 6
model = CIFARDenseNet(num_classes, init_channels=2 * growth_rate, num_layers=n, growth_rate=growth_rate,
padding="reflect", **kwargs)
return model
@MODEL_REGISTRY.register
def densenet100(num_classes, **kwargs):
return _cifar_densenet(100, num_classes, **kwargs)
@MODEL_REGISTRY.register
def densenet40(num_classes, **kwargs):
return _cifar_densenet(40, num_classes, **kwargs)
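if __name__ == '__main__':
    # Quick shape-check sketch (not part of the original module).
    model = densenet40(num_classes=10)
    logits = model(torch.randn(2, 3, 32, 32))
    print(logits.shape)  # expected: torch.Size([2, 10])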
|
models/ops.py | yhgon/tacotron | 242 | 12724638 | import tensorflow as tf
from tensorflow.contrib.seq2seq.python.ops.helper import CustomHelper
from tensorflow.contrib.rnn import *
class InferenceHelper(CustomHelper):
def _initialize_fn(self):
# we always reconstruct the whole output
finished = tf.tile([False], [self._batch_size])
next_inputs = tf.zeros([self._batch_size, self._out_size], dtype=tf.float32)
return (finished, next_inputs)
def _sample_fn(self, time, outputs, state):
# we're not sampling from a vocab so we don't care about this function
return tf.zeros([self._batch_size], dtype=tf.int32)  # dummy sample ids; batch size was previously hard-coded to 32
def _next_inputs_fn(self, time, outputs, state, sample_ids):
del time, sample_ids
finished = tf.tile([False], [self._batch_size])
next_inputs = outputs
return (finished, next_inputs, state)
def __init__(self, batch_size, out_size):
self._batch_size = batch_size
self._out_size = out_size
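# The highway() helper below implements a highway layer: out = H(x) * T(x) + x * (1 - T(x)),
# where T is a sigmoid "transform gate" and H a ReLU transform; inputs are first projected
# to `units` dimensions when their last dimension does not match.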
def highway(inputs, units=128):
# correct input shape
if inputs.shape[-1] != units:
inputs = tf.layers.dense(inputs, units=units)
T = tf.layers.dense(
inputs,
units=units,
activation=tf.nn.sigmoid,
)
# TODO update bias initial value
H = tf.layers.dense(
inputs,
units=units,
activation=tf.nn.relu
)
C = H*T + inputs*(1-T)
return C
def CBHG(inputs, speaker_embed=None,
K=16, c=[128,128,128], gru_units=128, num_highway_layers=4, num_conv_proj=2):
with tf.variable_scope('cbhg'):
# 1D convolution bank
conv_bank = [tf.layers.conv1d(
inputs,
filters=c[0],
kernel_size=k,
padding='same',
activation=tf.nn.relu
) for k in range(1, K+1)]
conv_bank = tf.concat(conv_bank, -1)
conv_bank = tf.layers.batch_normalization(conv_bank)
conv_bank = tf.layers.max_pooling1d(
conv_bank,
pool_size=2,
strides=1,
padding='same'
)
tf.summary.histogram('conv_bank', conv_bank)
assert num_conv_proj == len(c) - 1
conv_proj = conv_bank
for layer in range(num_conv_proj):
activation = None if layer == num_conv_proj - 1 else tf.nn.relu
# conv projections
conv_proj = tf.layers.conv1d(
conv_proj,
filters=c[layer+1],
kernel_size=3,
padding='same',
activation=activation
)
conv_proj = tf.layers.batch_normalization(conv_proj)
tf.summary.histogram('conv_proj', conv_proj)
# residual connection
conv_res = conv_proj + inputs
tf.summary.histogram('conv_res', conv_res)
# highway feature extraction
h = conv_res
for layer in range(num_highway_layers):
with tf.variable_scope('highway_' + str(layer)):
# site specific speaker embedding
if speaker_embed is not None:
s = tf.layers.dense(speaker_embed, h.shape[-1], activation=tf.nn.relu)
s = tf.tile(tf.expand_dims(s, 1), [1, tf.shape(h)[1], 1])
h = tf.concat([h, s], 2)
h = highway(h)
tf.summary.histogram('highway_out', h)
# site specific speaker embedding
if speaker_embed is not None:
s = tf.layers.dense(speaker_embed, gru_units, activation=tf.nn.relu)
else:
s = None
# bi-GRU
forward_gru_cell = GRUCell(gru_units)
backward_gru_cell = GRUCell(gru_units)
out, _ = tf.nn.bidirectional_dynamic_rnn(
forward_gru_cell,
backward_gru_cell,
h,
initial_state_fw=s,
initial_state_bw=s,
dtype=tf.float32
)
out = tf.concat(out, 2)
tf.summary.histogram('encoded', out)
return out
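if __name__ == '__main__':
    # Graph-construction sketch (not part of the original module). Assumes TF1-style graph
    # mode, matching the tf.layers / tf.contrib.rnn APIs used above.
    dummy_inputs = tf.placeholder(tf.float32, [None, None, 128], name='cbhg_inputs')
    encoded = CBHG(dummy_inputs)
    print(encoded)  # shape: (batch, time, 2 * gru_units) = (?, ?, 256)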
|
examples/plot_sars.py | skovic/SHARPpy | 163 | 12724658 |
"""
Plotting data from the SARS database
====================================
"""
import sharppy.sharptab as tab
import sharppy.databases.sars as sars
import numpy as np
import os
import matplotlib.pyplot as plt
database_fn = os.path.join( os.path.dirname( sars.__file__ ), 'sars_supercell.txt')
supercell_database = np.loadtxt(database_fn, skiprows=1, dtype=bytes, comments="%%%%")
magnitude = []
mlcape = []
srh01 = []
for record in supercell_database:
magnitude.append(int(record[1]))
mlcape.append(float(record[3]))
srh01.append(float(record[6]))
plt.grid()
plt.scatter(mlcape, srh01, c=magnitude, marker='.')
plt.colorbar()
plt.xlabel("MLCAPE [J/kg]")
plt.ylabel(r'0-1 km Storm Relative Helicity [$m^{2}/s^{2}$]')
plt.savefig('plot_sars.png', bbox_inches='tight')
plt.show()
|
GCC-paddle/gcc/tasks/__init__.py | S-HuaBomb/Contrib | 243 | 12724659 | from gcc.models.emb import (
FromNumpy,
FromNumpyAlign,
FromNumpyGraph,
GraphWave,
ProNE,
Zero,
)
def build_model(name, hidden_size, **model_args):
return {
"zero": Zero,
"from_numpy": FromNumpy,
"from_numpy_align": FromNumpyAlign,
"from_numpy_graph": FromNumpyGraph,
"prone": ProNE,
"graphwave": GraphWave,
}[name](hidden_size, **model_args)
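# Illustrative call (the extra keyword arguments each embedding class accepts are not shown
# here and would need to match its constructor):
#   emb = build_model("zero", hidden_size=64)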
|
micro-benchmark/snippets/assignments/chained/main.py | WenJinfeng/PyCG | 121 | 12724689 |
def func1():
pass
def func2():
pass
a = b = func1
b()
a = b = func2
a()
|
notebook/pandas_ohlc_downsampling.py | vhn0912/python-snippets | 174 | 12724711 |
import pandas as pd
df = pd.read_csv('data/src/aapl_2015_2019.csv', index_col=0, parse_dates=True)['2017']
print(df)
# open high low close volume
# 2017-01-03 115.80 116.3300 114.760 116.15 28781865
# 2017-01-04 115.85 116.5100 115.750 116.02 21118116
# 2017-01-05 115.92 116.8642 115.810 116.61 22193587
# 2017-01-06 116.78 118.1600 116.470 117.91 31751900
# 2017-01-09 117.95 119.4300 117.940 118.99 33561948
# ... ... ... ... ... ...
# 2017-12-22 174.68 175.4240 174.500 175.01 16052615
# 2017-12-26 170.80 171.4700 169.679 170.57 32968167
# 2017-12-27 170.10 170.7800 169.710 170.60 21672062
# 2017-12-28 171.00 171.8500 170.480 171.08 15997739
# 2017-12-29 170.52 170.5900 169.220 169.23 25643711
#
# [251 rows x 5 columns]
d_ohlc = {'open': 'first',
'high': 'max',
'low': 'min',
'close': 'last'}
print(df.resample('MS').agg(d_ohlc))
# open high low close
# 2017-01-01 115.80 122.4400 114.76 121.35
# 2017-02-01 127.03 137.4800 127.01 136.99
# 2017-03-01 137.89 144.5000 137.05 143.66
# 2017-04-01 143.71 145.4600 140.06 143.65
# 2017-05-01 145.10 156.6500 144.27 152.76
# 2017-06-01 153.17 155.9800 142.20 144.02
# 2017-07-01 144.88 153.9900 142.41 148.73
# 2017-08-01 149.10 164.5200 148.41 164.00
# 2017-09-01 164.80 164.9400 149.16 154.12
# 2017-10-01 154.26 169.6499 152.46 169.04
# 2017-11-01 169.87 176.2400 165.28 171.85
# 2017-12-01 169.95 177.2000 166.46 169.23
print(df.resample('QS').agg(d_ohlc))
# open high low close
# 2017-01-01 115.80 144.50 114.76 143.66
# 2017-04-01 143.71 156.65 140.06 144.02
# 2017-07-01 144.88 164.94 142.41 154.12
# 2017-10-01 154.26 177.20 152.46 169.23
print(df.resample('2W-MON', closed='left', label='left').agg(d_ohlc))
# open high low close
# 2017-01-02 115.800 119.9300 114.7600 119.04
# 2017-01-16 118.340 122.4400 118.2200 121.95
# 2017-01-30 120.930 132.9400 120.6200 132.12
# 2017-02-13 133.080 137.4800 132.7500 136.66
# 2017-02-27 137.140 140.2786 136.2800 139.14
# 2017-03-13 138.850 142.8000 138.8200 140.64
# 2017-03-27 139.390 145.4600 138.6200 143.34
# 2017-04-10 143.600 143.8792 140.0600 142.27
# 2017-04-24 143.500 148.9800 143.1800 148.96
# 2017-05-08 149.030 156.6500 149.0300 153.06
# 2017-05-22 154.000 155.4500 152.2200 155.45
# 2017-06-05 154.340 155.9800 142.2000 142.27
# 2017-06-19 143.660 148.2800 142.2800 144.02
# 2017-07-03 144.880 149.3300 142.4100 149.04
# 2017-07-17 148.820 153.9900 147.3000 149.50
# 2017-07-31 149.900 161.8300 148.1300 157.48
# 2017-08-14 159.320 162.5100 155.1101 159.86
# 2017-08-28 160.140 164.9400 158.5300 158.63
# 2017-09-11 160.500 163.9600 150.5600 151.89
# 2017-09-25 149.990 155.4900 149.1600 155.30
# 2017-10-09 155.810 160.8700 155.0200 156.25
# 2017-10-23 156.890 174.2600 155.2700 172.50
# 2017-11-06 172.365 176.2400 168.3800 170.15
# 2017-11-20 170.290 175.5000 167.1600 171.05
# 2017-12-04 172.480 174.1700 166.4600 173.97
# 2017-12-18 174.880 177.2000 169.2200 169.23
d_ohlcv = {'open': 'first',
'high': 'max',
'low': 'min',
'close': 'last',
'volume': 'sum'}
print(df.resample('MS').agg(d_ohlcv))
# open high low close volume
# 2017-01-01 115.80 122.4400 114.76 121.35 563331160
# 2017-02-01 127.03 137.4800 127.01 136.99 574968547
# 2017-03-01 137.89 144.5000 137.05 143.66 562091214
# 2017-04-01 143.71 145.4600 140.06 143.65 371280180
# 2017-05-01 145.10 156.6500 144.27 152.76 635292989
# 2017-06-01 153.17 155.9800 142.20 144.02 664986406
# 2017-07-01 144.88 153.9900 142.41 148.73 411377229
# 2017-08-01 149.10 164.5200 148.41 164.00 638221161
# 2017-09-01 164.80 164.9400 149.16 154.12 669594016
# 2017-10-01 154.26 169.6499 152.46 169.04 496135305
# 2017-11-01 169.87 176.2400 165.28 171.85 581876496
# 2017-12-01 169.95 177.2000 166.46 169.23 518560008
|
tests/pki/test_models.py | pythonModule/commandment | 138 | 12724716 |
import pytest
import os.path
import logging
from cryptography import x509
from cryptography.hazmat.primitives.asymmetric import rsa
from commandment.pki.models import RSAPrivateKey, CACertificate
logger = logging.getLogger(__name__)
class TestModels:
def test_rsa_privatekey_from_crypto(self, private_key: rsa.RSAPrivateKeyWithSerialization, session):
m = RSAPrivateKey.from_crypto(private_key)
session.add(m)
session.commit()
assert m.id is not None
assert m.pem_data is not None
def test_ca_certificate_from_crypto(self, ca_certificate: x509.Certificate, session):
m = CACertificate.from_crypto(ca_certificate)
session.add(m)
session.commit()
assert m.id is not None
assert m.pem_data is not None
assert m.fingerprint is not None
assert m.x509_cn is not None
|
models/data_manager.py | nawshad/multi-task-NLP | 308 | 12724721 |
'''
Script to manage datasets for multiple tasks
'''
from torch.utils.data import Dataset, DataLoader, BatchSampler
from utils.data_utils import TaskType, ModelType
import torch
import random
import logging
import json
logger = logging.getLogger("multi_task")
class allTasksDataset(Dataset):
'''
class to make a pytorch dataset out of the processed data for the given tasks
taskDict :- list of dictionaries. Each dictionary holds the details of one
dataset to be created for a task
[ {"data_task_id" : "", "data_path" : "", "data_task_type" : ""},
...]
'''
def __init__(self, taskDict, pipeline = False):
self.taskDict = taskDict
self.pipeline = pipeline
self.allTasksData, self.taskIdTypeMap = self.make_all_datasets()
def read_data(self, readPath):
with open(readPath, 'r', encoding = 'utf-8') as file:
logger.info('Reading data from file {}'.format(readPath))
taskData = []
for i, line in enumerate(file):
#if i >=1000:
#continue
sample = json.loads(line)
taskData.append(sample)
return taskData
def make_all_datasets(self):
'''
For each dataset entry in taskDict, this function reads the corresponding data
and returns a dictionary mapping like {<task_id> : <data>, ...}
'''
allTasksData = {}
taskIdTypeMap = {} # mapping from task id to task type
for task in self.taskDict:
if self.pipeline:
logger.info('Reading data for pipeline')
data = task["data_"]
else:
data = self.read_data(task["data_path"])
allTasksData[task["data_task_id"]] = data
taskIdTypeMap[task["data_task_id"]] = task["data_task_type"]
logger.info('Read Data for Task Id: {} Task Name: {}. Samples {}'.format(task["data_task_id"], task["data_task_name"], len(data)))
return allTasksData, taskIdTypeMap
# standard Dataset methods (__len__, __getitem__, etc.) that need to be overridden
def __len__(self):
return sum(len(v) for k, v in self.allTasksData.items())
# get item will be used to fetch a sample when required for the corresponding task id.
def __getitem__(self, idx):
taskId, sampleId = idx
out = {"task": {"task_id": taskId, "task_type": self.taskIdTypeMap[taskId]},
"sample": self.allTasksData[taskId][sampleId]}
return out
class Batcher(BatchSampler):
def __init__(self, dataObj, batchSize, shuffleTask = True, shuffleBatch = True, seed = 42):
'''
dataObj :- An instance of allTasksDataset containing data for all tasks
'''
self.dataObj = dataObj
self.allTasksData = dataObj.allTasksData
self.batchSize = batchSize
# to shuffle the indices in a batch
self.shuffleBatch = shuffleBatch
# to shuffle the samples picked up among all the tasks
self.shuffleTask = shuffleTask
self.seed = seed
self.allTasksDataBatchIdxs = []
self.taskIdxId = []
for taskId, data in self.allTasksData.items():
self.allTasksDataBatchIdxs.append(self.make_batches(len(data)))
self.taskIdxId.append(taskId)
def make_batches(self, dataSize):
batchIdxs = [list(range(i, min(i+self.batchSize, dataSize))) for i in range(0, dataSize, self.batchSize)]
if self.shuffleBatch:
random.seed(self.seed)
random.shuffle(batchIdxs)
return batchIdxs
def make_task_idxs(self):
'''
Builds the sequence of task indices, one per batch to be created,
e.g. [0, 0, 1, 3, 0, 2, 3, 1, 1, ...] if the task ids are 0, 1, 2, 3
'''
taskIdxs = []
for i in range(len(self.allTasksDataBatchIdxs)):
taskIdxs += [i]*len(self.allTasksDataBatchIdxs[i])
if self.shuffleTask:
random.seed(self.seed)
random.shuffle(taskIdxs)
return taskIdxs
# overriding BatchSampler functions to generate iterators over all tasks and iterate through them
def __len__(self):
return sum(len(data) for taskId, data in self.allTasksData.items())
def __iter__(self):
allTasksIters = [iter(item) for item in self.allTasksDataBatchIdxs]
#all_iters = [iter(item) for item in self._train_data_list]
allIdxs = self.make_task_idxs()
for taskIdx in allIdxs:
# this batch belongs to a specific task id
batchTaskId = self.taskIdxId[taskIdx]
batch = next(allTasksIters[taskIdx])
yield [(batchTaskId, sampleIdx) for sampleIdx in batch]
def patch_data(self, batch_info, batch_data, gpu = None):
if gpu:
for i, part in enumerate(batch_data):
if part is not None:
if isinstance(part, torch.Tensor):
batch_data[i] = part.pin_memory().cuda(non_blocking=True)
elif isinstance(part, tuple):
batch_data[i] = tuple(sub_part.pin_memory().cuda(non_blocking=True) for sub_part in part)
elif isinstance(part, list):
batch_data[i] = [sub_part.pin_memory().cuda(non_blocking=True) for sub_part in part]
else:
raise TypeError("unknown batch data type at %s: %s" % (i, part))
return batch_info, batch_data
class batchUtils:
'''
This class is supposed to perform function which will help complete the batch data
when DataLoader creates batch using allTasksDataset and Batcher.
Main function would be
1. A function to make get the various components of input in batch samples and make them into
Pytorch Tensors like token_id, type_ids, masks.
2. Collater function :- This function will use the above function to convert the batch into
pytorch tensor inputs. As converting all the data into pytorch tensors before might not be a good
idea due to space, hence this custom function will be used to convert the batches into tensors on the fly
by acting as custom collater function to DataLoader
'''
def __init__(self, isTrain, modelType, maxSeqLen, dropout = 0.005):
self.isTrain = isTrain
self.modelType = modelType
self.maxSeqLen = maxSeqLen
#self.dropout = dropout
def check_samples_len(self, batch):
# check that every sample in the batch has the configured maxSeqLen
for samp in batch:
assert len(samp['token_id']) == self.maxSeqLen, "token_id len doesn't match max seq len"
# for multiple encoders
if samp['type_id'] is not None:
assert len(samp['type_id']) == self.maxSeqLen, "type_id len doesn't match max seq len"
if samp['mask'] is not None:
assert len(samp['mask']) == self.maxSeqLen, "mask len doesn't match max seq len"
def make_batch_to_input_tensor(self, batch):
#check len in batch data
self.check_samples_len(batch)
batchSize = len(batch)
hasTypeIds = True
hasAttnMasks = True
if batch[0]['type_id'] is None:
hasTypeIds = False
if batch[0]['mask'] is None:
hasAttnMasks = False
#initializing token id, type id, attention mask tensors for this batch
tokenIdsBatchTensor = torch.LongTensor(batchSize, self.maxSeqLen).fill_(0)
typeIdsBatchTensor = torch.LongTensor(batchSize, self.maxSeqLen).fill_(0)
masksBatchTensor = torch.LongTensor(batchSize, self.maxSeqLen).fill_(0)
#fillling in data from sample
for i, sample in enumerate(batch):
tokenIdsBatchTensor[i] = torch.LongTensor(sample['token_id'])
if hasTypeIds:
typeIdsBatchTensor[i] = torch.LongTensor(sample['type_id'])
if hasAttnMasks:
masksBatchTensor[i] = torch.LongTensor(sample['mask'])
# meta deta will store more things like task id, task type etc.
batchMetaData = {"token_id_pos" : 0, "type_id_pos" : 1, "mask_pos" : 2}
batchData = [tokenIdsBatchTensor, None, None] #None, None in case type ids, attnMasks not required by model
if hasTypeIds:
batchData[1] = typeIdsBatchTensor
if hasAttnMasks:
batchData[2] = masksBatchTensor
return batchMetaData, batchData
def collate_fn(self, batch):
'''
This function will be used by DataLoader to return batches
'''
taskId = batch[0]["task"]["task_id"]
taskType = batch[0]["task"]["task_type"]
orgBatch = []
labels = []
for sample in batch:
assert sample["task"]["task_id"] == taskId
assert sample["task"]["task_type"] == taskType
orgBatch.append(sample["sample"])
labels.append(sample["sample"]["label"])
batch = orgBatch
#making tensor batch data
batchMetaData, batchData = self.make_batch_to_input_tensor(batch)
batchMetaData['task_id'] = taskId
batchMetaData['task_type'] = taskType
# add the label tensor when training (it is used for loss calculation and parameter updates);
# during evaluation the labels won't go with the batch data, they are kept in the meta data for metrics
if self.isTrain:
if taskType in (TaskType.SingleSenClassification, TaskType.SentencePairClassification, TaskType.NER):
batchData.append(torch.LongTensor(labels))
#position for label
batchMetaData['label_pos'] = len(batchData) - 1
else:
# for test/eval labels won't be added into batch, but kept in meta data
# so metric evaluation can be done
#batchData :- [tokenIdsBatchTensor, typeIdsBatchTensor, MasksBatchTensor]
batchMetaData['label'] = labels
batchMetaData['uids'] = [sample['uid'] for sample in batch] # used in scoring
return batchMetaData, batchData
|
conanfile.py | dbacchet/entt | 6,792 | 12724781 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from conans import ConanFile
class EnttConan(ConanFile):
name = "entt"
description = "Gaming meets modern C++ - a fast and reliable entity-component system (ECS) and much more "
topics = ("conan," "entt", "gaming", "entity", "ecs")
url = "https://github.com/skypjack/entt"
homepage = url
author = "<NAME> <<EMAIL>>"
license = "MIT"
exports = ["LICENSE"]
exports_sources = ["src/*"]
no_copy_source = True
def package(self):
self.copy(pattern="LICENSE", dst="licenses")
self.copy(pattern="*", dst="include", src="src", keep_path=True)
def package_info(self):
if not self.in_local_cache:
self.cpp_info.includedirs = ["src"]
def package_id(self):
self.info.header_only()
|
components/espcoredump/corefile/riscv.py | cablelabs/esp-idf | 8,747 | 12724822 | #
# Copyright 2021 Espressif Systems (Shanghai) CO., LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from construct import Int16ul, Int32ul, Padding, Struct
from corefile import BaseArchMethodsMixin, BaseTargetMethods, ESPCoreDumpLoaderError
try:
from typing import Any, Optional, Tuple
except ImportError:
pass
RISCV_GP_REGS_COUNT = 32
PRSTATUS_SIZE = 204
PRSTATUS_OFFSET_PR_CURSIG = 12
PRSTATUS_OFFSET_PR_PID = 24
PRSTATUS_OFFSET_PR_REG = 72
ELF_GREGSET_T_SIZE = 128
PrStruct = Struct(
Padding(PRSTATUS_OFFSET_PR_CURSIG),
'pr_cursig' / Int16ul,
Padding(PRSTATUS_OFFSET_PR_PID - PRSTATUS_OFFSET_PR_CURSIG - Int16ul.sizeof()),
'pr_pid' / Int32ul,
Padding(PRSTATUS_OFFSET_PR_REG - PRSTATUS_OFFSET_PR_PID - Int32ul.sizeof()),
'regs' / Int32ul[RISCV_GP_REGS_COUNT],
Padding(PRSTATUS_SIZE - PRSTATUS_OFFSET_PR_REG - ELF_GREGSET_T_SIZE)
)
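# For reference: building the struct with zeroed fields yields a PRSTATUS note body of
# exactly PRSTATUS_SIZE bytes, e.g.
#   len(PrStruct.build({'pr_cursig': 0, 'pr_pid': 0, 'regs': [0] * RISCV_GP_REGS_COUNT})) == 204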
class RiscvMethodsMixin(BaseArchMethodsMixin):
@staticmethod
def get_registers_from_stack(data, grows_down):
# type: (bytes, bool) -> Tuple[list[int], Optional[dict[int, int]]]
regs = Int32ul[RISCV_GP_REGS_COUNT].parse(data)
if not grows_down:
raise ESPCoreDumpLoaderError('Growing up stacks are not supported for now!')
return regs, None
@staticmethod
def build_prstatus_data(tcb_addr, task_regs): # type: (int, list[int]) -> Any
return PrStruct.build({
'pr_cursig': 0,
'pr_pid': tcb_addr,
'regs': task_regs,
})
class Esp32c3Methods(BaseTargetMethods, RiscvMethodsMixin):
TARGET = 'esp32c3'
|
DEPRECATED_PYTHON_SRC/component/hosts.py | 17701253801/firefly-proxy | 5,895 | 12724834 | import os
import codecs
import json
import collections
from collections import defaultdict
from gevent import socket
from fnmatch import fnmatch
if os.name == 'nt':
import win_inet_pton
socket.inet_pton = win_inet_pton.inet_pton
socket.inet_ntop = win_inet_pton.inet_ntop
from gsocks.smart_relay import ForwardDestination
from lib.utils import load_file, remote_update_datafile
def create_connection_hosts(addrs, port, timeout):
for addr in addrs:
try:
return socket.create_connection((addr, port), timeout=timeout)
except:
pass
raise socket.error("all addrs are failed.") # @UndefinedVariable
def create_hosts(rootdir, confdata):
f = codecs.open(os.path.join(rootdir, confdata['hosts']['meta']), "r", "utf-8")
meta = json.loads(f.read(), object_pairs_hook=collections.OrderedDict)
f.close()
disabled = load_file(os.path.join(rootdir, confdata['hosts']['disabled']))
data = load_file(os.path.join(rootdir, confdata['hosts']['data']))
enable = int(confdata['hosts']['enable'])!=0
return FireflyHosts(enable, data, meta, disabled)
def detect_ipv6():
try:
addrinfo = socket.getaddrinfo("www.google.com", 80)
af, _, _, _, _ = addrinfo[0]
return af == socket.AF_INET6 # @UndefinedVariable
except:
return False
def hosts_info(rootdir, confdata, hosts):
return (
os.path.join(rootdir, confdata['hosts']['data']),
hosts.enable,
hosts.count(),
hosts.groups(),
hosts.meta['date'],
)
def remote_update_hosts(proxies, rootdir, confdata):
metafile = os.path.join(rootdir, confdata['hosts']['meta'])
metaurl = confdata['hosts']['meta_url']
datafile = os.path.join(rootdir, confdata['hosts']['data'])
dataurl = confdata['hosts']['data_url']
f = codecs.open(metafile, "r", "utf-8")
meta = json.loads(f.read(), object_pairs_hook=collections.OrderedDict)
f.close()
return remote_update_datafile(proxies, meta, metafile, metaurl, datafile, dataurl)
class FireflyHosts(object):
def __init__(self, enable, data, meta, disabled):
self.enable = enable
self.data = defaultdict(list)
self.meta = meta
self.disabled = set(disabled)
self.has_ipv6 = None
for entry in data:
try:
parts = entry.split()
parts = [s.strip() for s in parts]
parts = [s for s in parts if not s.startswith("#")]
addr, name = parts
if "." in addr:
socket.inet_pton(socket.AF_INET, addr) # @UndefinedVariable
else:
socket.inet_pton(socket.AF_INET6, addr) # @UndefinedVariable
self.data[name.encode("idna")].append(addr)
except Exception, e:
pass
#print "[Hosts]: ", entry, str(e)
def count(self):
return len(self.data.keys())
def disable(self, groupname):
self.disabled.add(groupname)
def match_domain(self, domain, host):
if fnmatch(domain, host):
return True
parts = host.split(".")
for i in range(len(parts)-1, -1, -1):
if ".".join(parts[i:]) == domain:
return True
return False
def need_redirect(self, method, host):
if method != "GET":
return False
groups = self.meta.get('groups', {})
for (_, domains) in groups.iteritems():
for (domain, redirect) in domains:
if self.match_domain(domain, host) and redirect:
return True
return False
def is_disabled(self, host):
groups = self.meta.get('groups', {})
for groupname in self.disabled:
domains = groups.get(groupname, [])
for (domain, _) in domains:
if self.match_domain(domain, host):
return True
return False
def __classify(self, addrs):
v4 = []
v6 = []
for addr in addrs:
if ":" in addr:
v6.append(addr)
else:
v4.append(addr)
if self.has_ipv6:
# assume ipv4 is always available.
return v6 + v4
else:
return v4
def find(self, host):
if not self.enable:
print "hosts disabled ..."
return None
if self.has_ipv6 == None:
self.has_ipv6 = detect_ipv6()
for name, addrs in self.data.iteritems():
if name == host and not self.is_disabled(host):
addrs = self.__classify(addrs)
if addrs:
return ForwardDestination("hosts", addrs)
else:
return None
return None
def groups(self):
ret = []
names = self.meta.get('groups', {}).keys()
for name in names:
if name in self.disabled:
ret.append((name, False))
else:
ret.append((name, True))
return ret
|
lib/oembed/utils.py | goztrk/django-htk | 206 | 12724839 | # Python Standard Library Imports
import re
# Third Party (PyPI) Imports
import requests
import rollbar
import six.moves.urllib as urllib
# HTK Imports
from htk.lib.oembed.cachekeys import OembedResponseCache
from htk.lib.oembed.constants import *
from htk.utils.request import get_current_request
def get_oembed_html(url, autoplay=False):
"""Gets the oEmbed HTML for a URL, if it is an oEmbed type
"""
oembed_type = get_oembed_type(url)
if oembed_type:
if oembed_type == 'youtube':
html = youtube_oembed(url, autoplay=autoplay)
else:
html = get_oembed_html_for_service(url, oembed_type)
else:
html = None
return html
def get_oembed_html_for_service(url, service):
"""Returns the oEmbed HTML for `service` (YouTube, Vimeo, etc)
Makes an HTTP request, so we should probably cache its response
"""
c = OembedResponseCache(prekey=url)
html = c.get()
if html is None:
request = None
success = False
try:
oembed_base_url = OEMBED_BASE_URLS[service]
oembed_url = oembed_base_url % {
'url' : urllib.parse.quote(url),
}
response = requests.get(oembed_url)
if response.status_code >= 400:
pass
else:
data = response.json()
html = data['html']
c.cache_store(html)
success = True
except:
request = get_current_request()
extra_data = {
'message' : 'Bad oembed URL',
'oembed_url' : oembed_url,
'url' : url,
'response' : {
'status_code' : response.status_code,
'content' : response.content,
}
}
rollbar.report_exc_info(level='warning', request=request, extra_data=extra_data)
if success:
pass
else:
html = '<a href="%(url)s" target="_blank">%(url)s</a>' % {
'url' : url,
}
else:
pass
return html
def get_oembed_type(url):
"""Determines the type of oEmbed this URL is, if it exists
"""
oembed_type = None
for service, pattern in OEMBED_URL_SCHEME_REGEXPS.items():
if re.match(pattern, url, flags=re.I):
oembed_type = service
break
return oembed_type
def youtube_oembed(url, autoplay=False):
html = get_oembed_html_for_service(url, 'youtube')
if autoplay:
replacement = '?feature=oembed&autoplay=1&rel=0&modestbranding=1'
else:
replacement = '?feature=oembed&rel=0&modestbranding=1'
html = re.sub(
r'\?feature=oembed',
replacement,
html
)
return html
def youtube_oembed_autoplay(url):
html = youtube_oembed(url, autoplay=True)
return html
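# Illustrative usage (the video URL is a placeholder and a configured Django settings/cache
# backend is assumed):
#   embed_html = get_oembed_html('https://www.youtube.com/watch?v=<video_id>', autoplay=False)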
|
opts.py | Nitin-Mane/dense-ulearn-vos | 157 | 12724853 | """
Copyright (c) 2021 TU Darmstadt
Author: <NAME> <<EMAIL>>
License: Apache License 2.0
"""
from __future__ import print_function
import os
import torch
import argparse
from core.config import cfg
def add_global_arguments(parser):
#
# Model details
#
parser.add_argument("--snapshot-dir", type=str, default='./snapshots',
help="Where to save snapshots of the model.")
parser.add_argument("--logdir", type=str, default='./logs',
help="Where to save log files of the model.")
parser.add_argument("--exp", type=str, default="main",
help="ID of the experiment (multiple runs)")
parser.add_argument("--run", type=str, help="ID of the run")
parser.add_argument('--workers', type=int, default=8,
metavar='N', help='dataloader threads')
parser.add_argument('--seed', default=64, type=int, help='seed for initializing training. ')
#
# Inference only
#
parser.add_argument("--infer-list", default="voc12/val.txt", type=str)
parser.add_argument('--mask-output-dir', type=str, default=None, help='path where to save masks')
parser.add_argument("--resume", type=str, default=None, help="Snapshot \"ID,iter\" to load")
#
# Configuration
#
parser.add_argument(
'--cfg', dest='cfg_file', required=True,
help='Config file for training (and optionally testing)')
parser.add_argument(
'--set', dest='set_cfgs',
help='Set config keys. Key value sequence seperate by whitespace.'
'e.g. [key] [value] [key] [value]',
default=[], nargs='+')
def maybe_create_dir(path):
if not os.path.exists(path):
os.makedirs(path)
def check_global_arguments(args):
args.cuda = torch.cuda.is_available()
print("Available threads: ", torch.get_num_threads())
args.logdir = os.path.join(args.logdir, args.exp, args.run)
maybe_create_dir(args.logdir)
#
# Model directories
#
args.snapshot_dir = os.path.join(args.snapshot_dir, args.exp, args.run)
maybe_create_dir(args.snapshot_dir)
def get_arguments(args_in):
"""Parse all the arguments provided from the CLI.
Returns:
The parsed arguments as an argparse namespace.
"""
parser = argparse.ArgumentParser(description="Dense Unsupervised Learning for Video Segmentation")
add_global_arguments(parser)
args = parser.parse_args(args_in)
check_global_arguments(args)
return args
|
maro/cli/inspector/visualization.py | yangboz/maro | 598 | 12724866 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import argparse
from maro.cli.inspector.cim_dashboard import start_cim_dashboard
from maro.cli.inspector.citi_bike_dashboard import start_citi_bike_dashboard
from maro.cli.inspector.params import GlobalScenarios
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--source_path", type=str)
parser.add_argument("--scenario", type=str)
parser.add_argument("--epoch_num", type=int)
parser.add_argument("--prefix", type=str)
args = parser.parse_args()
source_path = args.source_path
scenario = GlobalScenarios(args.scenario)
epoch_num = args.epoch_num
prefix = args.prefix
if scenario == GlobalScenarios.CIM:
start_cim_dashboard(source_path, epoch_num, prefix)
elif scenario == GlobalScenarios.CITI_BIKE:
start_citi_bike_dashboard(source_path, epoch_num, prefix)
|
ajenti-core/aj/security/verifier.py | ajenti/ajen | 3,777 | 12724876 | from jadi import service
import aj
@service
class ClientCertificateVerificator():
def __init__(self, context):
self.context = context
def verify(self, x509):
serial = x509.get_serial_number()
digest = x509.digest('sha1')
# logging.debug('SSL verify: %s / %s' % (x509.get_subject(), digest))
for c in aj.config.data['ssl']['client_auth']['certificates']:
if int(c['serial']) == serial and c['digest'].encode('utf-8') == digest:
return c['user']
|
pandapower/test/loadflow/PF_Results.py | yougnen/pandapower | 104 | 12724904 | import numpy as np
def get_PF_Results():
results=\
{
10:
{
0:
{
'delta' :
{
'Yyn': np.array
([
#10,0,deltaYyn
#BusTr_HV,Tr_LV,Load
1.0000001787261197, 0.9990664471050634, 0.9408623912831601,
0.9999997973033823, 0.9989329879720452, 0.9398981202882926,
1.000000023970535, 0.9990124767159095, 0.9422153531204793,
] )
,
'YNyn': np.array
([
#10,0,deltaYNyn
#BusTr_HV,Tr_LV,Load
1.0000001786899793, 0.9990638105447855, 0.9408586320432043,
0.9999997971517767, 0.9989338020819162, 0.9398997093459485,
1.000000024158281, 0.9990142941344189, 0.9422174830541402,
] )
,
'Dyn': np.array
([
#10,0,deltaDyn
#BusTr_HV,Tr_LV,Load
1.000000178603741, 0.9990638106892, 0.9408586322473715,
0.9999997971832201, 0.9989338020666364, 0.9398997093074486,
1.000000024213076, 0.9990142940055439, 0.9422174828921106,
] )
,
'Yzn': np.array
([
#10,0,deltaYzn
#BusTr_HV,Tr_LV,Load
1.000000178603741, 0.9990638106892, 0.9408586322473715,
0.9999997971832201, 0.9989338020666364, 0.9398997093074486,
1.000000024213076, 0.9990142940055439, 0.9422174828921106,
] )
,
},
'wye' :
{
'Yyn': np.array
([
#10,0,wyeYyn
#BusTr_HV,Tr_LV,Load
0.9999998021362442, 0.9915031010358111, 0.9206318374527404,
0.9999997791045989, 1.0143417780460269, 0.9616365638634155,
1.000000418759289, 0.9913387390190033, 0.9408558778822637,
] )
,
'YNyn': np.array
([
#10,0,wyeYNyn
#BusTr_HV,Tr_LV,Load
0.9999997083766274, 0.9988968962217385, 0.9287452455114519,
1.0000001672319114, 0.999061839981782, 0.9452915718541725,
1.0000001243918462, 0.9990504923797096, 0.9488965582258678,
] )
,
'Dyn': np.array
([
#10,0,wyeDyn
#BusTr_HV,Tr_LV,Load
0.9999999599731432, 0.9988963012384348, 0.9287445940341739,
0.999999734429128, 0.9990625733649781, 0.9452923634430362,
1.000000305597812, 0.9990503538577492, 0.9488964199625295,
] )
,
'Yzn': np.array
([
#10,0,wyeYzn
#BusTr_HV,Tr_LV,Load
0.9999999599731432, 0.9988963012384348, 0.9287445940341739,
0.999999734429128, 0.9990625733649781, 0.9452923634430362,
1.000000305597812, 0.9990503538577492, 0.9488964199625295,
] )
,
},
'delta_wye' :
{
'Yyn': np.array
([
#10,0,delta_wyeYyn
#BusTr_HV,Tr_LV,Load
1.000000289039923, 0.9945259444558469, 0.9241479442057374,
0.9999996598061066, 1.0028660964609941, 0.9332827547884484,
1.0000000511540714, 0.9989227003917809, 0.9366758414321353,
] )
,
'YNyn': np.array
([
#10,0,delta_wyeYNyn
#BusTr_HV,Tr_LV,Load
1.0000001633660651, 0.9988186334488024, 0.9284513283443013,
0.9999997731436624, 0.9986857571039884, 0.9290168825920521,
1.0000000634904662, 0.9987917974558278, 0.9366076053493121,
] )
,
'Dyn': np.array
([
#10,0,delta_wyeDyn
#BusTr_HV,Tr_LV,Load
1.0000002947774138, 0.9988183812973129, 0.928451074375663,
0.9999996601592913, 0.9986859152711799, 0.9290170457925304,
1.0000000450633972, 0.9987918914643369, 0.936607696605823,
] )
,
'Yzn': np.array
([
#10,0,delta_wyeYzn
#BusTr_HV,Tr_LV,Load
1.0000002947774138, 0.9988183812973129, 0.928451074375663,
0.9999996601592913, 0.9986859152711799, 0.9290170457925304,
1.0000000450633972, 0.9987918914643369, 0.936607696605823,
] )
,
},
'bal_wye' :
{
'Yyn': np.array
([
#10,0,bal_wyeYyn
#BusTr_HV,Tr_LV,Load
0.9999999999999879, 0.9990668908275987, 0.9446728357045939,
0.9999999999999739, 0.9990668910254652, 0.9446728363197381,
1.0000000000000384, 0.9990668908667012, 0.9446728362625954,
] )
,
'YNyn': np.array
([
#10,0,bal_wyeYNyn
#BusTr_HV,Tr_LV,Load
0.9999999999999863, 0.9990668909016067, 0.9446728357836535,
0.9999999999999772, 0.9990668908990621, 0.9446728361848189,
1.0000000000000362, 0.9990668909190944, 0.9446728363184529,
] )
,
'Dyn': np.array
([
#10,0,bal_wyeDyn
#BusTr_HV,Tr_LV,Load
0.999999999999989, 0.999066890901618, 0.9446728357836652,
0.9999999999999737, 0.999066890899081, 0.9446728361848393,
1.0000000000000375, 0.999066890919066, 0.9446728363184226,
] )
,
'Yzn': np.array
([
#10,0,bal_wyeYzn
#BusTr_HV,Tr_LV,Load
0.999999999999989, 0.999066890901618, 0.9446728357836652,
0.9999999999999737, 0.999066890899081, 0.9446728361848393,
1.0000000000000375, 0.999066890919066, 0.9446728363184226,
] )
,
},
},
1:
{
'delta' :
{
'Yyn': np.array
([
#10,1,deltaYyn
#BusTr_HV,Tr_LV,Load
1.0000001795040512, 1.0240495841864894, 0.9674397511496959,
0.9999997971910463, 1.0239111614639989, 0.9664923222986317,
1.0000000233049395, 1.0239935208058917, 0.9687543048259518,
] )
,
'YNyn': np.array
([
#10,1,deltaYNyn
#BusTr_HV,Tr_LV,Load
1.0000001782704175, 1.0240459468337655, 0.9674352916726019,
0.9999997977852046, 1.0239130527637306, 0.9664952324047731,
1.0000000239444145, 1.023995255504894, 0.9687558295327158,
] )
,
'Dyn': np.array
([
#10,1,deltaDyn
#BusTr_HV,Tr_LV,Load
1.0000001782214243, 1.024045946940332, 0.967435291834159,
0.9999997978066542, 1.0239130527420286, 0.9664952323430777,
1.0000000239719584, 1.023995255420507, 0.9687558294364838,
] )
,
'Yzn': np.array
([
#10,1,deltaYzn
#BusTr_HV,Tr_LV,Load
1.0000001782214243, 1.024045946940332, 0.967435291834159,
0.9999997978066542, 1.0239130527420286, 0.9664952323430777,
1.0000000239719584, 1.023995255420507, 0.9687558294364838,
] )
,
},
'wye' :
{
'Yyn': np.array
([
#10,1,wyeYyn
#BusTr_HV,Tr_LV,Load
0.9999998049723338, 1.0163471727161444, 0.9474851372085454,
0.9999997835047069, 1.0396033478524176, 0.9883119194148919,
1.0000004115230865, 1.016177862041642, 0.9670415224711911,
] )
,
'YNyn': np.array
([
#10,1,wyeYNyn
#BusTr_HV,Tr_LV,Load
0.9999997111904564, 1.023876123903735, 0.9557104532156954,
1.000000169840967, 1.024045000904823, 0.97172789408756,
1.0000001189689527, 1.024030547850082, 0.9752090807560196,
] )
,
'Dyn': np.array
([
#10,1,wyeDyn
#BusTr_HV,Tr_LV,Load
0.9999999610844935, 1.0238755180281829, 0.9557097928361534,
0.9999997396431541, 1.0240457481759326, 0.9717286975282872,
1.0000002992724317, 1.0240304063318828, 0.975208939465858,
] )
,
'Yzn': np.array
([
#10,1,wyeYzn
#BusTr_HV,Tr_LV,Load
0.9999999610844935, 1.0238755180281829, 0.9557097928361534,
0.9999997396431541, 1.0240457481759326, 0.9717286975282872,
1.0000002992724317, 1.0240304063318828, 0.975208939465858,
] )
,
},
'delta_wye' :
{
'Yyn': np.array
([
#10,1,delta_wyeYyn
#BusTr_HV,Tr_LV,Load
1.0000002896605282, 1.0194026014413138, 0.9509830141499932,
0.9999996606572187, 1.0279455302463374, 0.9603073239465667,
1.0000000496823542, 1.0238970684816717, 0.9633884768515291,
] )
,
'YNyn': np.array
([
#10,1,delta_wyeYNyn
#BusTr_HV,Tr_LV,Load
1.0000001631049464, 1.0237965435008547, 0.9553922424619002,
0.9999997741736003, 1.0236607923322103, 0.9559358029296258,
1.000000062721646, 1.0237688359303385, 0.9633200580357987,
] )
,
'Dyn': np.array
([
#10,1,delta_wyeDyn
#BusTr_HV,Tr_LV,Load
1.0000002940160242, 1.023796285978077, 0.9553919829548445,
0.9999996614657936, 1.0236609541452617, 0.9559359697011912,
1.000000044518284, 1.0237689316654306, 0.9633201512377196,
] )
,
'Yzn': np.array
([
#10,1,delta_wyeYzn
#BusTr_HV,Tr_LV,Load
1.0000002940160242, 1.023796285978077, 0.9553919829548445,
0.9999996614657936, 1.0236609541452617, 0.9559359697011912,
1.000000044518284, 1.0237689316654306, 0.9633201512377196,
] )
,
},
'bal_wye' :
{
'Yyn': np.array
([
#10,1,bal_wyeYyn
#BusTr_HV,Tr_LV,Load
0.99999999999999, 1.02404859308445, 0.971134029249497,
0.9999999999999845, 1.0240485931685195, 0.9711340295967834,
1.0000000000000258, 1.0240485931044616, 0.9711340295607079,
] )
,
'YNyn': np.array
([
#10,1,bal_wyeYNyn
#BusTr_HV,Tr_LV,Load
0.9999999999999892, 1.0240485931151249, 0.9711340292823146,
0.9999999999999865, 1.024048593114567, 0.9711340295398108,
1.0000000000000244, 1.0240485931277552, 0.9711340295848808,
] )
,
'Dyn': np.array
([
#10,1,bal_wyeDyn
#BusTr_HV,Tr_LV,Load
0.9999999999999902, 1.024048593115119, 0.9711340292823075,
0.9999999999999848, 1.0240485931145844, 0.9711340295398292,
1.0000000000000249, 1.024048593127728, 0.9711340295848522,
] )
,
'Yzn': np.array
([
#10,1,bal_wyeYzn
#BusTr_HV,Tr_LV,Load
0.9999999999999902, 1.024048593115119, 0.9711340292823075,
0.9999999999999848, 1.0240485931145844, 0.9711340295398292,
1.0000000000000249, 1.024048593127728, 0.9711340295848522,
] )
,
},
},
},
11:
{
0:
{
'delta' :
{
'Yyn': np.array
([
#11,0,deltaYyn
#BusTr_HV,Tr_LV,Load
1.0000001770832512, 1.0991666419999009, 1.046863039382953,
0.9999997998271506, 1.0990478952608114, 1.0459974904307656,
1.0000000230896342, 1.0991196058562567, 1.0480820977965253,
] )
,
'YNyn': np.array
([
#11,0,deltaYNyn
#BusTr_HV,Tr_LV,Load
1.000000177064337, 1.0991653032170863, 1.0468611006390927,
0.9999997997417357, 1.0990483460592901, 1.0459983357170173,
1.0000000231939636, 1.0991204912844936, 1.0480831713683516,
] )
,
'Dyn': np.array
([
#11,0,deltaDyn
#BusTr_HV,Tr_LV,Load
1.0000001770170086, 1.099165303280019, 1.046861100729514,
0.9999997997589116, 1.0990483460550085, 1.0459983357036897,
1.0000000232241157, 1.0991204912259542, 1.0480831712929268,
] )
,
'Yzn': np.array
([
#11,0,deltaYzn
#BusTr_HV,Tr_LV,Load
1.0000001770170086, 1.099165303280019, 1.046861100729514,
0.9999997997589116, 1.0990483460550085, 1.0459983357036897,
1.0000000232241157, 1.0991204912259542, 1.0480831712929268,
] )
,
},
'wye' :
{
'Yyn': np.array
([
#11,0,wyeYyn
#BusTr_HV,Tr_LV,Load
0.9999998409135958, 1.0924753274233265, 1.0291805067306592,
0.9999997887228856, 1.112638254093763, 1.0649872145063082,
1.0000003703636224, 1.0923417509837368, 1.0468846408299153,
] )
,
'YNyn': np.array
([
#11,0,wyeYNyn
#BusTr_HV,Tr_LV,Load
0.9999997198861459, 1.0990179190476412, 1.0362148303868974,
1.0000001764446427, 1.0991669773561135, 1.0507765134998273,
1.0000001036695618, 1.0991473807202723, 1.0539233691792418,
] )
,
'Dyn': np.array
([
#11,0,wyeDyn
#BusTr_HV,Tr_LV,Load
0.9999999645965844, 1.0990174387140366, 1.036214314982853,
0.9999997540341666, 1.0991675482923782, 1.0507771199594842,
1.0000002813693196, 1.0991472900387962, 1.0539232794875342,
] )
,
'Yzn': np.array
([
#11,0,wyeYzn
#BusTr_HV,Tr_LV,Load
0.9999999645965844, 1.0990174387140366, 1.036214314982853,
0.9999997540341666, 1.0991675482923782, 1.0507771199594842,
1.0000002813693196, 1.0991472900387962, 1.0539232794875342,
] )
,
},
'delta_wye' :
{
'Yyn': np.array
([
#11,0,delta_wyeYyn
#BusTr_HV,Tr_LV,Load
1.0000002867915057, 1.09511471406464, 1.0320045668742739,
0.9999996655448716, 1.102582851029247, 1.0401766570762196,
1.0000000476637207, 1.0990187740288424, 1.0431968194073924,
] )
,
'YNyn': np.array
([
#11,0,delta_wyeYNyn
#BusTr_HV,Tr_LV,Load
1.0000001623852481, 1.0989490480618516, 1.0358488170212126,
0.9999997776678232, 1.098829878782537, 1.0363599386677118,
1.0000000599471168, 1.0989238972185933, 1.0431472226133363,
] )
,
'Dyn': np.array
([
#11,0,delta_wyeDyn
#BusTr_HV,Tr_LV,Load
1.000000291479138, 1.0989488469146447, 1.0358486145520418,
0.9999996659434413, 1.0988300000349813, 1.0363600632236267,
1.0000000425775202, 1.098923977128452, 1.0431473008280179,
] )
,
'Yzn': np.array
([
#11,0,delta_wyeYzn
#BusTr_HV,Tr_LV,Load
1.000000291479138, 1.0989488469146447, 1.0358486145520418,
0.9999996659434413, 1.0988300000349813, 1.0363600632236267,
1.0000000425775202, 1.098923977128452, 1.0431473008280179,
] )
,
},
'bal_wye' :
{
'Yyn': np.array
([
#11,0,bal_wyeYyn
#BusTr_HV,Tr_LV,Load
0.999999999999994, 1.0991663222840553, 1.0502483483014522,
0.999999999999986, 1.0991663223629755, 1.0502483485683893,
1.00000000000002, 1.0991663223022374, 1.0502483485566558,
] )
,
'YNyn': np.array
([
#11,0,bal_wyeYNyn
#BusTr_HV,Tr_LV,Load
0.9999999999999934, 1.0991663223142185, 1.050248348333234,
0.9999999999999878, 1.0991663223125718, 1.0502483485153113,
1.000000000000019, 1.0991663223224817, 1.0502483485779557,
] )
,
'Dyn': np.array
([
#11,0,bal_wyeDyn
#BusTr_HV,Tr_LV,Load
0.9999999999999944, 1.099166322314217, 1.0502483483332314,
0.999999999999986, 1.0991663223125883, 1.050248348515329,
1.0000000000000195, 1.099166322322463, 1.0502483485779364,
] )
,
'Yzn': np.array
([
#11,0,bal_wyeYzn
#BusTr_HV,Tr_LV,Load
0.9999999999999944, 1.099166322314217, 1.0502483483332314,
0.999999999999986, 1.0991663223125883, 1.050248348515329,
1.0000000000000195, 1.099166322322463, 1.0502483485779364,
] )
,
},
},
1:
{
'delta' :
{
'Yyn': np.array
([
#11,1,deltaYyn
#BusTr_HV,Tr_LV,Load
1.000000177759738, 1.1266508599188314, 1.075749945733859,
0.9999997996753168, 1.1265276819882335, 1.0748995015125222,
1.0000000225649812, 1.1266018378562361, 1.076934372664356,
] )
,
'YNyn': np.array
([
#11,1,deltaYNyn
#BusTr_HV,Tr_LV,Load
1.000000176730594, 1.1266486259211201, 1.0757473443700512,
0.9999998002521623, 1.1265290107226675, 1.0749013345769867,
1.0000000230172796, 1.1266027366684568, 1.0769351304583261,
] )
,
'Dyn': np.array
([
#11,1,deltaDyn
#BusTr_HV,Tr_LV,Load
1.0000001767039686, 1.1266486259729462, 1.0757473444450258,
0.9999998002646232, 1.1265290107113315, 1.0749013345478544,
1.0000000230314439, 1.126602736628164, 1.0769351304141572,
] )
,
'Yzn': np.array
([
#11,1,deltaYzn
#BusTr_HV,Tr_LV,Load
1.0000001767039686, 1.1266486259729462, 1.0757473444450258,
0.9999998002646232, 1.1265290107113315, 1.0749013345478544,
1.0000000230314439, 1.126602736628164, 1.0769351304141572,
] )
,
},
'wye' :
{
'Yyn': np.array
([
#11,1,wyeYyn
#BusTr_HV,Tr_LV,Load
0.9999998425139852, 1.1198215550651343, 1.0582701679876008,
0.999999792808548, 1.1404037383383383, 1.0940119347447643,
1.000000364677568, 1.119678656475928, 1.0754147798091545,
] )
,
'YNyn': np.array
([
#11,1,wyeYNyn
#BusTr_HV,Tr_LV,Load
0.9999997220234313, 1.1264984365036237, 1.065423794124721,
1.0000001785338588, 1.126651120595415, 1.0795452055229118,
1.0000000994430542, 1.126629015453866, 1.0825891788506536,
] )
,
'Dyn': np.array
([
#11,1,wyeDyn
#BusTr_HV,Tr_LV,Load
0.9999999654333293, 1.1264979466596041, 1.0654232703853377,
0.9999997580954444, 1.1266517031402583, 1.079545822405393,
1.0000002764712945, 1.1266289226736226, 1.0825890870214312,
] )
,
'Yzn': np.array
([
#11,1,wyeYzn
#BusTr_HV,Tr_LV,Load
0.9999999654333293, 1.1264979466596041, 1.0654232703853377,
0.9999997580954444, 1.1266517031402583, 1.079545822405393,
1.0000002764712945, 1.1266289226736226, 1.0825890870214312,
] )
,
},
'delta_wye' :
{
'Yyn': np.array
([
#11,1,delta_wyeYyn
#BusTr_HV,Tr_LV,Load
1.0000002872593454, 1.122503013135439, 1.061107915739188,
0.9999996662661563, 1.1301536319129346, 1.069448792307849,
1.0000000464745962, 1.1264944198323028, 1.0721922685731713,
] )
,
'YNyn': np.array
([
#11,1,delta_wyeYNyn
#BusTr_HV,Tr_LV,Load
1.0000001621739123, 1.126428316031026, 1.0650458103409908,
0.9999997785161929, 1.1263065012425137, 1.0655375147447366,
1.0000000593100822, 1.12640238251751, 1.0721435619381965,
] )
,
'Dyn': np.array
([
#11,1,delta_wyeDyn
#BusTr_HV,Tr_LV,Load
1.0000002908474748, 1.1264281104824707, 1.0650456033928053,
0.9999996670234566, 1.1263066253385652, 1.065537642082384,
1.0000000421291677, 1.126402463985756, 1.0721436418376473,
] )
,
'Yzn': np.array
([
#11,1,delta_wyeYzn
#BusTr_HV,Tr_LV,Load
1.0000002908474748, 1.1264281104824707, 1.0650456033928053,
0.9999996670234566, 1.1263066253385652, 1.065537642082384,
1.0000000421291677, 1.126402463985756, 1.0721436418376473,
] )
,
},
'bal_wye' :
{
'Yyn': np.array
([
#11,1,bal_wyeYyn
#BusTr_HV,Tr_LV,Load
0.9999999999999946, 1.126649305937712, 1.0790357881145098,
0.9999999999999919, 1.1266493059651883, 1.0790357882640247,
1.0000000000000135, 1.1266493059449603, 1.0790357882526134,
] )
,
'YNyn': np.array
([
#11,1,bal_wyeYNyn
#BusTr_HV,Tr_LV,Load
0.9999999999999944, 1.126649305947411, 1.079035788124742,
0.9999999999999928, 1.126649305946962, 1.0790357882450081,
1.000000000000013, 1.1266493059535365, 1.079035788261449,
] )
,
'Dyn': np.array
([
#11,1,bal_wyeDyn
#BusTr_HV,Tr_LV,Load
0.9999999999999944, 1.1266493059473897, 1.0790357881247188,
0.9999999999999922, 1.1266493059469642, 1.079035788245011,
1.0000000000000133, 1.1266493059535063, 1.0790357882614174,
] )
,
'Yzn': np.array
([
#11,1,bal_wyeYzn
#BusTr_HV,Tr_LV,Load
0.9999999999999944, 1.1266493059473897, 1.0790357881247188,
0.9999999999999922, 1.1266493059469642, 1.079035788245011,
1.0000000000000133, 1.1266493059535063, 1.0790357882614174,
] )
,
},
},
},
}
return results |
samples/lightning/lit_mnist.py | elgalu/labml | 463 | 12724927 | <reponame>elgalu/labml
"""
Modified from https://colab.research.google.com/github/PytorchLightning/pytorch-lightning/blob/master/notebooks/01-mnist-hello-world.ipynb
Added labml logger
"""
import pytorch_lightning as pl
import torch
from pytorch_lightning.metrics.functional import accuracy
from torch import nn
from torch.nn import functional as F
from torch.utils.data import DataLoader, random_split
from torchvision import transforms
from torchvision.datasets import MNIST
from labml import lab, experiment
from labml.utils.lightning import LabMLLightningLogger
class LitMNIST(pl.LightningModule):
def __init__(self, hidden_size=64, learning_rate=2e-4):
super().__init__()
# Set our init args as class attributes
self.hidden_size = hidden_size
self.learning_rate = learning_rate
# Hardcode some dataset specific attributes
self.num_classes = 10
self.dims = (1, 28, 28)
channels, width, height = self.dims
self.transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
# Define PyTorch model
self.model = nn.Sequential(
nn.Flatten(),
nn.Linear(channels * width * height, hidden_size),
nn.ReLU(),
nn.Dropout(0.1),
nn.Linear(hidden_size, hidden_size),
nn.ReLU(),
nn.Dropout(0.1),
nn.Linear(hidden_size, self.num_classes)
)
def forward(self, x):
x = self.model(x)
return F.log_softmax(x, dim=1)
def training_step(self, batch, batch_idx):
x, y = batch
logits = self(x)
loss = F.nll_loss(logits, y)
preds = torch.argmax(logits, dim=1)
acc = accuracy(preds, y)
self.log('loss.train', loss)
self.log('accuracy.train', acc)
return loss
def validation_step(self, batch, batch_idx):
x, y = batch
logits = self(x)
loss = F.nll_loss(logits, y)
preds = torch.argmax(logits, dim=1)
acc = accuracy(preds, y)
# Calling self.log will surface up scalars for you in TensorBoard
self.log('loss.valid', loss)
self.log('accuracy.valid', acc)
return loss
def test_step(self, batch, batch_idx):
# Here we just reuse the validation_step for testing
return self.validation_step(batch, batch_idx)
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=self.learning_rate)
return optimizer
####################
# DATA RELATED HOOKS
####################
def prepare_data(self):
# download
MNIST(str(lab.get_data_path()), train=True, download=True)
MNIST(str(lab.get_data_path()), train=False, download=True)
def setup(self, stage=None):
# Assign train/val datasets for use in dataloaders
if stage == 'fit' or stage is None:
mnist_full = MNIST(str(lab.get_data_path()), train=True, transform=self.transform)
self.mnist_train, self.mnist_val = random_split(mnist_full, [55000, 5000])
# Assign test dataset for use in dataloader(s)
if stage == 'test' or stage is None:
self.mnist_test = MNIST(str(lab.get_data_path()), train=False, transform=self.transform)
def train_dataloader(self):
return DataLoader(self.mnist_train, batch_size=32)
def val_dataloader(self):
return DataLoader(self.mnist_val, batch_size=32)
def test_dataloader(self):
return DataLoader(self.mnist_test, batch_size=32)
def main():
experiment.create(name='mnist_lit_lightening', disable_screen=True)
model = LitMNIST()
trainer = pl.Trainer(gpus=1, max_epochs=3, progress_bar_refresh_rate=20, logger=LabMLLightningLogger())
with experiment.start():
trainer.fit(model)
if __name__ == '__main__':
main()
|
tests/modules/span_extractors/self_attentive_span_extractor_test.py | MSLars/allennlp | 11,433 | 12724935 | import numpy
import torch
from allennlp.modules.span_extractors import SpanExtractor, SelfAttentiveSpanExtractor
from allennlp.common.params import Params
class TestSelfAttentiveSpanExtractor:
def test_locally_normalised_span_extractor_can_build_from_params(self):
params = Params(
{
"type": "self_attentive",
"input_dim": 7,
"num_width_embeddings": 5,
"span_width_embedding_dim": 3,
}
)
extractor = SpanExtractor.from_params(params)
assert isinstance(extractor, SelfAttentiveSpanExtractor)
assert extractor.get_output_dim() == 10 # input_dim + span_width_embedding_dim
def test_attention_is_normalised_correctly(self):
input_dim = 7
sequence_tensor = torch.randn([2, 5, input_dim])
extractor = SelfAttentiveSpanExtractor(input_dim=input_dim)
assert extractor.get_output_dim() == input_dim
assert extractor.get_input_dim() == input_dim
# In order to test the attention, we'll make the weight which computes the logits
# zero, so the attention distribution is uniform over the sentence. This lets
# us check that the computed spans are just the averages of their representations.
extractor._global_attention._module.weight.data.fill_(0.0)
extractor._global_attention._module.bias.data.fill_(0.0)
indices = torch.LongTensor(
[[[1, 3], [2, 4]], [[0, 2], [3, 4]]]
) # smaller span tests masking.
span_representations = extractor(sequence_tensor, indices)
assert list(span_representations.size()) == [2, 2, input_dim]
# First element in the batch.
batch_element = 0
spans = span_representations[batch_element]
# First span.
mean_embeddings = sequence_tensor[batch_element, 1:4, :].mean(0)
numpy.testing.assert_array_almost_equal(spans[0].data.numpy(), mean_embeddings.data.numpy())
# Second span.
mean_embeddings = sequence_tensor[batch_element, 2:5, :].mean(0)
numpy.testing.assert_array_almost_equal(spans[1].data.numpy(), mean_embeddings.data.numpy())
# Now the second element in the batch.
batch_element = 1
spans = span_representations[batch_element]
# First span.
mean_embeddings = sequence_tensor[batch_element, 0:3, :].mean(0)
numpy.testing.assert_array_almost_equal(spans[0].data.numpy(), mean_embeddings.data.numpy())
# Second span.
mean_embeddings = sequence_tensor[batch_element, 3:5, :].mean(0)
numpy.testing.assert_array_almost_equal(spans[1].data.numpy(), mean_embeddings.data.numpy())
# Now test the case in which we have some masked spans in our indices.
indices_mask = torch.tensor([[True, True], [True, False]])
span_representations = extractor(sequence_tensor, indices, span_indices_mask=indices_mask)
# First element in the batch.
batch_element = 0
spans = span_representations[batch_element]
# First span.
mean_embeddings = sequence_tensor[batch_element, 1:4, :].mean(0)
numpy.testing.assert_array_almost_equal(spans[0].data.numpy(), mean_embeddings.data.numpy())
# Second span.
mean_embeddings = sequence_tensor[batch_element, 2:5, :].mean(0)
numpy.testing.assert_array_almost_equal(spans[1].data.numpy(), mean_embeddings.data.numpy())
# Now the second element in the batch.
batch_element = 1
spans = span_representations[batch_element]
# First span.
mean_embeddings = sequence_tensor[batch_element, 0:3, :].mean(0)
numpy.testing.assert_array_almost_equal(spans[0].data.numpy(), mean_embeddings.data.numpy())
# Second span was masked, so should be completely zero.
numpy.testing.assert_array_almost_equal(spans[1].data.numpy(), numpy.zeros([input_dim]))
def test_widths_are_embedded_correctly(self):
input_dim = 7
max_span_width = 5
span_width_embedding_dim = 3
output_dim = input_dim + span_width_embedding_dim
extractor = SelfAttentiveSpanExtractor(
input_dim=input_dim,
num_width_embeddings=max_span_width,
span_width_embedding_dim=span_width_embedding_dim,
)
assert extractor.get_output_dim() == output_dim
assert extractor.get_input_dim() == input_dim
sequence_tensor = torch.randn([2, max_span_width, input_dim])
indices = torch.LongTensor(
[[[1, 3], [0, 4], [0, 0]], [[0, 2], [1, 4], [2, 2]]]
) # smaller span tests masking.
span_representations = extractor(sequence_tensor, indices)
assert list(span_representations.size()) == [2, 3, output_dim]
width_embeddings = extractor._span_width_embedding.weight.data.numpy()
widths_minus_one = indices[..., 1] - indices[..., 0]
for element in range(indices.size(0)):
for span in range(indices.size(1)):
width = widths_minus_one[element, span].item()
width_embedding = span_representations[element, span, input_dim:]
numpy.testing.assert_array_almost_equal(
width_embedding.data.numpy(), width_embeddings[width]
)
|
tests/__init__.py | iiiusky/Sasila | 327 | 12724938 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import unittest2 as unittest
all_suite = unittest.TestLoader().discover(os.path.dirname(__file__), "test_*.py")
|
test/mitmproxy/proxy/layers/test_socks5_fuzz.py | KarlParkinson/mitmproxy | 24,939 | 12724966 | from hypothesis import given
from hypothesis.strategies import binary
from mitmproxy import options
from mitmproxy.connection import Client
from mitmproxy.proxy.context import Context
from mitmproxy.proxy.events import DataReceived
from mitmproxy.proxy.layers.modes import Socks5Proxy
opts = options.Options()
tctx = Context(Client(("client", 1234), ("127.0.0.1", 8080), 1605699329), opts)
@given(binary())
def test_socks5_fuzz(data):
layer = Socks5Proxy(tctx)
list(layer.handle_event(DataReceived(tctx.client, data)))
|
elliot/recommender/content_based/VSM/__init__.py | gategill/elliot | 175 | 12724969 | from .vector_space_model import VSM |
tests/__init__.py | asmeurer/nikola | 1,901 | 12724971 | """Tests for Nikola."""
|
components/isceobj/Util/geo/exceptions.py | vincentschut/isce2 | 1,133 | 12724976 | #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Copyright 2012 California Institute of Technology. ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# United States Government Sponsorship acknowledged. This software is subject to
# U.S. export control laws and regulations and has been classified as 'EAR99 NLR'
# (No [Export] License Required except when exporting to an embargoed country,
# end user, or in support of a prohibited end use). By downloading this software,
# the user agrees to comply with all applicable U.S. export laws and regulations.
# The user has the responsibility to obtain export licenses, or other export
# authority as may be required before exporting this software to any 'EAR99'
# embargoed foreign country or citizen of those countries.
#
# Author: <NAME>
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""Some specialized arithmetic exceptions for
Vector and Affine Spaces.
"""
## \namespace geo::exceptions
## <a href="http://docs.python.org/2/library/exceptions.html">Exceptions</a>
## for Vector and Affines spaces.
## Base class for geometric errors
class GeometricException(ArithmeticError):
"""A base class- not to be raised"""
pass
## A reminder to treat geometric objects properly.
class NonCovariantOperation(GeometricException):
"""Raise when you do something that is silly[1], like adding
a Scalar to a Vector\.
[1]Silly: (adj.) syn: non-covariant"""
pass
## A reminder that Affine space are affine, and vector spaces are not.
class AffineSpaceError(GeometricException):
"""Raised when you forget the points in an affine space are
not vector in a vector space, and visa versa"""
pass
## A catch-all for overlaoded operations getting non-sense.
class UndefinedGeometricOperation(GeometricException):
"""This will raised if you get do an opeation that has been defined for
a Tensor/Affine/Coordinate argument, but you just have a non-sense
combinabtion, like vector**vector.
"""
pass
## This function should make a generic error message
def error_message(op, left, right):
"""message = error_message(op, left, right)
op is a method or a function
left is a geo object
right is probably a geo object.
message is what did not work
"""
return "%s(%s, %s)"%(op.__name__,
left.__class__.__name__,
right.__class__.__name__)
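## A hedged usage sketch (illustration only, not part of the original module):
## with a hypothetical operation and operands such as
##
##     class Vector(object): pass
##     class Scalar(object): pass
##     def dot(a, b): pass
##
## the helper builds the string embedded in the exceptions above, e.g.
## error_message(dot, Vector(), Scalar()) evaluates to 'dot(Vector, Scalar)'.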
|
tools/gen_header_v3.py | Kill-Console/xresloader | 219 | 12724987 | <reponame>Kill-Console/xresloader<filename>tools/gen_header_v3.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import re
import string
import glob
import sys
from subprocess import Popen
work_dir = os.getcwd()
script_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(script_dir)
os.chdir(script_dir)
os.chdir(os.path.join('..'))
project_dir = os.getcwd()
proto_dir = os.path.join(project_dir, 'header')
proto_file = os.path.join(proto_dir, 'pb_header_v3.proto')
extension_proto_file = glob.glob(os.path.join(proto_dir, 'extensions', 'v3', '*.proto'))
os.chdir(work_dir)
java_out_dir = proto_dir
pb_out_file = os.path.join(proto_dir, 'pb_header_v3.pb')
from find_protoc import find_protoc
common_args = [
"-I", os.path.join(proto_dir, 'extensions', 'v3'),
"-I", os.path.join(proto_dir, 'extensions'),
"-I", os.path.join(proto_dir)
]
# The Java sources are generated as the non-LITE version
print('[PROCESS] generate java source ... ')
Popen(
[
find_protoc(), *common_args,
'--java_out', java_out_dir,
proto_file, *extension_proto_file
],
cwd=os.path.join(proto_dir, 'extensions'),
shell=False).wait()
print('[PROCESS] generate java source done.')
# The pb file is generated as the LITE version
print('[PROCESS] generate proto pb file ... ')
Popen(
[
find_protoc(), *common_args,
'-o', pb_out_file,
proto_file
],
shell=False).wait()
print('[PROCESS] generate proto pb file done.')
# The pb file is generated as the LITE version
print('[PROCESS] generate proto pb file ... ')
Popen(
[
find_protoc(),
"-I", os.path.join(proto_dir, 'extensions', 'v3'),
"-I", os.path.join(proto_dir, 'extensions'),
'-o', os.path.join(script_dir, 'extensions.pb'),
*extension_proto_file, *glob.glob(os.path.join(proto_dir, 'extensions','google', 'protobuf', '*.proto'))
],
shell=False).wait()
print('[PROCESS] generate protobuf.pb file done.')
|
Python/ch6-1_b.py | andjor/deep-learning-with-csharp-and-cntk | 120 | 12725039 | import time
import datetime
import os
import sys
import numpy as np
use_cntk = True
if use_cntk:
try:
base_directory = os.path.split(sys.executable)[0]
os.environ['PATH'] += ';' + base_directory
import cntk
os.environ['KERAS_BACKEND'] = 'cntk'
except ImportError:
print('CNTK not installed')
else:
os.environ['KERAS_BACKEND'] = 'tensorflow'
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import keras
def learning_word_embeddings_with_the_embedding_layer():
# Number of words to consider as features
max_features = 10000
# Cut texts after this number of words
# (among top max_features most common words)
maxlen = 20
# Load the data as lists of integers.
(x_train, y_train), (x_test, y_test) = keras.datasets.imdb.load_data(num_words=max_features)
# This turns our lists of integers
# into a 2D integer tensor of shape `(samples, maxlen)`
x_train = keras.preprocessing.sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = keras.preprocessing.sequence.pad_sequences(x_test, maxlen=maxlen)
model = keras.models.Sequential()
# We specify the maximum input length to our Embedding layer
# so we can later flatten the embedded inputs
model.add(keras.layers.Embedding(max_features, 8, input_length=maxlen))
# After the Embedding layer,
# our activations have shape `(samples, maxlen, 8)`.
# We flatten the 3D tensor of embeddings
# into a 2D tensor of shape `(samples, maxlen * 8)`
model.add(keras.layers.Flatten())
# We add the classifier on top
model.add(keras.layers.Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])
model.summary()
history = model.fit(x_train, y_train, epochs=10, batch_size=32, validation_split=0.2)
def learning_word_embeddings_with_the_embedding_layer_cntk():
x_train, y_train, x_test, y_test = load_from_files()
max_features = 10000
maxlen = 20
embedding_dim = 8
x = cntk.input_variable(shape=(maxlen,), dtype=np.float32)
y = cntk.input_variable(shape=(1,), dtype=np.float32)
model = cntk.one_hot(x, num_classes=max_features, sparse_output=True)
model = cntk.layers.Embedding(embedding_dim)(model)
model = cntk.layers.Dense(1, activation=cntk.sigmoid)(model)
loss_function = cntk.binary_cross_entropy(model.output, y)
round_predictions = cntk.round(model.output)
equal_elements = cntk.equal(round_predictions, y)
accuracy_function = cntk.reduce_mean(equal_elements, axis=0)
max_epochs = 30
batch_size = 32
learner = cntk.adam(model.parameters, cntk.learning_parameter_schedule_per_sample(0.0001), cntk.learning_parameter_schedule_per_sample(0.99))
progress_printer = cntk.logging.ProgressPrinter(tag='Training', num_epochs=max_epochs)
trainer = cntk.Trainer(model, (loss_function, accuracy_function), [learner], progress_printer)
evaluator = cntk.Evaluator(accuracy_function)
cntk_train(x, y, x_train, y_train, max_epochs, batch_size, trainer, evaluator)
def cntk_train(x, y, x_train, y_train, max_epochs, batch_size, trainer, evaluator):
N = len(x_train)
y_train = np.expand_dims(y_train, axis=1)
train_features = x_train[:int(N*0.8)]
train_labels = y_train[:int(N*0.8)]
validation_features = x_train[int(N*0.8):]
validation_labels = y_train[int(N*0.8):]
for current_epoch in range(max_epochs):
epoch_start_time = time.time()
train_indices = np.random.permutation(train_features.shape[0])
pos = 0
epoch_training_error = 0
num_batches = 0
while pos < len(train_indices):
pos_end = min(pos + batch_size, len(train_indices))
x_train_minibatch = train_features[train_indices[pos:pos_end]]
y_train_minibatch = train_labels[train_indices[pos:pos_end]]
trainer.train_minibatch({x: x_train_minibatch, y: y_train_minibatch})
epoch_training_error += trainer.previous_minibatch_evaluation_average
num_batches += 1
pos = pos_end
epoch_training_error /= num_batches
epoch_validation_error = 0
num_batches = 0
pos = 0
while pos < len(validation_features):
pos_end = min(pos + batch_size, len(validation_features))
x_train_minibatch = validation_features[pos:pos_end]
y_train_minibatch = validation_labels[pos:pos_end]
previous_minibatch_evaluation_average = evaluator.test_minibatch({x: x_train_minibatch, y: y_train_minibatch})
epoch_validation_error += previous_minibatch_evaluation_average
num_batches += 1
pos = pos_end
epoch_validation_error /= num_batches
print('Epoch Elapsed Time: {0}, training_accuracy={1:.3f}, evaluation_accuracy={2:.3f}'.format(
datetime.timedelta(seconds=time.time() - epoch_start_time),
epoch_training_error, epoch_validation_error))
def save_to_files(x_train, y_train, x_test, y_test):
x_train = np.ascontiguousarray(x_train.astype(np.float32))
y_train = np.ascontiguousarray(y_train.astype(np.float32))
x_test = np.ascontiguousarray(x_test.astype(np.float32))
y_test = np.ascontiguousarray(y_test.astype(np.float32))
print(x_train.shape, y_train.shape, x_test.shape, y_test.shape)
x_train.tofile('x_train_imdb.bin')
y_train.tofile('y_train_imdb.bin')
x_test.tofile('x_test_imdb.bin')
y_test.tofile('y_test_imdb.bin')
def load_from_files(x_shape=(25000, 20), y_shape=(25000,)):
print('Loading .bin files')
x_train = np.fromfile('x_train_imdb.bin', dtype=np.float32)
y_train = np.fromfile('y_train_imdb.bin', dtype=np.float32)
x_test = np.fromfile('x_test_imdb.bin', dtype=np.float32)
y_test = np.fromfile('y_test_imdb.bin', dtype=np.float32)
x_train = np.reshape(x_train, newshape=x_shape)
y_train = np.reshape(y_train, newshape=y_shape)
x_test = np.reshape(x_test, newshape=x_shape)
y_test = np.reshape(y_test, newshape=y_shape)
return x_train, y_train, x_test, y_test
class Constants:
maxlen = 100 # We will cut reviews after 100 words
training_samples = 200 # We will be training on 200 samples
validation_samples = 10000 # We will be validating on 10000 samples
max_words = 10000 # We will only consider the top 10,000 words in the dataset
embedding_dim = 100
imdb_dir = 'C:\\Users\\anastasios\\Downloads\\aclImdb'
def load_texts_labels(path):
import tqdm
labels = []
texts = []
for label_type in ['neg', 'pos']:
dir_name = os.path.join(path, label_type)
print('\nLoading ', dir_name, '\n', flush=True)
for fname in tqdm.tqdm(os.listdir(dir_name)):
if fname[-4:] == '.txt':
f = open(os.path.join(dir_name, fname), encoding='utf8')
texts.append(f.read())
f.close()
if label_type == 'neg':
labels.append(0)
else:
labels.append(1)
return texts, labels
def tokenize_alImdb():
import keras.preprocessing.text
train_dir = os.path.join(Constants.imdb_dir, 'train')
texts, labels = load_texts_labels(train_dir)
tokenizer = keras.preprocessing.text.Tokenizer(num_words=Constants.max_words)
print('\n\nRunning tokenizer...', end='', flush=True)
tokenizer.fit_on_texts(texts)
return tokenizer, texts, labels
def from_raw_text_to_word_embeddings():
import numpy as np
import keras.preprocessing.sequence
tokenizer, texts, labels = tokenize_alImdb()
sequences = tokenizer.texts_to_sequences(texts)
word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index))
data = keras.preprocessing.sequence.pad_sequences(sequences, maxlen=Constants.maxlen)
data = np.asarray(data, dtype=np.float32)
labels = np.asarray(labels, dtype=np.float32)
print('Shape of data tensor:', data.shape)
print('Shape of label tensor:', labels.shape)
# Split the data into a training set and a validation set
# But first, shuffle the data, since we started from data
    # where samples are ordered (all negative first, then all positive).
indices = np.arange(data.shape[0])
np.random.shuffle(indices)
data = data[indices]
labels = labels[indices]
x_train = data[:Constants.training_samples]
y_train = labels[:Constants.training_samples]
x_val = data[Constants.training_samples: Constants.training_samples + Constants.validation_samples]
y_val = labels[Constants.training_samples: Constants.training_samples + Constants.validation_samples]
return tokenizer, x_train, y_train, x_val, y_val
def preprocess_embeddings():
import numpy as np
import tqdm
glove_dir = 'C:\\Users\\anastasios\\Downloads\\glove.6B'
embeddings_index = {}
glove_path = os.path.join(glove_dir, 'glove.6B.100d.txt')
f = open(glove_path, encoding='utf8')
print('Processing ', glove_path)
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
f.close()
print('Found %s word vectors.' % len(embeddings_index))
return embeddings_index
def build_model():
model = keras.models.Sequential()
model.add(keras.layers.Embedding(Constants.max_words, Constants.embedding_dim, input_length=Constants.maxlen))
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(32, activation='relu'))
model.add(keras.layers.Dense(1, activation='sigmoid'))
model.summary()
return model
def use_glove_word_embeddings_cntk(preload_weights=False):
tokenizer, x_train, y_train, x_val, y_val = from_raw_text_to_word_embeddings()
x = cntk.input_variable(shape=(Constants.maxlen,), dtype=np.float32)
y = cntk.input_variable(shape=(1,), dtype=np.float32)
model = cntk.one_hot(x, num_classes=Constants.max_words, sparse_output=True)
if preload_weights is True:
embedding_matrix = compute_embedding_matrix(tokenizer)
assert (Constants.embedding_dim == embedding_matrix.shape[0]) or (Constants.embedding_dim == embedding_matrix.shape[1])
model = cntk.layers.Embedding(weights=embedding_matrix)(model)
else:
model = cntk.layers.Embedding(Constants.embedding_dim)(model)
model = cntk.layers.Dense(32, activation=cntk.relu)(model)
model = cntk.layers.Dense(1, activation=cntk.sigmoid)(model)
loss_function = cntk.binary_cross_entropy(model.output, y)
round_predictions = cntk.round(model.output)
equal_elements = cntk.equal(round_predictions, y)
accuracy_function = cntk.reduce_mean(equal_elements, axis=0)
max_epochs = 10
batch_size = 32
learner = cntk.adam(model.parameters, cntk.learning_parameter_schedule_per_sample(0.0001), cntk.learning_parameter_schedule_per_sample(0.99))
progress_printer = cntk.logging.ProgressPrinter(tag='Training', num_epochs=max_epochs)
trainer = cntk.Trainer(model, (loss_function, accuracy_function), [learner], progress_printer)
evaluator = cntk.Evaluator(accuracy_function)
cntk_train(x, y, x_train, y_train, max_epochs, batch_size, trainer, evaluator)
def compute_embedding_matrix(tokenizer):
embeddings_index = preprocess_embeddings()
embedding_matrix = np.zeros((Constants.max_words, Constants.embedding_dim))
for word, i in tokenizer.word_index.items():
embedding_vector = embeddings_index.get(word)
if i < Constants.max_words:
if embedding_vector is not None:
# Words not found in embedding index will be all-zeros.
embedding_matrix[i] = embedding_vector
return embedding_matrix
def use_glove_word_embeddings(preload_weights=True):
tokenizer, x_train, y_train, x_val, y_val = from_raw_text_to_word_embeddings()
model = build_model()
if preload_weights:
embedding_matrix = compute_embedding_matrix(tokenizer)
model.layers[0].set_weights([embedding_matrix])
model.layers[0].trainable = False
model.compile(optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['acc'])
history = model.fit(x_train, y_train,
epochs=10,
batch_size=32,
validation_data=(x_val, y_val))
model.save_weights('pre_trained_glove_model.h5')
plot_results(history)
def plot_results(history):
import matplotlib.pyplot as plt
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
def evaluate_on_test_data():
import numpy as np
test_dir = os.path.join(Constants.imdb_dir, 'test')
tokenizer, _, _ = tokenize_alImdb()
texts, labels = load_texts_labels(test_dir)
sequences = tokenizer.texts_to_sequences(texts)
x_test = keras.preprocessing.sequence.pad_sequences(sequences, maxlen=Constants.maxlen)
y_test = np.asarray(labels)
model = build_model()
model.load_weights('pre_trained_glove_model.h5')
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])
print(model.evaluate(x_test, y_test))
if __name__ == '__main__':
learning_word_embeddings_with_the_embedding_layer()
# learning_word_embeddings_with_the_embedding_layer_cntk()
use_glove_word_embeddings(preload_weights=True)
# use_glove_word_embeddings_cntk(preload_weights=True)
|
angrutils/expr.py | Ashaya123/angr-utils | 226 | 12725040 | # Expression evaluation routines
import claripy
def get_signed_range(se, expr):
"""
Calculate the range of the expression with signed boundaries
"""
size = expr.size()
umin = umax = smin = smax = None
if not sat_zero(se, expr):
try:
umin = se.min(expr, extra_constraints=[claripy.Extract(size-1,size-1,expr) == 0])
umax = se.max(expr, extra_constraints=[claripy.Extract(size-1,size-1,expr) == 0])
return (umin, umax)
except:
pass
try:
smin = -(1 << size) + se.min(expr, extra_constraints=[claripy.Extract(size-1,size-1,expr) == 1])
smax = -(1 << size) + se.max(expr, extra_constraints=[claripy.Extract(size-1,size-1,expr) == 1])
return (smin, smax)
except:
pass
return None
else:
try:
umax = se.max(expr, extra_constraints=[claripy.Extract(size-1,size-1,expr) == 0])
smin = 0
try:
smin = -(1 << size) + se.min(expr, extra_constraints=[claripy.Extract(size-1,size-1,expr) == 1])
except:
pass
return (smin, umax)
except:
pass
return None
def sat_zero(se, expr):
return se.satisfiable(extra_constraints=([expr == 0]))
def sat_negative(se, expr):
size = expr.size()
return se.satisfiable(extra_constraints=([claripy.Extract(size-1,size-1,expr) == 1]))
def sat_positive(se, expr):
    size = expr.size()
    return se.satisfiable(extra_constraints=([claripy.Extract(size-1,size-1,expr) == 0]))
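# Hedged usage sketch (illustration only; assumes a claripy-style solver whose
# min/max/satisfiable accept extra_constraints, which is what the code above relies on):
#
#     import claripy
#     s = claripy.Solver()
#     x = claripy.BVS('x', 32)
#     s.add(claripy.ULE(x, 5))
#     get_signed_range(s, x)   # expected to yield (0, 5) under this constraint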
|
heath/main.py | 121121321/chaoxing_auto_sign | 287 | 12725048 | # -*- coding: utf8 -*-
import os
import re
import json
import configparser
import threading
from datetime import datetime
from urllib import parse
from urllib.parse import quote
import requests
class HeathReport(object):
def __init__(self, user):
"""
        :params username: phone number or student ID
        :params password: password
        :params schoolid: school code; fill in when logging in with a student ID
"""
headers = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.125 Safari/537.36',
'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8',
'X-Requested-With': 'XMLHttpRequest',
}
self._username = user['username']
self._password = user['password']
self._schoolid = user['schoolid']
self._session = requests.session()
self._session.headers = headers
def _login(self):
"""
        Login: supports phone-number and email login
"""
login_api = "https://passport2.chaoxing.com/api/login"
params = {
"name": self._username,
"pwd": <PASSWORD>,
"verify": "0",
"schoolid": self._schoolid if self._schoolid else ""
}
resp = self._session.get(login_api, params=params)
if resp.status_code == 403:
raise Exception("403,登录请求被拒绝")
data = json.loads(resp.text)
if data['result'] is False:
raise Exception(data['errorMsg'])
return data
def _get_last_heath_info(self) -> dict:
"""
        Fetch the health information submitted last time
"""
params = {
"cpage": "1",
"formId": "7185",
"enc": "f837c93e0de9d9ad82db707b2c27241e",
"formAppId": ""
}
api = 'http://office.chaoxing.com/data/apps/forms/fore/forms/user/last/info'
resp = self._session.get(api, params=params)
raw_data = json.loads(resp.text)
if not raw_data['data']:
            raise Exception("Last submitted data is empty; today's report may have already been submitted")
return raw_data
def _read_form_data_file(self):
with open('./report_template.json', 'r', encoding='utf-8') as f:
return f.read()
def _set_value(self, last_report_data, form_data_template):
def get_val(data: dict, _id: str):
return data['data']['formsUser']['formIdValueData'][_id]['groupValues'][0]['values'][0][0]
username = get_val(last_report_data, '1')['val']
id_number = get_val(last_report_data, '2')['val']
telephone_number = get_val(last_report_data, '3')['val']
address = get_val(last_report_data, '4')
form_data = form_data_template.replace("$cx_username", username). \
replace("$cx_id_number", id_number). \
replace("$cx_telephone_number", telephone_number). \
replace("$cx_address", address['address']). \
replace("$cx_lng", address['lng']). \
replace("$cx_lat", address['lat'])
return form_data
@staticmethod
def form_data_to_urlencoded(params: dict, form_data: str) -> str:
"""
dict -> urlencoded
"""
payload = parse.urlencode(params)
payload += "&formData=" + quote(form_data, 'utf-8')
payload = payload.replace("%2B", "+")
return payload
def _daily_report(self, check_code: str, form_data: str) -> dict:
"""
        Submit today's health report
"""
save_api = "http://office.chaoxing.com/data/apps/forms/fore/user/save?lookuid=127973604"
params = {
"gatherId": "0",
"formId": "7185",
"formAppId": "",
"version": 6,
"checkCode": check_code,
"enc": "f837c93e0de9d9ad82db707b2c27241e",
"anonymous": 0,
"ext": "",
"t": 1,
"uniqueCondition": [],
"gverify": ""
}
payload = self.form_data_to_urlencoded(params, form_data)
resp = self._session.post(save_api, data=payload)
return json.loads(resp.text)
def _request_form_page(self):
"""
        Request the form page
        @return: the HTTP response of the form page
        @rtype: requests.Response
"""
form_url = "http://office.chaoxing.com/front/web/apps/forms/fore/apply?uid=127973604&code=l5RJsW2w&mappId=4545821&appId=1e354ddb52a743e88ed19a3704b1cf1a&appKey=127G2jhIhl05mw3S&id=7185&enc=f837c93e0de9d9ad82db707b2c27241e&state=39037&formAppId=&fidEnc=b06cba4a51ac2253"
return self._session.get(url=form_url)
def _get_check_code(self):
"""
        Parse the form page to get the checkCode
@return: checkCode
@rtype: str
"""
resp = self._request_form_page()
code = re.findall(r"checkCode.*'(.*)'", resp.text)
if code:
return code[0]
else:
raise Exception("校验码获取失败")
def daily_report(self) -> dict:
"""
        Entry point for submitting the health report
"""
self._login()
last_report_data = self._get_last_heath_info()
form_data_template = self._read_form_data_file()
form_data = self._set_value(last_report_data, form_data_template)
check_code = self._get_check_code()
return self._daily_report(check_code=check_code, form_data=form_data)
def start_report(user):
result = HeathReport(user).daily_report()
print(f"{user['username']} - 打卡结果: \n {result}")
try:
sendkey = user['sendkey']
except Exception as exc:
sendkey = None
if not sendkey:
print(f"{user['username']} - 未开启消息推送")
return
resp = server_chan_send(result, sendkey)
try:
resp.raise_for_status()
print(f"{user['username']} - 本次打卡详情已发送")
except Exception as exc:
text = exc.response.json()
print(f"{user['username']} - 消息发送失败,原因:{text['info']}")
def server_chan_send(msg, key):
"""server酱将消息推送"""
params = {
        'title': 'Health daily report check-in update!\n{}'.format(datetime.now().strftime('%Y-%m-%d %H:%M:%S')),
'desp': msg
}
resp = requests.request(
method="GET",
url=f"https://sctapi.ftqq.com/{key}.send?title=messagetitle",
params=params
)
return resp
def load_user_config():
"""
    Load the configuration for each user
"""
config = configparser.ConfigParser()
users = []
if os.getenv("cx_env") == "dev":
        # Load the development-environment config file
config.read('./config.dev.ini')
else:
config.read('./config.ini')
for sections in config:
section = config[sections]
if 'user' not in str(section):
continue
else:
try:
open_status = section['open']
except Exception as exc:
raise Exception("open字段必填,true 或者 false") from exc
if open_status in ("true", "True"):
users.append(section)
username = os.environ.get('username')
if username:
users.append({
'username': os.environ.get('username'),
            'password': os.environ.get('password'),
'schoolid': os.environ.get('schoolid'),
            'sendkey': os.environ.get('sendkey')
})
if not users:
raise Exception("当前暂无账号执行,请在config.ini 或 环境变量中配置账号密码")
return users
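# Hedged sketch of the expected config.ini layout (an assumption inferred from the
# fields read above, not taken verbatim from the project docs): each enabled account
# lives in a section whose name contains "user", e.g.
#
#     [user1]
#     open = true
#     username = 13800000000
#     password = your_password
#     schoolid =
#     sendkey = your_serverchan_sendkey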
def main_handler(event=None, context=None):
if event is not None:
query: dict = event.get("queryString", "")
if query:
user = dict(
username=query.get("name", None),
password=query.get("<PASSWORD>", None),
schoolid=query.get("schoolid", ""),
send_key=query.get("skey", None),
)
try:
h = HeathReport(user)
result = h.daily_report()
except Exception as e:
result = e
return result
else:
threads = []
for user in load_user_config():
t = threading.Thread(target=start_report, args=(user,))
t.start()
threads.append(t)
for t in threads:
t.join()
if __name__ == '__main__':
main_handler()
|
doc/integrations/label-studioAPI/setup.py | novium258/cortx-1 | 552 | 12725066 | <filename>doc/integrations/label-studioAPI/setup.py
from setuptools import setup
setup(
name='Cortx S3-Label Studio Integration',
version='1.0.0',
packages=[ '' ],
url='',
license='MIT ',
author='sumit',
author_email='<EMAIL>',
    description='Cortx S3 integration with Label Studio, one of the best open-source data annotation tools, used by companies like Nvidia, IBM, and Cloudflare. Uses the Cortx ecosystem to store the world\'s growing unstructured data and make AI/ML tasks faster. Cortx provides scalability, efficiency and security anytime.'
)
|
Python3/862.py | rakhi2001/ecom7 | 854 | 12725086 | <gh_stars>100-1000
__________________________________________________________________________________________________
sample 856 ms submission
class Solution:
def shortestSubarray(self, nums: List[int], k: int) -> int:
NOT_FOUND = -1
if not nums:
return NOT_FOUND
n = len(nums)
ans = n + 1
total = 0
queue = collections.deque([(-1, 0)])
for i, x in enumerate(nums):
total += x
if x > 0:
while queue and total - queue[0][1] >= k:
ans = min(ans, i - queue.popleft()[0])
else:
while queue and total <= queue[-1][1]:
queue.pop()
queue.append((i, total))
return ans if ans <= n else NOT_FOUND
__________________________________________________________________________________________________
sample 17160 kb submission
class Solution:
def shortestSubarray(self, A: List[int], K: int) -> int:
if len(A) ==0:
return -1
begin=0
end=0
# start with a non-negative number
while A[begin]<=0 and begin<len(A):
begin +=1
end+=1
c_sum=0
size = len(A)
found=False
while end <len(A):
c_sum += A[end]
modified= False if c_sum <K else True
if c_sum >= K:
found=True
# we know the first element won't be negative
# if the element is negative try and spread it's value to previous elements
if A[end]<0:
if begin == end:
begin+=1
c_sum=0
else:
i=1
to_save = abs(A[end])
while to_save > 0:
while A[end-i] ==0 and end-i>=begin:
i+=1
if end-i <begin:
begin = end+1
c_sum=0
to_save=0
break
else:
if to_save > A[end-i]:
to_save -=A[end-i]
A[end-i]=0
else:
A[end-i] -= to_save
A[end]=0
to_save=0
else:
# while the element you are adding compensates
# for moving the begin by one to the right and makes/keeps sum >= K
while A[end] >= A[begin] + K-(c_sum-A[end]):
c_sum-=A[begin]
if c_sum <K:
c_sum+=A[begin]
break
else:
begin+=1
size = min(size,end-begin+1)
if c_sum >=K:
size = min(size,end-begin+1)
end +=1
if found:
return size
else:
return -1
__________________________________________________________________________________________________
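# Hedged usage note (not part of the submissions above): for the classic example
# nums = [2, -1, 2] with k = 3, both solutions return 3, because the whole array
# is the shortest subarray whose sum reaches 3.
#
#     Solution().shortestSubarray([2, -1, 2], 3)   # -> 3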
|
linformer_pytorch/linformer_pytorch.py | tatp22/linformer-pytorch | 322 | 12725097 | <filename>linformer_pytorch/linformer_pytorch.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.checkpoint import checkpoint
def identity(x, *args, **kwargs):
return x
def get_act(activation):
if activation == "gelu":
return F.gelu
if activation == "relu":
return F.relu
return None
def gen_causal_mask(input_size, dim_k, full_attention=False):
"""
Generates a causal mask of size (input_size, dim_k) for linformer
Else, it generates (input_size, input_size) for full attention
"""
if full_attention:
return (torch.triu(torch.ones(input_size, input_size))==1).transpose(0,1)
return (torch.triu(torch.ones(dim_k, input_size))==1).transpose(0,1)
def get_EF(input_size, dim, method="learnable", head_dim=None, bias=True):
"""
    Returns the E or F matrix, initialized via xavier initialization.
This is the recommended way to do it according to the authors of the paper.
Includes a method for convolution, as well as a method for no additional params.
"""
assert method == "learnable" or method == "convolution" or method == "no_params", "The method flag needs to be either 'learnable', 'convolution', or 'no_params'!"
if method == "convolution":
conv = nn.Conv1d(head_dim, head_dim, kernel_size=int(input_size/dim), stride=int(input_size/dim))
return conv
if method == "no_params":
mat = torch.zeros((input_size, dim))
torch.nn.init.normal_(mat, mean=0.0, std=1/dim)
return mat
lin = nn.Linear(input_size, dim, bias)
torch.nn.init.xavier_normal_(lin.weight)
return lin
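# Hedged illustration (not part of the original file): with the default "learnable"
# method, get_EF simply builds an xavier-initialized nn.Linear that projects the
# sequence dimension from input_size down to dim, e.g.
#
#     E = get_EF(input_size=512, dim=128)   # nn.Linear(512, 128)
#     K = torch.randn(2, 64, 512)           # (batch, head_dim, seq_len)
#     E(K).shape                            # -> torch.Size([2, 64, 128])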
class Residual(nn.Module):
"""
    Implementation taken from
https://github.com/lucidrains/sinkhorn-transformer/blob/master/sinkhorn_transformer/sinkhorn_transformer.py
However, I do postnorm instead of prenorm.
"""
def __init__(self, fn, input_channels=0, output_channels=0):
super(Residual, self).__init__()
self.fn = fn
self.resample = nn.Linear(input_channels, output_channels) if input_channels != output_channels else None
self.norm = nn.LayerNorm(output_channels)
def forward(self, tensor, **kwargs):
if self.resample is not None:
tensor = self.resample(tensor) + self.fn(tensor, **kwargs)
tensor = self.norm(tensor)
return tensor
tensor = tensor + self.fn(tensor, **kwargs)
tensor = self.norm(tensor)
return tensor
class PositionalEmbedding(nn.Module):
"""
Standard positional embedding.
From the paper "Attention is all you need".
Changed the constant from 10k to 100k, since this may be better for longer sequence lengths.
"""
def __init__(self, channels):
super(PositionalEmbedding, self).__init__()
inv_freq = 1. / (100000 ** (torch.arange(0, channels, 2).float() / channels))
self.register_buffer('inv_freq', inv_freq)
def forward(self, tensor):
pos = torch.arange(tensor.shape[1], device=tensor.device).type(self.inv_freq.type())
sin_inp = torch.einsum("i,j->ij", pos, self.inv_freq)
emb = torch.cat((sin_inp.sin(), sin_inp.cos()), dim=-1)
return emb[None,:,:]
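# Hedged shape note (illustration only): PositionalEmbedding(64) applied to a tensor
# of shape (batch, seq_len, 64) returns a (1, seq_len, 64) sinusoidal embedding,
# which LinformerLM below broadcasts onto the token embeddings.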
class ProjectInOut(nn.Module):
"""
    Implementation taken from https://github.com/lucidrains/sinkhorn-transformer/blob/73da02958965e1a690cb301292c0a3c549687d44/sinkhorn_transformer/sinkhorn_transformer.py#L218
"""
def __init__(self, fn, dim_in, dim_out, project_out=True):
super(ProjectInOut, self).__init__()
self.fn = fn
self.project_in = nn.Linear(dim_in, dim_out)
self.project_out = nn.Linear(dim_out, dim_in) if project_out else identity
def forward(self, tensor, **kwargs):
tensor = self.project_in(tensor)
tensor = self.fn(tensor, **kwargs)
tensor = self.project_out(tensor)
return tensor
class FeedForward(nn.Module):
"""
Standard Feed Forward Layer
"""
def __init__(self, input_channels, output_channels, ff_dim, dropout, activation="gelu"):
super(FeedForward, self).__init__()
self.w_1 = nn.Linear(input_channels, ff_dim)
self.w_2 = nn.Linear(ff_dim, output_channels)
self.activation = get_act(activation)
self.dropout = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
def forward(self, tensor, **kwargs):
tensor = self.w_1(tensor)
if self.activation is not None:
tensor = self.activation(tensor)
tensor = self.dropout(tensor)
tensor = self.w_2(tensor)
tensor = self.dropout2(tensor)
return tensor
class LinearAttentionHead(nn.Module):
"""
Linear attention, as proposed by the linformer paper
"""
def __init__(self, dim, dropout, E_proj, F_proj, causal_mask, full_attention=False):
super(LinearAttentionHead, self).__init__()
self.E = E_proj
self.F = F_proj
self.dim = dim
self.dropout = nn.Dropout(dropout)
self.P_bar = None
self.full_attention = full_attention
self.causal_mask = causal_mask
self.is_proj_tensor = isinstance(E_proj, torch.Tensor)
def forward(self, Q, K, V, **kwargs):
"""
Assume Q, K, V have same dtype
E, F are `nn.Linear` modules
"""
input_mask = kwargs["input_mask"] if "input_mask" in kwargs else None
embeddings_mask = kwargs["embeddings_mask"] if "embeddings_mask" in kwargs else None
# Instead of classic masking, we have to do this, because the classic mask is of size nxn
if input_mask is not None:
# This is for k, v
mask = input_mask[:,:,None]
K = K.masked_fill_(~mask, 0.0)
V = V.masked_fill_(~mask, 0.0)
del mask
if embeddings_mask is not None:
mask = embeddings_mask[:,:,None]
Q = Q.masked_fill_(~mask, 0.0)
del mask
K = K.transpose(1,2)
if not self.full_attention:
if self.is_proj_tensor:
self.E = self.E.to(K.device)
K = torch.matmul(K, self.E)
else:
K = self.E(K)
Q = torch.matmul(Q, K)
P_bar = Q/torch.sqrt(torch.tensor(self.dim).type(Q.type())).to(Q.device)
if self.causal_mask is not None:
self.causal_mask = self.causal_mask.to(Q.device)
P_bar = P_bar.masked_fill_(~self.causal_mask, float('-inf'))
P_bar = P_bar.softmax(dim=-1)
# Only save this when visualizing
if "visualize" in kwargs and kwargs["visualize"] == True:
self.P_bar = P_bar
P_bar = self.dropout(P_bar)
if not self.full_attention:
V = V.transpose(1,2)
if self.is_proj_tensor:
self.F = self.F.to(V.device)
V = torch.matmul(V, self.F)
else:
V = self.F(V)
V = V.transpose(1,2)
out_tensor = torch.matmul(P_bar, V)
return out_tensor
class MHAttention(nn.Module):
"""
Multihead attention, with each head being a Linformer Head
This feeds directly into a feed forward head
"""
def __init__(self, input_size, dim, channels, dim_k, nhead, dropout, checkpoint_level,
parameter_sharing, E_proj, F_proj, full_attention, causal_mask, w_o_intermediate_dim=None, decoder_mode=False, method="learnable"):
super(MHAttention, self).__init__()
self.heads = nn.ModuleList()
self.input_size = input_size
self.dim_k = dim_k
self.channels = channels
self.causal_mask = causal_mask
self.checkpoint_level = checkpoint_level
self.w_o_intermediate_dim = w_o_intermediate_dim
if parameter_sharing != "layerwise":
E_proj = get_EF(input_size, dim_k, method, dim)
F_proj = get_EF(input_size, dim_k, method, dim) if parameter_sharing == "none" or parameter_sharing == "headwise" else E_proj
self.decoder_mode = decoder_mode
self.to_q = nn.ModuleList()
self.to_k = nn.ModuleList()
self.to_v = nn.ModuleList()
for _ in range(nhead):
if parameter_sharing == "none":
E_proj = get_EF(input_size, dim_k, method, dim)
F_proj = get_EF(input_size, dim_k, method, dim)
attn = LinearAttentionHead(dim, dropout, E_proj, F_proj, causal_mask, full_attention)
self.heads.append(attn)
self.to_q.append(nn.Linear(channels, dim, bias=False))
self.to_k.append(nn.Linear(channels, dim, bias=False))
self.to_v.append(nn.Linear(channels, dim, bias=False))
if w_o_intermediate_dim is None:
self.w_o = nn.Linear(dim*nhead, channels)
else:
self.w_o_1 = nn.Linear(dim*nhead, w_o_intermediate_dim)
self.w_o_2 = nn.Linear(w_o_intermediate_dim, channels)
self.mh_dropout = nn.Dropout(dropout)
def forward(self, tensor, **kwargs):
batch_size, input_len, channels = tensor.shape
assert not (self.decoder_mode and "embeddings" not in kwargs), "Embeddings must be supplied if decoding"
assert not ("embeddings" in kwargs and (kwargs["embeddings"].shape[0], kwargs["embeddings"].shape[1], kwargs["embeddings"].shape[2]) != (batch_size, input_len, channels)), "Embeddings size must be the same as the input tensor"
head_outputs = []
for index, head in enumerate(self.heads):
Q = self.to_q[index](tensor)
K = self.to_k[index](tensor) if not self.decoder_mode else self.to_k[index](kwargs["embeddings"])
V = self.to_v[index](tensor) if not self.decoder_mode else self.to_v[index](kwargs["embeddings"])
if self.checkpoint_level == "C2":
head_outputs.append(checkpoint(head,Q,K,V))
else:
head_outputs.append(head(Q,K,V,**kwargs))
out = torch.cat(head_outputs, dim=-1)
if self.w_o_intermediate_dim is None:
out = self.w_o(out)
else:
out = self.w_o_1(out)
out = self.w_o_2(out)
out = self.mh_dropout(out)
return out
class Linformer(nn.Module):
"""
My attempt at reproducing the Linformer Paper
https://arxiv.org/pdf/2006.04768.pdf
"""
def __init__(self, input_size, channels, dim_k, dim_ff=256, dim_d=None, dropout_ff=0.15, nhead=4, depth=1, dropout=0.1, activation="gelu", checkpoint_level="C0", parameter_sharing="layerwise", k_reduce_by_layer=0, full_attention=False, include_ff=True, w_o_intermediate_dim=None, decoder_mode=False, causal=False, method="learnable", ff_intermediate=None):
super(Linformer, self).__init__()
assert activation == "gelu" or activation == "relu", "Only gelu and relu activations supported for now"
assert checkpoint_level == "C0" or checkpoint_level == "C1" or checkpoint_level == "C2", "Checkpoint level has to be either C0, C1, or C2."
assert parameter_sharing == "none" or parameter_sharing == "headwise" or parameter_sharing == "kv" or parameter_sharing == "layerwise", "The `parameter_sharing` flag has to be either 'none', 'headwise', 'kv', or 'layerwise'."
assert channels % nhead == 0 if dim_d is None else True, "If `dim_d` is not set to a custom value, `channels` must be divisible by `nhead`!"
assert not (ff_intermediate and parameter_sharing=="layerwise"), "Parameter sharing must not be layerwise if ff_intermediate is enabled!"
assert not (ff_intermediate and decoder_mode), "Raising the dimension in the middle cannot be done in the decoder!"
layers = nn.ModuleList()
self.decoder_mode = decoder_mode
self.input_size = input_size
self.channels = channels
self.checkpoint_level = checkpoint_level
self.depth = depth
self.nhead = nhead
head_dim = channels // nhead if dim_d is None else dim_d
E_proj = get_EF(input_size, dim_k, method, head_dim)
causal_mask = gen_causal_mask(input_size, dim_k, full_attention) if causal else None
# If we want causal but only with the encoder
causal_enc = gen_causal_mask(input_size, dim_k, full_attention) if (causal and not decoder_mode) else None
get_attn = lambda attn_channels, curr_dim_k: MHAttention(input_size, head_dim, attn_channels, curr_dim_k, nhead, dropout, checkpoint_level, parameter_sharing, E_proj, E_proj, full_attention, causal_enc, w_o_intermediate_dim, decoder_mode=False, method=method)
get_attn_context = lambda attn_channels, curr_dim_k: MHAttention(input_size, head_dim, attn_channels, curr_dim_k, nhead, dropout, checkpoint_level, parameter_sharing, E_proj, E_proj, full_attention, causal_mask, w_o_intermediate_dim, decoder_mode=True, method=method)
get_ff = lambda input_channels, output_channels: FeedForward(input_channels, output_channels, dim_ff, dropout_ff, activation)
for index in range(depth):
input_channels = ff_intermediate if (index != 0 and ff_intermediate is not None) and not decoder_mode else channels
output_channels = ff_intermediate if (index != depth-1 and ff_intermediate is not None) and not decoder_mode else channels
# TODO: Change the input and output channels here
attn_layer = get_attn(input_channels, max(1, dim_k - index*k_reduce_by_layer))
ff_layer = get_ff(input_channels, output_channels)
attn_layer, ff_layer = map(lambda res_ch_in, res_ch_out, fn: Residual(fn, res_ch_in, res_ch_out), (input_channels, input_channels), (input_channels, output_channels), (attn_layer, ff_layer))
if include_ff:
layers.extend([attn_layer, ff_layer])
else:
layers.extend([attn_layer])
if not self.decoder_mode:
continue
attn_context = get_attn_context(channels, max(1, dim_k - index*k_reduce_by_layer))
ff_context = get_ff(channels, channels)
attn_context, ff_context = map(lambda fn: Residual(fn, channels, channels), (attn_context, ff_context))
if include_ff:
layers.extend([attn_context, ff_context])
else:
layers.extend([attn_context])
self.seq = layers
def forward(self, tensor, **kwargs):
"""
Input is (batch_size, seq_len, channels)
"""
bt, n, c = tensor.shape
assert n == self.input_size, "This tensor is of the wrong size. Dimension 1 has to match the `input_size` flag"
assert c == self.channels, "This tensor is of the wrong size. Dimension 2 has to match the `channels` flag"
assert self.checkpoint_level == "C0" if kwargs else True, "Cannot run checkpointing when using kwargs. Please set the checkpoint level to `C0`"
assert "embeddings" not in kwargs or self.decoder_mode, "If decoding, needs to be initialized with `decoder_mode=True`"
for layer in self.seq:
if self.checkpoint_level != "C0":
tensor = checkpoint(layer, tensor)
else:
tensor = layer(tensor, **kwargs)
return tensor
class LinformerLM(nn.Module):
"""
    A wrapper module for LM tasks, inspired by https://github.com/lucidrains/sinkhorn-transformer
"""
def __init__(self, num_tokens, input_size, channels,
dim_k=64, dim_ff=1024, dim_d=None,
dropout_ff=0.1, dropout_tokens=0.1, nhead=4, depth=2, ff_intermediate=None,
dropout=0.05, activation="gelu", checkpoint_level="C0",
parameter_sharing="layerwise", k_reduce_by_layer=0, full_attention=False,
include_ff=True, w_o_intermediate_dim=None, emb_dim=None,
return_emb=False, decoder_mode=False, causal=False, method="learnable"):
super(LinformerLM, self).__init__()
emb_dim = channels if emb_dim is None else emb_dim
self.input_size = input_size
self.to_token_emb = nn.Embedding(num_tokens, emb_dim)
self.pos_emb = PositionalEmbedding(emb_dim)
self.linformer = Linformer(input_size, channels, dim_k=dim_k,
dim_ff=dim_ff, dim_d=dim_d, dropout_ff=dropout_ff,
nhead=nhead, depth=depth, dropout=dropout, ff_intermediate=ff_intermediate,
activation=activation, checkpoint_level=checkpoint_level, parameter_sharing=parameter_sharing,
k_reduce_by_layer=k_reduce_by_layer, full_attention=full_attention, include_ff=include_ff,
w_o_intermediate_dim=w_o_intermediate_dim, decoder_mode=decoder_mode, causal=causal, method=method)
if emb_dim != channels:
self.linformer = ProjectInOut(self.linformer, emb_dim, channels)
self.to_logits = identity if return_emb else nn.Linear(emb_dim, num_tokens)
self.dropout_tokens = nn.Dropout(dropout_tokens)
def forward(self, tensor, **kwargs):
"""
Input is (batch_size, seq_len), and all items are ints from [0, num_tokens-1]
"""
tensor = self.to_token_emb(tensor)
tensor = self.pos_emb(tensor).type(tensor.type()) + tensor
tensor = self.dropout_tokens(tensor)
tensor = self.linformer(tensor, **kwargs)
tensor = self.to_logits(tensor)
return tensor
class LinformerEncDec(nn.Module):
"""
    A complete seq -> seq translation model, with an encoder and a decoder module.
"""
def __init__(self, enc_num_tokens, enc_input_size, enc_channels, dec_num_tokens, dec_input_size, dec_channels,
enc_dim_k=64, enc_dim_ff=1024, enc_dim_d=None, enc_ff_intermediate=None, dec_ff_intermediate=None,
enc_dropout_ff=0.1, enc_nhead=4, enc_depth=2, enc_dropout=0.05, enc_parameter_sharing="layerwise", enc_k_reduce_by_layer=0,
enc_full_attention=False, enc_include_ff=True, enc_w_o_intermediate_dim=None, enc_emb_dim=None, enc_method="learnable",
dec_dim_k=64, dec_dim_ff=1024, dec_dim_d=None, dec_dropout_ff=0.1, dec_nhead=4, dec_depth=2, dec_dropout=0.05,
dec_parameter_sharing="layerwise", dec_k_reduce_by_layer=0, dec_full_attention=False, dec_include_ff=True,
dec_w_o_intermediate_dim=None, dec_emb_dim=None, dec_method="learnable", activation="gelu", checkpoint_level="C0"):
super(LinformerEncDec, self).__init__()
self.encoder = LinformerLM(num_tokens=enc_num_tokens, input_size=enc_input_size, channels=enc_channels, dim_d=enc_dim_d, dim_ff=enc_dim_ff,
dim_k=enc_dim_k, dropout_ff=enc_dropout_ff, nhead=enc_nhead, depth=enc_depth, dropout=enc_dropout,
parameter_sharing=enc_parameter_sharing, k_reduce_by_layer=enc_k_reduce_by_layer, ff_intermediate=enc_ff_intermediate,
full_attention=enc_full_attention, include_ff=enc_include_ff, w_o_intermediate_dim=enc_w_o_intermediate_dim,
emb_dim=enc_emb_dim, return_emb=True, activation=activation, checkpoint_level=checkpoint_level, method=enc_method)
self.decoder = LinformerLM(num_tokens=dec_num_tokens, input_size=dec_input_size, channels=dec_channels, dim_d=dec_dim_d, dim_ff=dec_dim_ff,
dim_k=dec_dim_k, dropout_ff=dec_dropout_ff, nhead=dec_nhead, depth=dec_depth, dropout=dec_dropout, ff_intermediate=dec_ff_intermediate,
parameter_sharing=dec_parameter_sharing, k_reduce_by_layer=dec_k_reduce_by_layer, method=dec_method,
full_attention=dec_full_attention, include_ff=dec_include_ff, w_o_intermediate_dim=dec_w_o_intermediate_dim,
emb_dim=dec_emb_dim, decoder_mode=True, causal=True, activation=activation, checkpoint_level=checkpoint_level)
def forward(self, x, y=None, **kwargs):
"""
Input is (batch_size, seq_len), and all items are ints from [0, num_tokens-1]
"""
encoder_output = self.encoder(x, **kwargs)
y = y if y is not None else x
return self.decoder(y, embeddings=encoder_output)
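# Minimal usage sketch for the two wrappers above. The hyperparameters, batch
# size, and sequence length below are illustrative assumptions, not defaults
# taken from the paper or this repository.
if __name__ == "__main__":
    lm = LinformerLM(num_tokens=1000, input_size=128, channels=64, dim_k=32, nhead=4, depth=2)
    tokens = torch.randint(0, 1000, (2, 128))      # (batch_size, seq_len)
    print(lm(tokens).shape)                        # expected: (2, 128, 1000)

    enc_dec = LinformerEncDec(enc_num_tokens=1000, enc_input_size=128, enc_channels=64,
                              dec_num_tokens=1000, dec_input_size=128, dec_channels=64)
    src = torch.randint(0, 1000, (2, 128))
    tgt = torch.randint(0, 1000, (2, 128))
    print(enc_dec(src, tgt).shape)                 # expected: (2, 128, 1000)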
|
src/genie/libs/parser/iosxe/tests/ShowCdpNeighbors/cli/equal/device_output_5_expected.py | balmasea/genieparser | 204 | 12725099 | expected_output = {
"cdp": {
"index": {
1: {
"capability": "R S C",
"device_id": "Device_With_A_Particularly_Long_Name",
"hold_time": 134,
"local_interface": "GigabitEthernet1",
"platform": "N9K-9000v",
"port_id": "Ethernet0/0",
},
2: {
"capability": "S I",
"device_id": "another_device_with_a_long_name",
"hold_time": 141,
"local_interface": "TwentyFiveGigE1/0/3",
"platform": "WS-C3850-",
"port_id": "TenGigabitEthernet1/1/4",
},
}
}
}
|
LeetCode/python3/394.py | ZintrulCre/LeetCode_Archiver | 279 | 12725105 | class Solution:
def decodeString(self, s: str) -> str:
stack = []
stack.append([1, ""])
num = 0
for l in s:
if l.isdigit():
num = num * 10 + ord(l) - ord('0')
elif l == '[':
stack.append([num, ""])
num = 0
elif l == ']':
stack[-2][1] += stack[-1][0] * stack[-1][1]
stack.pop()
else:
stack[-1][1] += l
return stack[0][1]
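# Worked examples of the stack-based expansion:
#   Solution().decodeString("3[a2[c]]")      -> "accaccacc"
#   Solution().decodeString("2[abc]3[cd]ef") -> "abcabccdcdcdef"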
|
Algo and DSA/LeetCode-Solutions-master/Python/number-of-rectangles-that-can-form-the-largest-square.py | Sourav692/FAANG-Interview-Preparation | 3,269 | 12725110 | <filename>Algo and DSA/LeetCode-Solutions-master/Python/number-of-rectangles-that-can-form-the-largest-square.py
# Time: O(n)
# Space: O(1)
class Solution(object):
def countGoodRectangles(self, rectangles):
"""
:type rectangles: List[List[int]]
:rtype: int
"""
result = mx = 0
for l, w in rectangles:
side = min(l, w)
if side > mx:
result, mx = 1, side
elif side == mx:
result += 1
return result
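# Example: countGoodRectangles([[5, 8], [3, 9], [5, 12], [16, 5]]) returns 3,
# since the largest attainable square side is 5 and three rectangles reach it.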
|
tools/SDKTool/src/ui/tree/ui_tree/over_node_info.py | Passer-D/GameAISDK | 1,210 | 12725127 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making GameAISDK available.
This source code file is licensed under the GNU General Public License Version 3.
For full details, please refer to the file "LICENSE.txt" which is provided as part of this source code package.
Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
"""
import logging
from .base_node_info import BaseNodeInfo
from ....config_manager.ui.ui_manager import UIType
from ....config_manager.ui.ui_action import ROI
from ..project_data_manager import ProjectDataManager
from ...utils import get_value
from ....common.define import DEFAULT_TEMPLATE_THRESHOLD
logger = logging.getLogger("sdktool")
class OverNodeInfo(BaseNodeInfo):
def __init__(self):
super(OverNodeInfo, self).__init__(UIType.OVER_UI.value)
def init(self, config_value):
self._node_cfg.clear()
self._node_cfg["name"] = config_value.get("name")
self._node_cfg["id"] = config_value.get("id") or -1
self._node_cfg["actionType"] = config_value.get("actionType") or "click"
self._node_cfg["desc"] = config_value.get("desc") or ""
self._node_cfg["imgPath"] = config_value.get("imgPath") or ""
if len(self._node_cfg["imgPath"]) > 0:
self._node_cfg["imgPath"] = ProjectDataManager().change_to_tool_path(self._node_cfg["imgPath"])
self._node_cfg["ROI"] = self.int_roi(config_value)
if self._node_cfg["actionType"] == 'click':
self._node_cfg["action"] = self.init_click_action(config_value)
elif self._node_cfg["actionType"] == 'drag':
self._node_cfg["action"] = self.init_drag_action(config_value)
return self._node_cfg
def change_to_data_cfg(self):
data_cfg = dict()
rois = self._node_cfg.get('ROI')
if rois is None:
logger.error("not have over item")
return data_cfg
data_cfg["element_id"] = int(self._node_cfg.get("id") or -1)
data_cfg["element_name"] = self._node_cfg.get('name')
data_cfg['description'] = self._node_cfg.get('desc')
data_cfg["action_type"] = self._node_cfg.get('actionType')
data_cfg['img_path'] = self._node_cfg.get('imgPath')
data_cfg["img_path"] = ProjectDataManager().change_to_sdk_path(data_cfg["img_path"])
# rois = self._node_cfg.get('ROI')
# if rois is not None:
roi = rois[0]
x = int(get_value(roi, 'x', 0))
y = int(get_value(roi, 'y', 0))
w = int(get_value(roi, 'w', 0))
h = int(get_value(roi, 'h', 0))
threshold = float(roi.get('templateThreshold', DEFAULT_TEMPLATE_THRESHOLD))
data_cfg['roi'] = ROI(x, y, w, h, threshold)
if data_cfg["action_type"] == 'click':
data_cfg['action'] = self.get_click_action()
elif data_cfg['action_type'] == 'drag':
data_cfg['drag_start'], data_cfg['drag_end'] = self.get_drag_action()
return data_cfg
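# Hypothetical config dict accepted by OverNodeInfo.init, inferred from the key
# reads above. The exact ROI layout is handled by BaseNodeInfo.int_roi, so treat
# this purely as an illustrative sketch:
# config_value = {
#     "name": "over_ui", "id": 1, "actionType": "click", "desc": "game-over popup",
#     "imgPath": "data/over.png",
#     "ROI": [{"x": 10, "y": 20, "w": 100, "h": 50, "templateThreshold": 0.7}],
# }
# node_cfg = OverNodeInfo().init(config_value)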
|
examples/multiline_plot.py | ATayls/DnaFeaturesViewer | 391 | 12725130 | """In this example we plot a record fragment with sequence over multiple lines.
"""
from dna_features_viewer import BiopythonTranslator
translator = BiopythonTranslator()
graphic_record = translator.translate_record("example_sequence.gb")
subrecord = graphic_record.crop((1700, 2000))
fig, axes = subrecord.plot_on_multiple_lines(
nucl_per_line=70, plot_sequence=True
)
fig.savefig("multiline_plot.png")
|
L1Trigger/GlobalTriggerAnalyzer/python/L1ExtraInputTagSet_cff.py | ckamtsikis/cmssw | 852 | 12725147 | # Set of input tags for L1Extra in agreement with L1Reco_cff
#
# <NAME> 2012-05-22
import FWCore.ParameterSet.Config as cms
L1ExtraInputTagSet = cms.PSet(
L1ExtraInputTags=cms.PSet(
TagL1ExtraMuon=cms.InputTag("l1extraParticles"),
TagL1ExtraIsoEG=cms.InputTag("l1extraParticles", "Isolated"),
TagL1ExtraNoIsoEG=cms.InputTag("l1extraParticles", "NonIsolated"),
TagL1ExtraCenJet=cms.InputTag("l1extraParticles", "Central"),
TagL1ExtraForJet=cms.InputTag("l1extraParticles", "Forward"),
TagL1ExtraTauJet=cms.InputTag("l1extraParticles", "Tau"),
TagL1ExtraEtMissMET=cms.InputTag("l1extraParticles", "MET"),
TagL1ExtraEtMissHTM=cms.InputTag("l1extraParticles", "MHT"),
TagL1ExtraHFRings=cms.InputTag("l1extraParticles")
)
)
|
attic/iterables/CACM/less_more.py | matteoshen/example-code | 5,651 | 12725166 | <reponame>matteoshen/example-code
"""
<NAME> - The Curse of the Excluded Middle
DOI:10.1145/2605176
CACM vol.57 no.06
"""
def less_than_30(n):
check = n < 30
print('%d < 30 : %s' % (n, check))
return check
def more_than_20(n):
check = n > 20
print('%d > 20 : %s' % (n, check))
return check
l = [1, 25, 40, 5, 23]
q0 = (n for n in l if less_than_30(n))
q1 = (n for n in q0 if more_than_20(n))
for n in q1:
print('-> %d' % n)
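# Because both generator expressions are lazy, the two predicates interleave per
# element instead of running as two separate passes over the list. Expected output:
#
# 1 < 30 : True
# 1 > 20 : False
# 25 < 30 : True
# 25 > 20 : True
# -> 25
# 40 < 30 : False
# 5 < 30 : True
# 5 > 20 : False
# 23 < 30 : True
# 23 > 20 : True
# -> 23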
|
src/quicknlp/metrics.py | jalajthanaki/quick-nlp | 287 | 12725175 | <reponame>jalajthanaki/quick-nlp
import torch
from fastai.core import to_np
import numpy as np
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction
def token_accuracy(preds, targs):
preds = torch.max(preds, dim=-1)[1]
return (preds[:-1] == targs.data).float().mean()
def perplexity(preds, targs):
return torch.exp(-preds.mean())
def bleu_score(preds, targs, stoi=None):
sf = SmoothingFunction().method1
preds = torch.max(preds, dim=-1)[1][:-1]
    bleus = []
    # preds/targs are assumed to be shaped (seq_len, batch_size); iterate example by example.
    for targ, pred in zip(to_np(targs).T, to_np(preds).T):
        targ, pred = targ.tolist(), pred.tolist()
        if len(targ) > 2:
            bleu = sentence_bleu([targ], pred, smoothing_function=sf, weights=(1 / 3., 1 / 3., 1 / 3.))
        elif len(targ) == 2:
            bleu = sentence_bleu([targ], pred, smoothing_function=sf, weights=(0.5, 0.5))
        else:
            bleu = sentence_bleu([targ], pred, smoothing_function=sf, weights=(1.0,))
        bleus.append(bleu)
    return np.mean(bleus) if bleus else 0.0
|
modelvshuman/datasets/info_mappings.py | TizianThieringer/model-vs-human | 158 | 12725346 | from abc import ABC
class ImagePathToInformationMapping(ABC):
def __init__(self):
pass
def __call__(self, full_path):
pass
class ImageNetInfoMapping(ImagePathToInformationMapping):
"""
For ImageNet-like directory structures without sessions/conditions:
.../{category}/{img_name}
"""
def __call__(self, full_path):
session_name = "session-1"
img_name = full_path.split("/")[-1]
condition = "NaN"
category = full_path.split("/")[-2]
return session_name, img_name, condition, category
class ImageNetCInfoMapping(ImagePathToInformationMapping):
"""
For the ImageNet-C Dataset with path structure:
...{corruption function}/{corruption severity}/{category}/{img_name}
"""
def __call__(self, full_path):
session_name = "session-1"
parts = full_path.split("/")
img_name = parts[-1]
category = parts[-2]
severity = parts[-3]
corruption = parts[-4]
condition = "{}-{}".format(corruption, severity)
return session_name, img_name, condition, category
class InfoMappingWithSessions(ImagePathToInformationMapping):
"""
Directory/filename structure:
.../{session_name}/{something}_{something}_{something}_{condition}_{category}_{img_name}
"""
def __call__(self, full_path):
session_name = full_path.split("/")[-2]
img_name = full_path.split("/")[-1]
condition = img_name.split("_")[3]
category = img_name.split("_")[4]
return session_name, img_name, condition, category
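# Example of how a path is parsed (the path itself is illustrative):
#   ImageNetCInfoMapping()("/data/imagenet-c/gaussian_noise/3/n01440764/img_001.JPEG")
#   -> ("session-1", "img_001.JPEG", "gaussian_noise-3", "n01440764")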
|
etc/base_config.py | yandexdataschool/everware | 130 | 12725350 | # Basic configuration, you should not use this directly
# instead checkout local_config.py or local_dockermacine_config.py
# spawn with custom docker containers
c.JupyterHub.spawner_class = 'everware.CustomDockerSpawner'
c.Spawner.tls = False
c.Spawner.debug = True
c.Spawner.start_timeout = 1000
c.Spawner.http_timeout = 60
c.Spawner.poll_interval = 5
c.Spawner.remove_containers = True
c.Spawner.tls_assert_hostname = False
c.Spawner.use_docker_client_env = True
# give users an opportunity to restore any images via docker or not. Default: True
# c.Spawner.share_user_images = False
# c.Authenticator.admin_users = {'anaderi', 'astiunov'}
# The docker containers need access to the Hub API, so the default
# loopback address doesn't work
from jupyter_client.localinterfaces import public_ips
c.JupyterHub.hub_ip = public_ips()[0]
c.JupyterHub.data_files_path = 'share'
c.JupyterHub.template_paths = ['share/static/html']
|
third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/models/testharness_results_unittest.py | wenfeifei/miniblink49 | 5,964 | 12725381 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from webkitpy.layout_tests.models import testharness_results
class TestHarnessResultCheckerTest(unittest.TestCase):
def test_is_testharness_output(self):
test_data = [
{'content': 'foo', 'result': False},
{'content': '', 'result': False},
{'content': ' ', 'result': False},
{'content': 'This is a testharness.js-based test.\nHarness: the test ran to completion.', 'result': True},
{'content': '\n \r This is a testharness.js-based test. \n \r \n \rHarness: the test ran to completion. \n\n', 'result': True},
{'content': ' This \nis a testharness.js-based test.\nHarness: the test ran to completion.', 'result': False},
{'content': 'This is a testharness.js-based test. Harness: the test ran to completion.', 'result': False},
{'content': 'This is a testharness.js-based test.\nFoo bar \n Harness: the test ran to completion.', 'result': True},
{'content': 'This is a testharness.js-based test.\nFAIL: bah \n Harness: the test ran to completion.\n\n\n', 'result': True},
]
for data in test_data:
self.assertEqual(data['result'], testharness_results.is_testharness_output(data['content']))
def test_is_testharness_output_passing(self):
test_data = [
{'content': 'This is a testharness.js-based test.\n Harness: the test ran to completion.', 'result': True},
{'content': 'This is a testharness.js-based test.\n \n Harness: the test ran to completion.', 'result': False},
{'content': 'This is a testharness.js-based test.\n PASS: foo bar \n Harness: the test ran to completion.', 'result': True},
{'content': 'This is a testharness.js-based test.\n PASS: foo bar FAIL \n Harness: the test ran to completion.', 'result': True},
{'content': 'This is a testharness.js-based test.\n PASS: foo bar \nFAIL \n Harness: the test ran to completion.', 'result': False},
{'content': 'This is a testharness.js-based test.\n CONSOLE ERROR: BLAH \n Harness: the test ran to completion.', 'result': True},
{'content': 'This is a testharness.js-based test.\n CONSOLE WARNING: BLAH \n Harness: the test ran to completion.', 'result': True},
{'content': 'This is a testharness.js-based test.\n Foo bar \n Harness: the test ran to completion.', 'result': False},
{'content': 'This is a testharness.js-based test.\n FAIL: bah \n Harness: the test ran to completion.', 'result': False},
{'content': 'This is a testharness.js-based test.\n TIMEOUT: bah \n Harness: the test ran to completion.', 'result': False},
{'content': 'This is a testharness.js-based test.\n NOTRUN: bah \n Harness: the test ran to completion.', 'result': False},
{'content': 'CONSOLE LOG: error.\nThis is a testharness.js-based test.\nPASS: things are fine.\nHarness: the test ran to completion.\n\n', 'result': True},
{'content': 'CONSOLE ERROR: error.\nThis is a testharness.js-based test.\nPASS: things are fine.\nHarness: the test ran to completion.\n\n', 'result': True},
{'content': 'CONSOLE WARNING: error.\nThis is a testharness.js-based test.\nPASS: things are fine.\nHarness: the test ran to completion.\n\n', 'result': True},
{'content': 'RANDOM TEXT.\nThis is a testharness.js-based test.\nPASS: things are fine.\n.Harness: the test ran to completion.\n\n', 'result': False},
]
for data in test_data:
self.assertEqual(data['result'], testharness_results.is_testharness_output_passing(data['content']))
def test_is_testharness_output_with_console_errors_and_warnings(self):
test_data = [
{'content': 'This is a testharness.js-based test.\nCONSOLE ERROR: This is an error.\nTest ran to completion.', 'result': True},
{'content': 'This is a testharness.js-based test.\nCONSOLE WARNING: This is a warning.\nTest ran to completion.', 'result': True},
{'content': 'CONSOLE ERROR: This is an error.\nTest ran to completion.', 'result': True},
{'content': 'CONSOLE WARNING: This is a warning.\nTest ran to completion.', 'result': True},
{'content': 'This is a testharness.js-based test.\nCONSOLE ERROR: This is an error.', 'result': True},
{'content': 'CONSOLE ERROR: This is an error.', 'result': True},
{'content': 'CONSOLE WARNING: This is a warning.', 'result': True},
{'content': 'This is a testharness.js-based test.\nCONSOLE MESSAGE: This is not error.', 'result': False},
{'content': 'This is a testharness.js-based test.\nNo errors here.', 'result': False},
{'content': 'This is not a CONSOLE ERROR, sorry.', 'result': False},
{'content': 'This is not a CONSOLE WARNING, sorry.', 'result': False},
]
for data in test_data:
self.assertEqual(data['result'], testharness_results.is_testharness_output_with_console_errors_or_warnings(data['content']))
|
src/oscar_accounts/management/commands/oscar_accounts_init.py | n8snyder/django-oscar-accounts | 149 | 12725438 | from django.core.management.base import BaseCommand
from oscar_accounts.setup import create_default_accounts
class Command(BaseCommand):
help = "Initialize oscar accounts default structure"
def handle(self, *args, **options):
create_default_accounts()
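# Invoked like any Django management command:
#   python manage.py oscar_accounts_init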
|
utils/font_tool.py | gregbugaj/TextGenerator | 166 | 12725462 | <gh_stars>100-1000
from fontTools.ttLib import TTFont
fonts = {}
def check(char, font_path):
    """Return True if the font at `font_path` contains a drawable glyph for `char`."""
    # Cache opened fonts so repeated checks against the same file stay cheap.
    if font_path in fonts:
        font = fonts.get(font_path)
    else:
        font = TTFont(font_path)
        fonts[font_path] = font
    utf8_char = char.encode("unicode_escape").decode('utf-8')
    if utf8_char.startswith('\\u'):
        # Non-ASCII (BMP) characters map to glyph names of the form "uniXXXX".
        uc = "uni" + utf8_char[2:].upper()
        f = font.getGlyphSet().get(uc)
        if f and f._glyph.numberOfContours:
            return True
else:
return False
return True
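# Example usage (the font path is an illustrative assumption):
#   check('永', 'fonts/simhei.ttf')   # True if the glyph exists and has contours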
|
lnbits/extensions/lnurlpos/migrations.py | blackcoffeexbt/lnbits-legend | 258 | 12725469 | async def m001_initial(db):
"""
    Initial lnurlpos tables (lnurlposs and lnurlpospayment).
"""
await db.execute(
f"""
CREATE TABLE lnurlpos.lnurlposs (
id TEXT NOT NULL PRIMARY KEY,
key TEXT NOT NULL,
title TEXT NOT NULL,
wallet TEXT NOT NULL,
currency TEXT NOT NULL,
timestamp TIMESTAMP NOT NULL DEFAULT {db.timestamp_now}
);
"""
)
await db.execute(
f"""
CREATE TABLE lnurlpos.lnurlpospayment (
id TEXT NOT NULL PRIMARY KEY,
posid TEXT NOT NULL,
payhash TEXT,
payload TEXT NOT NULL,
pin INT,
sats INT,
timestamp TIMESTAMP NOT NULL DEFAULT {db.timestamp_now}
);
"""
)
|
Chapter 05/crack_zip.py | Prakshal2607/Effective-Python-Penetration-Testing | 346 | 12725477 | import zipfile
filename = 'test.zip'
dictionary = 'passwordlist.txt'
password = None
file_to_open = zipfile.ZipFile(filename)
with open(dictionary, 'r') as f:
for line in f.readlines():
password = line.strip('\n')
        try:
            file_to_open.extractall(pwd=password)
            print 'Password found: %s' % password
            break
        except:
            pass |
notebook/random_random.py | vhn0912/python-snippets | 174 | 12725509 | import random
print(random.random())
# 0.4496839011176701
random.seed(0)
print(random.random())
# 0.8444218515250481
print(random.random())
# 0.7579544029403025
random.seed(0)
print(random.random())
# 0.8444218515250481
print(random.random())
# 0.7579544029403025
|
dreamplace/ops/density_overflow/density_overflow.py | xiefei1026/DREAMPlace | 323 | 12725524 | ##
# @file density_overflow.py
# @author <NAME>
# @date Jun 2018
# @brief Compute density overflow
#
import math
import torch
from torch import nn
from torch.autograd import Function
from dreamplace.ops.density_map.density_map import DensityMap as DensityMap
import pdb
class DensityOverflow(DensityMap):
"""
@brief Compute density overflow for both movable and fixed cells.
The density map for fixed cells is pre-computed.
Each call will only compute the density map for movable cells.
"""
def __init__(self, node_size_x, node_size_y, bin_center_x, bin_center_y,
target_density, xl, yl, xh, yh, bin_size_x, bin_size_y,
num_movable_nodes, num_terminals, num_filler_nodes):
"""
@brief initialization
@param node_size_x cell width array consisting of movable cells, fixed cells, and filler cells in order
@param node_size_y cell height array consisting of movable cells, fixed cells, and filler cells in order
@param bin_center_x bin center x locations
@param bin_center_y bin center y locations
@param target_density target density
@param xl left boundary
@param yl bottom boundary
@param xh right boundary
@param yh top boundary
@param bin_size_x bin width
@param bin_size_y bin height
@param num_movable_nodes number of movable cells
@param num_terminals number of fixed cells
@param num_filler_nodes number of filler cells
"""
super(DensityOverflow,
self).__init__(node_size_x=node_size_x,
node_size_y=node_size_y,
bin_center_x=bin_center_x,
bin_center_y=bin_center_y,
xl=xl,
yl=yl,
xh=xh,
yh=yh,
bin_size_x=bin_size_x,
bin_size_y=bin_size_y,
num_movable_nodes=num_movable_nodes,
num_terminals=num_terminals,
num_filler_nodes=num_filler_nodes)
self.target_density = target_density
def forward(self, pos):
"""
@brief API
@param pos cell locations. The array consists of x locations of movable cells, fixed cells, and filler cells, then y locations of them
"""
density_map = super(DensityOverflow, self).forward(pos)
bin_area = self.bin_size_x * self.bin_size_y
density_cost = (density_map -
self.target_density * bin_area).clamp_(min=0.0).sum()
return density_cost, density_map.max() / bin_area
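# Minimal usage sketch (all names and values below are illustrative assumptions;
# see the parameter docstrings above for the expected tensor layouts):
#   overflow_op = DensityOverflow(node_size_x, node_size_y, bin_center_x, bin_center_y,
#                                 target_density=0.9, xl=0.0, yl=0.0, xh=100.0, yh=100.0,
#                                 bin_size_x=1.0, bin_size_y=1.0,
#                                 num_movable_nodes=num_movable, num_terminals=num_fixed,
#                                 num_filler_nodes=num_filler)
#   overflow, max_util = overflow_op(pos)   # pos = [all x locations, then all y locations]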
|
tests/deployment/sagemaker/sagemaker_moto/__init__.py | Shumpei-Kikuta/BentoML | 3,451 | 12725594 | <reponame>Shumpei-Kikuta/BentoML<gh_stars>1000+
from moto.core.models import base_decorator
from tests.deployment.sagemaker.sagemaker_moto.model import sagemaker_backends
moto_mock_sagemaker = base_decorator(sagemaker_backends)
|