repo_name (stringlengths 6-61) | path (stringlengths 4-230) | copies (stringlengths 1-3) | size (stringlengths 4-6) | text (stringlengths 1.01k-850k) | license (stringclasses 15 values) | hash (int64 -9,220,477,234,079,998,000 to 9,219,060,020B) | line_mean (float64 11.6-96.6) | line_max (int64 32-939) | alpha_frac (float64 0.26-0.9) | autogenerated (bool, 1 class) | ratio (float64 1.62-6.1) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
stijnvanhoey/pybreach | pybreach/pybreach.py | 1 | 6958 | # -*- coding: utf-8 -*-
import numpy as np
def left_reach(perf_meas, i, pct_beh):
"""calculate the left reach
For all model realisations, given a data evaluation point index and a degree of tolerance, get the
left reach for all realisations and derive the maximum
Parameters
----------
perf_meas : 2-D numpy ndarray
Input array of shape NxM with N the number of model realisations and M
the number of model evaluation points (time steps, measured values)
i : int
the index of the array defining the current model evaluation point to
calculate the left reach
    pct_beh : float [0-100]
        degree of tolerance, defining the percentage of points that are
        allowed to not comply while the reach calculation continues
Returns
-------
overzichtl : numpy ndarray
Nx5 output array with on the individual columns (1) the index,
(2) a zero column used for internal calculation, (3) the number of
failures, (4) the reach and (5) the span for each of the model
realisations
maxreachl : int
maximum left reach of all model realisations
"""
par_size, data_size = perf_meas.shape
# prepare the left reach overview array
overzichtl = np.array([np.arange(par_size),
np.ones(par_size),
np.zeros(par_size),
np.empty(par_size),
np.empty(par_size)]
).transpose().astype(float)
overzichtl[:, 3:5] = np.nan
# derive for each par_set the length of the reach
aantl = 0
while (aantl <= i) & (sum(overzichtl[:, 1].astype(int)) != 0):
overzichtl[(overzichtl[:, 1] == 1) &
(np.abs(perf_meas[:, i - aantl]) == 0), 2] = \
overzichtl[(overzichtl[:, 1] == 1) &
(np.abs(perf_meas[:, i - aantl]) == 0),
2] + 1 # vwe 2x
aantl += 1
overzichtl[overzichtl[:, 2] > pct_beh / 100. * aantl, 1] = 0
overzichtl[overzichtl[:, 1] == 1, 3] = aantl
# correct the reach length on end-of-line zeros
if all(np.isnan(overzichtl[:, 3])):
maxreachl = i
else:
maxreachl = i - (np.nanmax(overzichtl[:, 3], axis=0)).astype(int) + 1
while np.all(np.abs(perf_meas[i - overzichtl[:, 3].astype(
int) + 1 == maxreachl, maxreachl]) == 0): # vwe
overzichtl[i - overzichtl[:, 3].astype(int) + 1 ==
maxreachl, 2:4] = \
overzichtl[i - overzichtl[:, 3].astype(int) + 1 ==
maxreachl, 2:4] - 1
maxreachl += 1
overzichtl[~np.isnan(overzichtl[:, 3]), 4] = i - \
overzichtl[~np.isnan(
overzichtl[:, 3]), 3] + 1
return overzichtl, maxreachl
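# Usage sketch (added note, not part of the original module; values are
# hypothetical):
#   overzichtl, maxreachl = left_reach(perf_meas, i=50, pct_beh=10.)
# Note that pct_beh is treated as a percentage (the loop above divides it
# by 100), so 10. means up to 10% non-complying points are tolerated.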
def right_reach(perf_meas, i, pct_beh):
"""calculate the right reach
For all model realisations, given a data evaluation point index and a degree of tolerance, get the
right reach for all realisations and derive the maximum
Parameters
----------
perf_meas : 2-D numpy ndarray
Input array of shape NxM with N the number of model realisations and M
the number of model evaluation points (time steps, measured values)
i : int
the index of the array defining the current model evaluation point to
        calculate the right reach
    pct_beh : float [0-100]
        degree of tolerance, defining the percentage of points that are
        allowed to not comply while the reach calculation continues
Returns
-------
overzichtr : numpy ndarray
Nx5 output array with on the individual columns (1) the index,
(2) a zero column used for internal calculation, (3) the number of
failures, (4) the reach and (5) the span for each of the model
realisations
maxreachr : int
maximum right reach of all model realisations
"""
par_size, data_size = perf_meas.shape
# prepare the right reach overview array
overzichtr = np.array([np.arange(par_size),
np.ones(par_size),
np.zeros(par_size),
np.empty(par_size),
np.empty(par_size)]
).transpose().astype(float)
overzichtr[:, 3:5] = np.nan
# derive for each par_set the length of the reach
aantr = 0
while (i + aantr < data_size) & \
(sum(overzichtr[:, 1].astype(int)) != 0):
overzichtr[(overzichtr[:, 1] == 1) &
(np.abs(perf_meas[:, i + aantr]) == 0), 2] = \
overzichtr[(overzichtr[:, 1] == 1) &
(np.abs(perf_meas[:, i + aantr]) == 0),
2] + 1 # vwe 2x
aantr += 1
overzichtr[overzichtr[:, 2] > pct_beh / 100. * aantr, 1] = 0
overzichtr[overzichtr[:, 1] == 1, 3] = aantr
    # correct the reach length on end-of-line zeros
if all(np.isnan(overzichtr[:, 3])):
maxreachr = i
else:
maxreachr = i + (np.nanmax(overzichtr[:, 3], axis=0)).astype(int) - 1
while np.all(np.abs(perf_meas[i + overzichtr[:, 3].astype(
int) - 1 == maxreachr, maxreachr]) == 0): # vwe
overzichtr[i + overzichtr[:, 3].astype(int) - 1 ==
maxreachr, 2:4] = \
overzichtr[i + overzichtr[:, 3].astype(int) - 1 ==
maxreachr, 2:4] - 1
maxreachr -= 1
overzichtr[~np.isnan(overzichtr[:, 3]), 4] = i + \
overzichtr[~np.isnan(overzichtr[:, 3]), 3] - 1
return overzichtr, maxreachr
def breach_run(perf_meas, pcten): #, vwe
"""derive breach for a given performance matrix
Parameters
----------
perf_meas : 2-D numpy ndarray
Input array of shape NxM with N the number of model realisations and M
the number of model evaluation points (time steps, measured values)
    pcten : 1-D numpy ndarray
        degrees of tolerance, each defining the percentage of points
        that are allowed to fail
vwe: Not yet implemented!
Returns
-------
breach : numpy ndarray
For each of the degrees of tolerance, the left and right reach for each
of the data points
"""
breach = np.empty((perf_meas.shape[1], 2 * pcten.size), dtype=int)
par_size, data_size = perf_meas.shape
# par_maxreach
for i in range(data_size):
for j, pct_beh in enumerate(pcten):
# ----- LEFT REACH ------
overzichtl, maxreachl = left_reach(perf_meas, i, pct_beh)
breach[i, 2 * j] = maxreachl
# ----- RIGHT REACH ------
overzichtr, maxreachr = right_reach(perf_meas, i, pct_beh)
breach[i, 2 * j + 1] = maxreachr
return breach # par_maxreach
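if __name__ == '__main__':
    # Minimal smoke test (added for illustration; not part of the original
    # module): a 5x50 performance matrix without any failing points should
    # give, for every evaluation point, a left reach of 0 and a right reach
    # of 49.
    demo_perf = np.ones((5, 50))
    demo_pcten = np.array([10.])
    print(breach_run(demo_perf, demo_pcten))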
| mit | -1,656,142,593,568,342,000 | 37.655556 | 102 | 0.550158 | false | 3.667897 | false | false | false |
bashhack/MIT_CompSci_Degree | MIT_600SC/Unit_1/Lecture_4_Examples.py | 1 | 3191 | # Original (From Lecture 3)
# x = 0.5
# epsilon = 0.01
# numGuesses = 0
# low = 0.0
# high = x
# ans = (high + low)/2.0
# while abs(ans**2 - x) >= epsilon and ans <= x:
# print 'high=', high, 'low=', low, 'ans=', ans
# numGuesses += 1
# if ans**2 < x:
# low = ans
# else:
# high = ans
# ans = (high + low)/2.0
# print 'numGuesses =', numGuesses
# print ans, 'is close to square root of', x
# Fixing for our error with nums less than 1:
# Our condition for the while loop was failing
# because the answer was outside of our search area.
# We would reach a point where ans = 1.0/2.0 = 0.5
# which would then meet the condition ans <= high,
# keeping us within the loop, infinitely
x = 0.5
epsilon = 0.01
numGuesses = 0
low = 0.0
high = max(x, 1.0) # fixes our initial error, our search range was 0 - .5, but the answer is outside search range
ans = (high + low)/2.0
while abs(ans**2 - x) >= epsilon and ans <= high:
print 'high=', high, 'low=', low, 'ans=', ans
numGuesses += 1
if ans**2 < x:
low = ans
else:
high = ans
ans = (high + low)/2.0
print 'numGuesses =', numGuesses
print ans, 'is close to square root of', x
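# Worked trace for x = 0.5 (added for illustration): high starts at
# max(0.5, 1.0) = 1.0, so the answer may now exceed x. The interval
# narrows as (0.0, 1.0) -> (0.5, 1.0) -> (0.5, 0.75) -> (0.625, 0.75)
# -> ... until abs(ans**2 - 0.5) < 0.01, close to the true root 0.7071...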
def withinEpsilon(x, y, epsilon):
"""x,y,epsilon floats. epsilon > 0.0
returns True if x is within epsilon of y"""
return abs(x - y) <= epsilon
print withinEpsilon(2, 3, 1)  # abs(2 - 3) = abs(-1) = 1, and 1 <= 1 is True
val = withinEpsilon(2, 3, 0.5)  # abs(2 - 3) = 1, and 1 <= 0.5 is False
print val
def f(x):
x = x + 1
print 'x in fn scope =', x
return x
x = 3
z = f(x)
print 'z =', z
print 'x in global scope =', x
def f1(x):
def g():
x = 'abc'
x = x + 1
print 'x =', x
g()
# assert False
return x
x = 3
z = f1(x)
def isEven(i):
"""assumes i a positive int
returns True if i is even, otherwise False"""
return i % 2 == 0
def findRoot(pwr, val, epsilon):
"""assumes pwr an int; val, epsilon floats
pwr and epsilon > 0
if it exists,
returns a value within epsilon of val**pwr
otherwise returns None"""
assert type(pwr) == int and type(val) == float and type(epsilon) == float
assert pwr > 0 and epsilon > 0
if isEven(pwr) and val < 0:
return None
low = -abs(val)
high = max(abs(val), 1.0)
ans = (high + low)/2.0
while not withinEpsilon(ans**pwr, val, epsilon):
# print 'ans =', ans, 'low =', low, 'high =', high
if ans**pwr < val:
low = ans
else:
high = ans
ans = (high + low)/2.0
return ans
def testFindRoot():
"""x float, epsilon float, pwr positive int"""
for x in (-1.0, 1.0, 3456.0):
for pwr in (1, 2, 3):
ans = findRoot(pwr, x, 0.001)
if ans is None:
print 'The answer is imaginary'
else:
print ans, 'to the power of', pwr, 'is close to', x
sumDigits = 0
for c in str(1952):
sumDigits += int(c)
print 'sumDigits =', sumDigits
x = 100
divisors = ()
for i in range(1, x):
if x % i == 0:
divisors = divisors + (i,)
print divisors[0]
print divisors[1]
print divisors[2]
print divisors[2:4]
| mit | 725,222,314,473,126,000 | 21.314685 | 114 | 0.555939 | false | 2.976679 | false | false | false |
prkumar/uplink | uplink/helpers.py | 1 | 3403 | # Standard library imports
import collections
# Local imports
from uplink import interfaces, utils
from uplink.clients import io
def get_api_definitions(service):
"""
Returns all attributes with type
`uplink.interfaces.RequestDefinitionBuilder` defined on the given
class.
Note:
All attributes are considered, not only defined directly on the class.
Args:
service: A class object.
"""
# In Python 3.3, `inspect.getmembers` doesn't respect the descriptor
# protocol when the first argument is a class. In other words, the
# function includes any descriptors bound to `service` as is rather
# than calling the descriptor's __get__ method. This is seemingly
# fixed in Python 2.7 and 3.4+ (TODO: locate corresponding bug
# report in Python issue tracker). Directly invoking `getattr` to
# force Python's attribute lookup protocol is a decent workaround to
# ensure parity:
class_attributes = ((k, getattr(service, k)) for k in dir(service))
is_definition = interfaces.RequestDefinitionBuilder.__instancecheck__
return [(k, v) for k, v in class_attributes if is_definition(v)]
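# Usage sketch (added for illustration; the Consumer subclass and its
# decorated method are hypothetical):
#
#   class GitHub(Consumer):
#       @get("users/{user}/repos")
#       def get_repos(self, user): pass
#
#   get_api_definitions(GitHub)
#   # -> [("get_repos", <RequestDefinitionBuilder ...>)]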
def set_api_definition(service, name, definition):
setattr(service, name, definition)
class RequestBuilder(object):
def __init__(self, client, converter_registry, base_url):
self._method = None
self._relative_url_template = utils.URIBuilder("")
self._return_type = None
self._client = client
self._base_url = base_url
# TODO: Pass this in as constructor parameter
# TODO: Delegate instantiations to uplink.HTTPClientAdapter
self._info = collections.defaultdict(dict)
self._context = {}
self._converter_registry = converter_registry
self._transaction_hooks = []
self._request_templates = []
@property
def client(self):
return self._client
@property
def method(self):
return self._method
@method.setter
def method(self, method):
self._method = method
@property
def base_url(self):
return self._base_url
def set_url_variable(self, variables):
self._relative_url_template.set_variable(variables)
@property
def relative_url(self):
return self._relative_url_template.build()
@relative_url.setter
def relative_url(self, url):
self._relative_url_template = utils.URIBuilder(url)
@property
def info(self):
return self._info
@property
def context(self):
return self._context
@property
def transaction_hooks(self):
return iter(self._transaction_hooks)
def get_converter(self, converter_key, *args, **kwargs):
return self._converter_registry[converter_key](*args, **kwargs)
@property
def return_type(self):
return self._return_type
@return_type.setter
def return_type(self, return_type):
self._return_type = return_type
@property
def request_template(self):
return io.CompositeRequestTemplate(self._request_templates)
@property
def url(self):
return utils.urlparse.urljoin(self.base_url, self.relative_url)
def add_transaction_hook(self, hook):
self._transaction_hooks.append(hook)
def add_request_template(self, template):
self._request_templates.append(template)
| mit | -7,565,611,934,969,535,000 | 27.838983 | 78 | 0.665589 | false | 4.201235 | false | false | false |
hendawy/lotteryapp | lotteryapp/models.py | 1 | 4998 | # stdlib imports
import uuid
import random
import datetime
# django imports
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.db import IntegrityError
from django.core.exceptions import ValidationError
from django.utils.timezone import utc
from django.core import urlresolvers
# local imports
from utils.hash import random_base36_string
class Lottery(models.Model):
"""Lottery model"""
hash_key = models.UUIDField(default=uuid.uuid4, editable=False)
title = models.CharField(max_length=500)
registration_deadline = models.DateTimeField()
created = models.DateTimeField(auto_now_add=True)
# In order not to retrieve winner every time.
# Using a string instead of a class because class wasn't defined yet
winner = models.ForeignKey(
'LotteryParticipant', null=True,
blank=True, related_name='lottery_winner', on_delete=models.SET_NULL)
active = models.BooleanField(default=True)
class Meta:
verbose_name = _('Lottery')
verbose_name_plural = _('Lotteries')
def __unicode__(self):
return unicode('%s: %i' % (self.title, self.pk))
def get_winner(self):
"""Returning the previously selected winner if exists,
if not, select one and return it
"""
if self.winner is not None:
return self.winner
else:
participants = LotteryParticipant.objects.filter(lottery=self)
if participants.count() > 0:
winner = random.choice(list(participants))
winner.is_winner = True
winner.save()
winner = LotteryParticipant.objects.get(
is_winner=True, lottery=self)
self.winner = winner
self.save()
return winner
def is_active(self):
"""Determines if a lottery is still accepting applicants"""
return self.active and self.registration_deadline > \
datetime.datetime.now().replace(tzinfo=utc) and self.winner is None
def get_url(self):
"""Returns either the registration url or the winner url"""
if self.winner is None:
return urlresolvers.reverse(
'lotteryapp:registration_form', args=[self.hash_key])
else:
return urlresolvers.reverse(
'lotteryapp:lottery_winner', args=[self.hash_key])
class LotteryParticipant(models.Model):
"""Lottery participants model"""
hash_key = models.UUIDField(default=uuid.uuid4, editable=False)
lottery = models.ForeignKey(Lottery)
email = models.EmailField()
first_name = models.CharField(max_length=50)
last_name = models.CharField(max_length=50)
entry_code = models.CharField(max_length=10, blank=True)
is_winner = models.BooleanField(default=False)
registerd = models.DateTimeField(auto_now_add=True)
class Meta:
verbose_name = _('Lottery Participant')
verbose_name_plural = _('Lottery Participants')
unique_together = ('email', 'entry_code')
def __unicode__(self):
return unicode('%s %s: %s' % (
self.first_name, self.last_name, self.lottery))
def save(self, *args, **kwargs):
"""Extra functionalities before saving participant.
        Note: will not execute in the case of bulk inserts
"""
        # Raise an exception in case this participant is set as a winner
        # and the lottery already has a winner
if self.is_winner and type(self).objects.filter(
lottery=self.lottery, is_winner=True).count() > 1:
raise IntegrityError(
'Lottery %s already has a winner' % self.lottery)
        # Creates a random base36 entry code of 10 digits
        # collision probability = 1/36^10 (about 1 in 3.7e15) => almost 0
if self.entry_code is None or self.entry_code == '':
self.entry_code = random_base36_string(size=10)
# email to lowercase
if self.email is not None and self.email != '':
self.email = self.email.lower()
super(LotteryParticipant, self).save(*args, **kwargs)
def validate_unique(self, exclude=None, *args, **kwargs):
"""extends validation on unique values to determine whether
email already registerd for lottery or not
"""
participants = LotteryParticipant.objects.filter(
email=self.email.lower(), lottery=self.lottery)
if participants.count() > 0 and self.id is None:
raise ValidationError({
'email': ['Email already exists for this lottery']})
for participant in participants:
if participant.id != self.id:
raise ValidationError({
'email': ['Email already exists for this lottery']})
super(LotteryParticipant, self).validate_unique(
exclude=exclude, *args, **kwargs)
def full_name(self):
return '%s %s' % (self.first_name, self.last_name)
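# Usage sketch (added for illustration; the lookup value is hypothetical):
#   lottery = Lottery.objects.get(pk=1)
#   winner = lottery.get_winner()           # random draw on the first call
#   assert lottery.get_winner() == winner   # cached on the model afterwards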
| gpl-2.0 | -5,663,226,053,732,584,000 | 37.152672 | 79 | 0.633253 | false | 3.929245 | false | false | false |
jmagnusson/flask-admin | examples/sqla-filter-selectable/app.py | 15 | 2298 | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
import flask_admin as admin
from flask_admin.contrib import sqla
# Create application
app = Flask(__name__)
# Create dummy secret key so we can use sessions
app.config['SECRET_KEY'] = '123456790'
# Create in-memory database
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///sample_db_3.sqlite'
app.config['SQLALCHEMY_ECHO'] = True
db = SQLAlchemy(app)
# Flask views
@app.route('/')
def index():
return '<a href="/admin/">Click me to get to Admin!</a>'
class Person(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(50))
pets = db.relationship('Pet', backref='person')
def __unicode__(self):
return self.name
class Pet(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(50))
person_id = db.Column(db.Integer, db.ForeignKey('person.id'))
available = db.Column(db.Boolean)
def __unicode__(self):
return self.name
class PersonAdmin(sqla.ModelView):
""" Override ModelView to filter options available in forms. """
def create_form(self):
return self._use_filtered_parent(
super(PersonAdmin, self).create_form()
)
def edit_form(self, obj):
return self._use_filtered_parent(
super(PersonAdmin, self).edit_form(obj)
)
def _use_filtered_parent(self, form):
form.pets.query_factory = self._get_parent_list
return form
def _get_parent_list(self):
# only show available pets in the form
return Pet.query.filter_by(available=True).all()
def __unicode__(self):
return self.name
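# Note (added for illustration): query_factory is the callable WTForms'
# QuerySelectField uses to populate its options, so swapping it in
# _use_filtered_parent means the Person create/edit forms only ever list
# pets with available=True; with the seed data below, 'Dog' and 'Fish'
# show up as options but 'Ocelot' does not.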
# Create admin
admin = admin.Admin(app, name='Example: SQLAlchemy - Filtered Form Selectable',
template_mode='bootstrap3')
admin.add_view(PersonAdmin(Person, db.session))
admin.add_view(sqla.ModelView(Pet, db.session))
if __name__ == '__main__':
# Recreate DB
db.drop_all()
db.create_all()
person = Person(name='Bill')
pet1 = Pet(name='Dog', available=True)
pet2 = Pet(name='Fish', available=True)
pet3 = Pet(name='Ocelot', available=False)
db.session.add_all([person, pet1, pet2, pet3])
db.session.commit()
# Start app
app.run(debug=True)
| bsd-3-clause | -1,054,119,313,552,133,500 | 25.113636 | 79 | 0.64752 | false | 3.359649 | false | false | false |
eunchong/build | scripts/slave/recipe_modules/skia_swarming/api.py | 1 | 9701 | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from recipe_engine import recipe_api
import shlex
DEFAULT_TASK_EXPIRATION = 4*60*60
DEFAULT_TASK_TIMEOUT = 60*60
class SkiaSwarmingApi(recipe_api.RecipeApi):
"""Provides steps to run Skia tasks on swarming bots."""
@property
def swarming_temp_dir(self):
"""Path where artifacts like isolate file and json output will be stored."""
return self.m.path['slave_build'].join('swarming_temp_dir')
@property
def tasks_output_dir(self):
"""Directory where the outputs of the swarming tasks will be stored."""
return self.swarming_temp_dir.join('outputs')
def isolated_file_path(self, task_name):
"""Get the path to the given task's .isolated file."""
return self.swarming_temp_dir.join('skia-task-%s.isolated' % task_name)
def setup(self, luci_go_dir, swarming_rev=None):
"""Performs setup steps for swarming."""
self.m.swarming_client.checkout(revision=swarming_rev)
self.m.swarming.check_client_version()
self.setup_go_isolate(luci_go_dir)
# TODO(rmistry): Remove once the Go binaries are moved to recipes or buildbot.
def setup_go_isolate(self, luci_go_dir):
"""Generates and puts in place the isolate Go binary."""
self.m.step('download luci-go linux',
['download_from_google_storage', '--no_resume',
'--platform=linux*', '--no_auth', '--bucket', 'chromium-luci',
'-d', luci_go_dir.join('linux64')])
self.m.step('download luci-go mac',
['download_from_google_storage', '--no_resume',
'--platform=darwin', '--no_auth', '--bucket', 'chromium-luci',
'-d', luci_go_dir.join('mac64')])
self.m.step('download luci-go win',
['download_from_google_storage', '--no_resume',
'--platform=win32', '--no_auth', '--bucket', 'chromium-luci',
'-d', luci_go_dir.join('win64')])
# Copy binaries to the expected location.
dest = self.m.path['slave_build'].join('luci-go')
self.m.file.rmtree('Go binary dir', dest)
self.m.file.copytree('Copy Go binary',
source=luci_go_dir,
dest=dest)
def isolate_and_trigger_task(
self, isolate_path, isolate_base_dir, task_name, isolate_vars,
swarm_dimensions, isolate_blacklist=None, extra_isolate_hashes=None,
idempotent=False, store_output=True, extra_args=None, expiration=None,
hard_timeout=None):
"""Isolate inputs and trigger the task to run."""
os_type = swarm_dimensions.get('os', 'linux')
isolated_hash = self.isolate_task(
isolate_path, isolate_base_dir, os_type, task_name, isolate_vars,
blacklist=isolate_blacklist, extra_hashes=extra_isolate_hashes)
tasks = self.trigger_swarming_tasks([(task_name, isolated_hash)],
swarm_dimensions,
idempotent=idempotent,
store_output=store_output,
extra_args=extra_args,
expiration=expiration,
hard_timeout=hard_timeout)
assert len(tasks) == 1
return tasks[0]
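  # Example call (added for illustration; paths, names and dimensions are
  # hypothetical):
  #   task = api.skia_swarming.isolate_and_trigger_task(
  #       isolate_path=api.path['checkout'].join('tests.isolate'),
  #       isolate_base_dir=api.path['checkout'],
  #       task_name='test_skia',
  #       isolate_vars={'BUILD': '123'},
  #       swarm_dimensions={'os': 'Ubuntu', 'gpu': '10de', 'pool': 'Skia'})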
def isolate_task(self, isolate_path, base_dir, os_type, task_name,
isolate_vars, blacklist=None, extra_hashes=None):
"""Isolate inputs for the given task."""
self.create_isolated_gen_json(isolate_path, base_dir, os_type,
task_name, isolate_vars,
blacklist=blacklist)
hashes = self.batcharchive([task_name])
assert len(hashes) == 1
isolated_hash = hashes[0][1]
if extra_hashes:
isolated_hash = self.add_isolated_includes(task_name, extra_hashes)
return isolated_hash
def create_isolated_gen_json(self, isolate_path, base_dir, os_type,
task_name, extra_variables, blacklist=None):
"""Creates an isolated.gen.json file (used by the isolate recipe module).
Args:
isolate_path: path obj. Path to the isolate file.
base_dir: path obj. Dir that is the base of all paths in the isolate file.
os_type: str. The OS type to use when archiving the isolate file.
Eg: linux.
task_name: str. The isolated.gen.json file will be suffixed by this str.
extra_variables: dict of str to str. The extra vars to pass to isolate.
Eg: {'SLAVE_NUM': '1', 'MASTER': 'ChromiumPerfFYI'}
blacklist: list of regular expressions indicating which files/directories
not to archive.
"""
self.m.file.makedirs('swarming tmp dir', self.swarming_temp_dir)
isolated_path = self.isolated_file_path(task_name)
isolate_args = [
'--isolate', isolate_path,
'--isolated', isolated_path,
'--config-variable', 'OS', os_type,
]
if blacklist:
for b in blacklist:
isolate_args.extend(['--blacklist', b])
for k, v in extra_variables.iteritems():
isolate_args.extend(['--extra-variable', k, v])
isolated_gen_dict = {
'version': 1,
'dir': base_dir,
'args': isolate_args,
}
isolated_gen_json = self.swarming_temp_dir.join(
'%s.isolated.gen.json' % task_name)
self.m.file.write(
'Write %s.isolated.gen.json' % task_name,
isolated_gen_json,
self.m.json.dumps(isolated_gen_dict, indent=4),
)
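  # Sketch of the generated file (added for illustration; concrete values
  # are hypothetical): swarming_temp_dir/<task>.isolated.gen.json contains
  #   {
  #       "version": 1,
  #       "dir": "<base_dir>",
  #       "args": ["--isolate", "<isolate_path>",
  #                "--isolated", "<isolated_path>",
  #                "--config-variable", "OS", "linux",
  #                "--extra-variable", "MASTER", "ChromiumPerfFYI"]
  #   }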
def batcharchive(self, targets):
"""Calls batcharchive on the skia.isolated.gen.json file.
Args:
targets: list of str. The suffixes of the isolated.gen.json files to
archive.
Returns:
list of tuples containing (task_name, swarming_hash).
"""
return self.m.isolate.isolate_tests(
verbose=True, # To avoid no output timeouts.
build_dir=self.swarming_temp_dir,
targets=targets).presentation.properties['swarm_hashes'].items()
def add_isolated_includes(self, task_name, include_hashes):
"""Add the hashes to the task's .isolated file, return new .isolated hash.
Args:
      task_name: str. Name of the task to which to add the given hashes.
include_hashes: list of str. Hashes of the new includes.
Returns:
Updated hash of the .isolated file.
"""
isolated_file = self.isolated_file_path(task_name)
self.m.python.inline('add_isolated_input', program="""
import json
import sys
with open(sys.argv[1]) as f:
isolated = json.load(f)
for h in sys.argv[2:]:
isolated['includes'].append(h)
with open(sys.argv[1], 'w') as f:
json.dump(isolated, f, sort_keys=True)
""", args=[isolated_file] + include_hashes)
isolateserver = self.m.swarming_client.path.join('isolateserver.py')
r = self.m.python('upload new .isolated file for %s' % task_name,
script=isolateserver,
args=['archive', '--isolate-server',
self.m.isolate.isolate_server, isolated_file],
stdout=self.m.raw_io.output())
return shlex.split(r.stdout)[0]
def trigger_swarming_tasks(
self, swarm_hashes, dimensions, idempotent=False, store_output=True,
extra_args=None, expiration=None, hard_timeout=None):
"""Triggers swarming tasks using swarm hashes.
Args:
      swarm_hashes: list of (task_name, swarm_hash) tuples from the isolate server.
dimensions: dict of str to str. The dimensions to run the task on.
Eg: {'os': 'Ubuntu', 'gpu': '10de', 'pool': 'Skia'}
idempotent: bool. Whether or not to de-duplicate tasks.
store_output: bool. Whether task output should be stored.
extra_args: list of str. Extra arguments to pass to the task.
expiration: int. Task will expire if not picked up within this time.
DEFAULT_TASK_EXPIRATION is used if this argument is None.
hard_timeout: int. Task will timeout if not completed within this time.
DEFAULT_TASK_TIMEOUT is used if this argument is None.
Returns:
List of swarming.SwarmingTask instances.
"""
swarming_tasks = []
for task_name, swarm_hash in swarm_hashes:
swarming_task = self.m.swarming.task(
title=task_name,
isolated_hash=swarm_hash)
if store_output:
swarming_task.task_output_dir = self.tasks_output_dir.join(task_name)
swarming_task.dimensions = dimensions
swarming_task.idempotent = idempotent
swarming_task.priority = 90
swarming_task.expiration = (
expiration if expiration else DEFAULT_TASK_EXPIRATION)
swarming_task.hard_timeout = (
hard_timeout if hard_timeout else DEFAULT_TASK_TIMEOUT)
if extra_args:
swarming_task.extra_args = extra_args
swarming_tasks.append(swarming_task)
self.m.swarming.trigger(swarming_tasks)
return swarming_tasks
def collect_swarming_task(self, swarming_task):
"""Collects the specified swarming task.
Args:
swarming_task: An instance of swarming.SwarmingTask.
"""
return self.m.swarming.collect_task(swarming_task)
def collect_swarming_task_isolate_hash(self, swarming_task):
"""Wait for the given swarming task to finish and return its output hash.
Args:
swarming_task: An instance of swarming.SwarmingTask.
Returns:
the hash of the isolate output of the task.
"""
res = self.collect_swarming_task(swarming_task)
return res.json.output['shards'][0]['isolated_out']['isolated']
| bsd-3-clause | 4,230,104,533,486,848,500 | 40.814655 | 80 | 0.626636 | false | 3.771773 | false | false | false |
CCI-MOC/GUI-Backend | api/v1/views/quota.py | 1 | 2289 | """
Atmosphere quota rest api.
"""
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from django.shortcuts import get_object_or_404
from api.v1.serializers import QuotaSerializer
from api.v1.views.base import AuthAPIView
from core.models import Quota
class QuotaList(AuthAPIView):
"""
Lists or creates new Quotas
"""
def get(self, request):
"""
Returns a list of all existing Quotas
"""
quotas = Quota.objects.all()
serialized_data = QuotaSerializer(quotas, many=True).data
return Response(serialized_data)
def post(self, request):
"""
Creates a new Quota
"""
data = request.data
serializer = QuotaSerializer(data=data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class QuotaDetail(AuthAPIView):
"""
Fetches or updates a Quota
"""
def get(self, request, quota_id):
"""
Return the specified Quota
"""
quota = get_object_or_404(Quota, id=quota_id)
serialized_data = QuotaSerializer(quota).data
return Response(serialized_data)
def put(self, request, quota_id):
"""
Updates the specified Quota
"""
data = request.data
quota = get_object_or_404(Quota, id=quota_id)
serializer = QuotaSerializer(quota, data=data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_200_OK)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def patch(self, request, quota_id):
"""
Partially updates the specified Quota
"""
data = request.data
quota = get_object_or_404(Quota, id=quota_id)
serializer = QuotaSerializer(quota, data=data, partial=True)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_200_OK)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
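# Example requests (added for illustration; the URL prefix and payloads are
# hypothetical):
#   GET   /api/v1/quotas/42/                 -> 200, serialized quota
#   PUT   /api/v1/quotas/42/   {full object} -> 200 on success, 400 on error
#   PATCH /api/v1/quotas/42/   {"cpu": 16}   -> 200, updates only that field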
| apache-2.0 | -2,640,474,414,428,032,500 | 26.25 | 78 | 0.634338 | false | 4.072954 | false | false | false |
WeAreTheLink/AppNews | TBNA/items.py | 1 | 1174 | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
import ast
from scrapy.loader import ItemLoader
# Besides its url, an article has: a title, a publication date,
# a body, and the categories related to that article.
class Page(scrapy.Item):
url = scrapy.Field()
title = scrapy.Field()
date_pub = scrapy.Field()
text_article = scrapy.Field()
list_of_categories = scrapy.Field()
#Method to retrieve the categories of an article. There are two article
# patterns, both found in javascript: one after CATEGORIAS and the other after categories
def createCategories(categories):
if(categories is None):
return None
if("CATEGORIAS: " in categories):
categories = categories[categories.index("CATEGORIAS: "):]
else:
categories = categories[categories.index("categories:"):]
start = categories.index("[")
end = categories.index("]") + 1
list_of_categories = categories[start:end]
return ast.literal_eval(list_of_categories)
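# Worked example (added for illustration; the input string is hypothetical):
# given a scraped javascript fragment such as
#   '... categories: ["politics", "sports"], ...'
# the slice from "categories:" onwards plus the [ and ] indices isolates
# '["politics", "sports"]', which ast.literal_eval turns into the Python
# list ['politics', 'sports']. Strings containing "CATEGORIAS: " follow
# the same path via the first branch.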
| agpl-3.0 | -4,553,603,677,218,598,400 | 33.441176 | 93 | 0.674637 | false | 3.559271 | false | false | false |
HackNC2014/pulse-hacknc | data_analysis.py | 1 | 4088 | import pulse
from analyze import clean
import os
import sys
import tweepy
import json
import datetime
import copy
#keywords = [u'tragedy',u'shooting',u'shooter',u'shoot',u'concert',u'game']
def prune(d):
#keywords = [u'tragedy',u'shooting',u'shooter',u'shoot',u'concert',u'game']
badwords = [u'hour',u'are',u'here',u'much',u'things',u'than',u'there',u'much',u'from',u'still',u'being',u'into',u'out',u'every',u'they',u'now',u'were',u'very',u'after',u'would',u'could',u'can',u'can',u'will',u'doe',u'thats',u'why',u'take',u'cant',u'well',u'look',u'know',u'all',u'ur',u'what',u'who',u'where',u'or',u'do',u'got',u'when',u'no',u'u',u'im',u'dont',u'how',u'if',u'as',u'nd',u'up',u'by',u'what',u'about',u'was',u'',u'its',u'in',u'too',u'a',u'an',u'i',u'he',u'me',u'she',u'we',u'the',u'to',u'are',u'you',u'him',u'her',u'my',u'and',u'is',u'of',u'to',u'rt',u'for',u'on',u'it',u'that',u'this',u'be',u'just',u'like',u'lol',u'rofl',u'lmao',u'your',u'have',u'but',u'you',u'not',u'get',u'so',u'at',u'with']
#return {x for x in d if x not in badwords}
di = {}
for w in d:
if w not in badwords:
di[w] = d[w]
return di
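# Quick example (added for illustration):
#   prune({u'the': 9, u'rain': 4})  # -> {u'rain': 4}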
def analyze_test(tweets):
#keywords = [u'tragedy',u'shooting',u'shooter',u'shoot',u'concert',u'game']
keywords = [u'rain',u'rainy',u'storm',u'stormy',u'sunny',u'snow',u'snowy',u'cloudy',u'clear',u'windy',u'wind',u'bright']
temperature_words = [u'cold',u'freezing',u'frigid',u'chilly',u'mild',u'warm',u'hot',u'scorching',u'scorcher',u'heat']
all_tweets = copy.copy(tweets) #copy of tweets
event = None
word_freq = {} #frequency list
for tweet in tweets:
#print clean(tweet['text'])
if tweet['text'][:2].lower() == 'rt':continue
#---Your code goes here! Try stuff to find events!
#^- frequency analysis?
txt = clean(tweet['text'])
#txt = tweet['text']
words = txt.split(' ') #wordlist
#print txt
#print words
for w in words:
if not w.lower() in word_freq:
word_freq[w.lower()] = 1
else:
word_freq[w.lower()] += 1
freq = prune(word_freq)
freq2 = sorted(freq, key=freq.get)
freq = freq2[len(freq2)-30:len(freq2)] #top 30 current words
#print freq
i = 0
for f in freq:
if f in keywords: #tweak
#print "keyword found: "+f
#print len(all_tweets)
for t in all_tweets:
if t['text'][:2].lower() == 'rt':continue
if f in clean(t['text']).split(' '):
if not event:
event = {}
event['tweets'] = []
event['tweets'].append({'latlong':t['latlong'],'timestamp':t['timestamp'],'id':t['id']})
event['keywords'] = [f]
for temp in temperature_words:
if temp in clean(t['text']).split(' '):
event['keywords'].append(temp)
break
i += 1
#---Your code goes above this. Try to find events!
if event:
print("event found:")
#avg_lat = sum(map(lambda x:x['latlong'][0], event['tweets']))/float(len(event['tweets']))
#avg_long = sum(map(lambda x:x['latlong'][1], event['tweets']))/float(len(event['tweets']))
#avg_time = sum(map(lambda x:x['timestamp'], event['tweets']))/float(len(event['tweets']))
#event['latlong'] = str([avg_lat, avg_long])
#event['keywords'] = keywords
#event['timestamp'] = avg_time
print event
print ""
#analyze_session.post("https://luminous-fire-1209.firebaseio.com/events.json",data=json.dumps(event))
#----
num = 100 #number of tweets to parse at a time
f = open('./tweets/20-24_sea_10km', 'r')
lines = f.readlines();
t = []
for l in lines:
tweet = eval(l)
t.append(tweet)
tweets = t[::-1]
f.close()
for i in range(0,len(tweets)/num):
if (i+1)*num > len(tweets):
analyze_test(tweets[i*num:len(tweets)])
else:
analyze_test(tweets[i*num:(i+1)*num])
print "done"
| mit | -1,640,101,916,683,215,400 | 39.475248 | 712 | 0.552593 | false | 2.79235 | false | false | false |
SciDAP/cwltool | tests/test_fetch.py | 1 | 1252 | import unittest
import schema_salad.main
import schema_salad.ref_resolver
import schema_salad.schema
from cwltool.load_tool import load_tool
from cwltool.main import main
from cwltool.workflow import defaultMakeTool
class FetcherTest(unittest.TestCase):
def test_fetcher(self):
class TestFetcher(schema_salad.ref_resolver.Fetcher):
def __init__(self, a, b):
pass
def fetch_text(self, url): # type: (unicode) -> unicode
if url == "baz:bar/foo.cwl":
return """
cwlVersion: v1.0
class: CommandLineTool
baseCommand: echo
inputs: []
outputs: []
"""
else:
raise RuntimeError("Not foo.cwl")
def check_exists(self, url): # type: (unicode) -> bool
if url == "baz:bar/foo.cwl":
return True
else:
return False
def test_resolver(d, a):
return "baz:bar/" + a
load_tool("foo.cwl", defaultMakeTool, resolver=test_resolver, fetcher_constructor=TestFetcher)
self.assertEquals(0, main(["--print-pre", "--debug", "foo.cwl"], resolver=test_resolver,
fetcher_constructor=TestFetcher))
| apache-2.0 | 5,933,240,792,377,437,000 | 29.536585 | 102 | 0.576677 | false | 3.900312 | true | false | false |
pik-copan/pyregimeshifts | scripts/auto_correlation_function/auto_correlation_irregular_sampling.py | 1 | 5228 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2014 Jonathan F. Donges
# Author: Jonathan F. Donges <[email protected]>
# URL: <http://www.pik-potsdam.de/members/donges/software>
"""
Computes auto-correlation function for irregularly sampled time series.
Uses the method proposed in:
Rehfeld, K., Marwan, N., Heitzig, J., & Kurths, J. (2011). Comparison of correlation analysis techniques for irregularly sampled time series. Nonlinear Processes in Geophysics, 18(3), 389-404.
This script provides analyses for this publication:
J.F. Donges, R.V. Donner, N. Marwan, S.F.M. Breitenbach, K. Rehfeld, and J. Kurths,
Nonlinear regime shifts in Holocene Asian monsoon variability: Potential impacts on cultural change and migratory patterns,
Climate of the Past 11, 709-741 (2015),
DOI: 10.5194/cp-11-709-2015
"""
#
# Imports
#
import sys
import numpy as np
import pylab
import progressbar
#
# Settings
#
# Filename
FILENAME_X = "../../data/raw_proxy_data/Dongge_DA.dat"
# Resolution of cross-correlation (units of time)
DELTA_LAG = 10 # Measured in years here
# Maximum lag index
MAX_LAG_INDEX = 100
# Toggle detrending
DETRENDING = True
DETRENDING_WINDOW_SIZE = 1000. # Measured in years here
#
# Functions
#
def detrend_time_series(time, data, window_size):
# Get length of data array
n = data.shape[0]
# Initialize a local copy of data array
detrended_data = np.empty(n)
# Detrend data
for j in xrange(n):
# Get lower and upper bound of window in time domain
lower_bound = time[j] - window_size / 2.
upper_bound = time[j] + window_size / 2.
# Get time indices lying within the window
window_indices = np.logical_and(time >= lower_bound, time <= upper_bound)
# Substract window mean from data point in the center
detrended_data[j] = data[j] - data[window_indices].mean()
return detrended_data
def gaussian(x, std):
"""
Returns value of gaussian distribution at x with 0 mean
and standard deviation std.
"""
    # Note: prefactor fixed to 1/(std*sqrt(2*pi)); the normalization cancels
    # in the kernel estimator below anyway, so only the kernel shape matters.
    return 1.0 / (std * np.sqrt(2 * np.pi)) * np.exp(-x ** 2 / (2 * std ** 2))
def kernel_auto_correlation_est(x, time_diff, kernel_func, kernel_param,
delta_lag, max_lag_index):
"""
Estimates auto correlation using a kernel function.
"""
# Normalize time series
x -= x.mean()
x /= x.std()
# Initialize discrete auto-correlation function
auto_correlation = np.zeros(max_lag_index + 1)
# Loop over all positive lags and zero lag
for k in xrange(max_lag_index + 1):
# Calculate b matrix
b = kernel_func(k * delta_lag - time_diff, kernel_param)
# Calculate nominator
nominator = np.dot(x, np.dot(b, x.transpose()))
# Calculate denominator
denominator = b.sum()
# Calculate auto-correlation
auto_correlation[k] = nominator / denominator
lag_times = delta_lag * np.arange(max_lag_index + 1)
return (lag_times, auto_correlation)
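# The estimator above implements the gaussian-kernel ACF of Rehfeld et al.
# (2011) (explanatory note added for illustration):
#
#   acf(k * delta_lag) = sum_ij x_i * K(k*delta_lag - (t_i - t_j)) * x_j
#                        -------------------------------------------------
#                              sum_ij K(k*delta_lag - (t_i - t_j))
#
# i.e. every pair of normalized observations contributes to the correlation
# at lag k*delta_lag, weighted by how close its actual time separation
# t_i - t_j is to that lag.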
#
# Main script
#
# Load record x
data_x = np.loadtxt(FILENAME_X, unpack=False, usecols=(0,1,), comments="#")
#data_x = np.fromfile(FILENAME_X, sep=" ")
time_x = data_x[:,0]
x = data_x[:,1]
# Detrending of time series using moving window averages
if DETRENDING:
x = detrend_time_series(time_x, x, DETRENDING_WINDOW_SIZE)
# Get length of records
N_x = len(time_x)
# Get recommended standard deviation of gaussian Kernel (Kira Rehfeld's
# NPG paper)
sigma = 0.25 * np.diff(time_x).mean()
print "Length of record x:", N_x
print "Mean sampling time x:", np.diff(time_x).mean()
print "Recommended standard deviation of gaussian Kernel:", sigma
# Calculate matrix of time differences
time_diff = np.zeros((N_x, N_x))
for i in xrange(N_x):
for j in xrange(N_x):
time_diff[i,j] = time_x[i] - time_x[j]
# Estimate auto-correlation function
(lag_times, auto_correlation) = kernel_auto_correlation_est(x=x.copy(), time_diff=time_diff, kernel_func=gaussian, kernel_param=sigma, delta_lag=DELTA_LAG, max_lag_index=MAX_LAG_INDEX)
#
# Save results
#
results = np.zeros((MAX_LAG_INDEX + 1, 2))
results[:,0] = lag_times
results[:,1] = auto_correlation
np.savetxt("kernel_acf_dongge.txt", results)
#
# Plot results
#
# Set plotting parameters (for Clim. Past paper)
params = { 'figure.figsize': (6.,6.),
'axes.labelsize': 12,
'text.fontsize': 12,
'xtick.labelsize': 12,
'ytick.labelsize': 12,
'legend.fontsize': 10,
'title.fontsize': 12,
'text.usetex': False,
'font': 'Helvetica',
'mathtext.bf': 'helvetica:bold',
'xtick.major.pad': 6,
'ytick.major.pad': 6,
'xtick.major.size': 5,
'ytick.major.size': 5,
'tick.labelsize': 'small'
}
#pylab.rcParams.update(params)
# Plot time series
pylab.figure(1)
pylab.plot(time_x, x)
pylab.xlabel("Age (y B.P.)")
pylab.ylabel("Normalized values")
pylab.figure(2)
pylab.plot(lag_times, auto_correlation, "k")
pylab.axhline(y=1 / np.e, color="red")
pylab.xlabel("Time delay [y]")
pylab.ylabel("ACF")
pylab.ylim(-0.5,1)
pylab.savefig("auto_corr_irregular.pdf")
pylab.show()
| mit | 8,030,556,805,946,245,000 | 25.00995 | 192 | 0.647284 | false | 3.028969 | false | false | false |
ormandj/stalker | stalkerweb/setup.py | 1 | 1247 | #!/usr/bin/env python
""" setuptools for stalkerweb """
from setuptools import setup, find_packages
from stalkerweb import __version__ as version
setup(
name='stalkerweb',
version=version,
author="Florian Hines",
author_email="[email protected]",
description="Simple Monitoring System",
url="http://github.com/pandemicsyn/stalker",
packages=find_packages(),
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.6',
'Environment :: No Input/Output (Daemon)',
],
install_requires=[
'stalkerutils==2.0.2',
'eventlet==0.17.4',
'flask==0.10.1',
'redis==2.10.3',
'pymongo==3.0.3',
'mmh3==2.3.1',
'flask-rethinkdb==0.2',
'rethinkdb==2.1.0.post2',
'flask-bcrypt==0.7.1',
'flask-wtf==0.12',
],
include_package_data=True,
zip_safe=False,
scripts=['bin/stalker-web',],
data_files=[('share/doc/stalkerweb',
['README.md', 'INSTALL',
'etc/stalker-web.conf',
'etc/init.d/stalker-web',
])]
)
| apache-2.0 | 5,466,479,338,180,866,000 | 28.690476 | 61 | 0.553328 | false | 3.435262 | false | false | false |
ppizarror/korektor | bin/langeditor/_export.py | 1 | 1153 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
EXPORT
Adapts a file into plain text so it can be translated correctly in Google
Author: PABLO PIZARRO @ github.com/ppizarror
Date: 2014-2015
License: GPLv2
"""
__author__ = "ppizarror"
# Library imports
import os
import sys
reload(sys)
# noinspection PyUnresolvedReferences
sys.setdefaultencoding('UTF8') # @UndefinedVariable
DL = " // "
try:
    namearchive = raw_input("Enter the name of the file you want to transform: ").replace(".txt", "")
# noinspection PyArgumentEqualDefault
archivo = open(namearchive + ".txt", "r")
except:
print "El archivo no existe!"
exit()
# noinspection PyUnboundLocalVariable
archivo2 = open(namearchive + "_exported" + ".txt", "w")
# noinspection PyUnboundLocalVariable
for linea in archivo:
linea = linea.strip().split(DL)
nwlinea = linea[1].replace("|", " ") + "\n"
archivo2.write("{" + linea[0] + "}\n")
archivo2.write(nwlinea)
archivo.close()
archivo2.close()
print "Archivo generado correctamente"
try:
os.remove("_export.pyc")
except:
pass
| gpl-2.0 | -3,734,875,739,352,997,000 | 23.021739 | 104 | 0.665508 | false | 2.8775 | false | false | false |
blurstudio/cross3d | cross3d/studiomax/mixer/clip.py | 1 | 9259 | ##
# \namespace cross3d.studiomax
#
# \remarks The cross3d.studiomax.Clip module contains an
# abstraction of the MAXScript MXClip class for interacting
# with the Motion Mixer.
#
# \author willc
# \author Blur Studio
# \date 09/28/15
#
import Py3dsMax
from Py3dsMax import mxs
from cross3d import ClipPortion, TrackPortion
from cross3d.abstract.mixer.clip import AbstractClip
################################################################################
#####------------------------------ Classes -------------------------------#####
################################################################################
class StudiomaxClip(AbstractClip):
"""An abstraction of the MAXScript MxClip class.
Attributes:
clip: The ValueWrapper for the MxClip this Clip is wrapping.
track: The Track instance for the MxClip's parent MxTrack.
numWeights: The number of weights in the clip's weight curve
(relevant only when clip is in a layer track)
globStart: The global frame value for the start point of the MxClip
globEnd: The global frame value for the end point of the MxClip
filename: The filename of the bip file used by the MxClip.
scale: The MxClip's scale. Modifying the scale will cause the Clip to
scale on the right edge. The left edge will not move.
"""
@property
def filename(self):
"""The filename of the bip file used by the MxClip."""
return self.clip.filename
@property
def globStart(self):
"""The global frame value for the start point of the MxClip"""
return float(self.clip.globStart)
@property
def globEnd(self):
"""The global frame value for the end point of the MxClip"""
return float(self.clip.globEnd)
@property
def numWeights(self):
"""The number of weights in the clip's weight curve
(relevant only when clip is in a layer track)"""
return int(self.clip.numWeights)
@property
def sourceEnd(self):
return float(self.clip.orgEnd)
@property
def sourceStart(self):
return float(self.clip.orgStart)
@property
def scale(self):
return float(self.clip.scale)
@property
def trimEnd(self):
return float(self.clip.trimEnd)
@property
def trimStart(self):
return float(self.clip.trimStart)
def analyzeWeights(self, occludedPortions):
"""Determines which portions of the Clip are used, and which portions of
the Clip will occlude Tracks below.
Args:
occludedPortions(list): A list of `TrackPortion` instances
for every portion of the Clip that will be occluded
by Tracks above it.
Returns:
tuple: A tuple containing a list of `ClipPortion`
instances for every used portion of the Clip, and a
list of `TrackPortion` instances for every portion of
the Clip that will occlude tracks below it.
"""
if self.track.isTransitionTrack:
# this won't work...
return
clipOcclPortions = []
ClipPortions = []
clipStart, clipEnd = self.globStart, self.globEnd
if self.numWeights:
usedPortions = []
# Initialize the first rangeStart with the global start for the
# clip. We'll modify this if the weights make the clip have no
# effect for part of its duration.
rangeStart, rangeEnd = clipStart, None
# Keep a seperate occluding clip range. We'll keep track of
# occluding clips so we can test against them to update clip ranges
# later on.
occlStart, occlEnd = None, None
prevWVal = 0.0
for wi, (wTime, wVal) in enumerate(self.iterWeights()):
# Always move the end to the current position
rangeEnd = wTime
if wVal == 0.0:
# If the usedPortion has a non-zero length and isn't
# non-effecting for its entire duration, add it to the used
# portions.
if rangeEnd > rangeStart and prevWVal:
usedPortions.append(
TrackPortion(self.track, rangeStart, rangeEnd)
)
# Reset start to current position
rangeStart = wTime
if wVal == 1.0:
					# If this is the first weight, start at the beginning of the
# clip, since the curve will extend back past this weight.
if wi == 0:
occlStart = clipStart
# If we already have a start stored for an occluding
# portion, store this weight as the (new) end. Otherwise,
# store it as the start.
if occlStart:
occlEnd = wTime
else:
occlStart = wTime
else:
# If a start and end are stored for the occluding
# TrackPortion, add that TrackPortion to the list of
# occluding portions for this clip.
if occlStart and occlEnd:
clipOcclPortions.append(
TrackPortion(self.track, occlStart, occlEnd)
)
# Clear the occluding start/end, since the track weighting
# is no longer fully occluding.
occlStart, occlEnd = None, None
prevWVal = wVal
# If occlStart is set, add the remainder of the clip to occluding
# clips.
if occlStart:
clipOcclPortions.append(
TrackPortion(self.track, occlStart, clipEnd)
)
# If the clip ended with a non-zero weight, add the remainder as a
# usedPortion.
if wVal:
usedPortions.append(
TrackPortion(self.track, rangeStart, clipEnd)
)
# Finally, we'll clean up the list of ClipPortions by eliminating
# occluded sections of clips, and condensing continuous clips that
# were split where their weight dips tangential to zero.
usedSC = self._occludeClipPortions(usedPortions, occludedPortions)
ClipPortions = self._coalesceClipPortions(usedSC)
else:
clipRange = self.globStart, self.globEnd
clipOcclPortions = [TrackPortion(self.track, *clipRange)]
ClipPortions = self._occludeClipPortions(
[ClipPortion(self, *clipRange)],
occludedPortions
)
occludedPortions.extend(clipOcclPortions)
return ClipPortions, occludedPortions
def getWeightTime(self, index):
"""Retrieves the global frame number the weight at the specified index
is placed at.
Args:
index(int): Index of desired weight to retrieve a time
for.
Returns:
float: Global frame number for the position of the
weight.
Raises:
IndexError
"""
if index < 0 or index >= self.numWeights:
raise IndexError('Index out of range')
# Adjust the weight time to be global, not local to the clip.
return float(mxs.getWeightTime(self.clip, index+1)) + self.globStart
def getWeightValue(self, index):
"""Retrieves the value of the weight at the specified index.
Args:
index(int): Index of desired weight to retrieve a value
for.
Returns:
float: Value of the weight at the index specified.
Raises:
IndexError
"""
if index < 0 or index >= self.numWeights:
raise IndexError('Index out of range')
return float(mxs.getWeight(self.clip, index+1))
def iterWeights(self):
"""Wraps the MAXScript getWeight and getWeightTime global functions into
a generator that returns tuples of the time and value for all
weights in the Track.
Returns:
generator: Generator that produces tuples of
((float)time, (float)value) for weights on the
track.
"""
count = self.numWeights
for i in range(count):
t = self.getWeightTime(i)
v = self.getWeightValue(i)
yield (t, v)
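	# Usage sketch (added for illustration; `clip` is a StudiomaxClip
	# instance):
	#   curve = [(frame, value) for frame, value in clip.iterWeights()]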
def weights(self):
"""Wraps the MAXScript getWeight and getWeightTime global functions into
a generator that returns tuples of the time and value for all
weights on the Clip.
Returns:
list: List of tuples for every weight on the Clip in
the form ((float)time, (float)value).
"""
return [w for w in self.iterWeights()]
def _coalesceClipPortions(self, inputPortions):
ClipPortions = []
clip = inputPortions.pop(0)
scStart = clip.start
scEnd = clip.end
while len(inputPortions):
clip = inputPortions.pop(0)
if scEnd == clip.start:
scEnd = clip.end
else:
ClipPortions.append(ClipPortion(self, scStart, scEnd))
scStart, scEnd = clip.start, clip.end
ClipPortions.append(ClipPortion(self, scStart, scEnd))
return ClipPortions
def _occludeClipPortions(self, ClipPortions, occludedPortions):
outputClips = []
while len(ClipPortions):
sc = ClipPortions.pop(0)
for ocR in occludedPortions:
# if ClipPortion is completely occluded
if (ocR.start < sc.start) and (sc.end < ocR.end):
sc = None
break
containsOcclStart = (
(sc.start < ocR.start) and (ocR.start < sc.end)
)
containsOcclEnd = ((sc.start < ocR.end) and (ocR.end < sc.end))
if containsOcclStart and containsOcclEnd:
ClipPortions.append(ClipPortion(self, sc.start, ocR.start))
sc = ClipPortion(self, ocR.end, sc.end)
elif containsOcclStart:
sc = ClipPortion(self, sc.start, ocR.start)
elif containsOcclEnd:
sc = ClipPortion(self, ocR.end, sc.end)
else:
outputClips.append(sc)
return outputClips
def __str__(self):
return 'Clip [{}]'.format(self.filename)
################################################################################
# register the symbol
import cross3d
cross3d.registerSymbol('Clip', StudiomaxClip)
| mit | -8,625,273,223,106,353,000 | 30.487719 | 80 | 0.656118 | false | 3.429259 | false | false | false |
PLyczkowski/Sticky-Keymap | 2.74/scripts/addons_contrib/add_mesh_space_tree/simplefork.py | 3 | 6997 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# SCA Tree Generator, a Blender addon
# (c) 2013 Michel J. Anders (varkenvarken)
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
from math import pi
from mathutils import Quaternion
rot120 = 2 * pi / 3
def rot(point, axis, angle):
q = Quaternion(axis, angle)
P = point.copy()
P.rotate(q)
#print(point, P)
return P
def vertexnormal(d1, d2, d3):
n1 = d1.cross(d2).normalized()
n2 = d2.cross(d3).normalized()
n3 = d3.cross(d1).normalized()
n = (n1 + n2 + n3).normalized()
if (d1 + d2 + d3).dot(n) > 0:
return -n
return n
def simplefork2(p0, p1, p2, p3, r0, r1, r2, r3):
d1 = p1 - p0
d2 = p2 - p0
d3 = p3 - p0
#print(d1, d2, d3)
n = vertexnormal(d1, d2, d3)
#print(n)
pp1 = p0 + d1 / 3
n1a = r1 * n
n1b = rot(n1a, d1, rot120)
n1c = rot(n1a, d1, -rot120)
v1a = pp1 + n1a
v1b = pp1 + n1b
v1c = pp1 + n1c
pp2 = p0 + d2 / 3
n2a = r2 * n
n2b = rot(n2a, d2, rot120)
n2c = rot(n2a, d2, -rot120)
v2a = pp2 + n2a
v2b = pp2 + n2b
v2c = pp2 + n2c
pp3 = p0 + d3 / 3
n3a = r3 * n
n3b = rot(n3a, d3, rot120)
n3c = rot(n3a, d3, -rot120)
v3a = pp3 + n3a
v3b = pp3 + n3b
v3c = pp3 + n3c
n0a = n * r0
v0a = p0 + n0a
v0c = p0 - d3.normalized() * r0 - n0a / 3
v0d = p0 - d1.normalized() * r0 - n0a / 3
v0b = p0 - d2.normalized() * r0 - n0a / 3
#v0b=p0+(n1b+n2c)/2
#v0d=p0+(n2b+n3c)/2
#v0c=p0+(n3b+n1c)/2
verts = (v1a, v1b, v1c, v2a, v2b, v2c, v3a, v3b, v3c, v0a, v0b, v0c, v0d)
faces = ((0, 1, 10, 9), (1, 2, 11, 10), (2, 0, 9, 11), # chck
(3, 4, 11, 9), (4, 5, 12, 11), (5, 3, 9, 12), # chck
(6, 7, 12, 9),
(7, 8, 10, 12),
(8, 6, 9, 10),
(10, 11, 12))
return verts, faces
def simplefork(p0, p1, p2, p3, r0, r1, r2, r3):
d1 = p1 - p0
d2 = p2 - p0
d3 = p3 - p0
#print(d1, d2, d3)
n = -vertexnormal(d1, d2, d3)
#print(n)
# the central tetrahedron
n0a = n * r0 * 0.3
v0a = n0a
v0b = -d1 / 6 - n0a / 2
v0c = -d2 / 6 - n0a / 2
v0d = -d3 / 6 - n0a / 2
n1 = v0a + v0c + v0d
n2 = v0a + v0b + v0d
n3 = v0a + v0b + v0c
q1 = n1.rotation_difference(d1)
q2 = n2.rotation_difference(d2)
q3 = n3.rotation_difference(d3)
pp1 = p0 + d1 / 3
v1a = v0a.copy()
v1b = v0c.copy()
v1c = v0d.copy()
v1a.rotate(q1)
v1b.rotate(q1)
v1c.rotate(q1)
v1a += pp1
v1b += pp1
v1c += pp1
pp2 = p0 + d2 / 3
v2a = v0a.copy()
v2b = v0b.copy()
v2c = v0d.copy()
v2a.rotate(q2)
v2b.rotate(q2)
v2c.rotate(q2)
v2a += pp2
v2b += pp2
v2c += pp2
pp3 = p0 + d3 / 3
v3a = v0a.copy()
v3b = v0b.copy()
v3c = v0c.copy()
v3a.rotate(q3)
v3b.rotate(q3)
v3c.rotate(q3)
v3a += pp3
v3b += pp3
v3c += pp3
v0a += p0
v0b += p0
v0c += p0
v0d += p0
verts = (v1a, v1b, v1c, v2a, v2b, v2c, v3a, v3b, v3c, v0a, v0b, v0c, v0d)
faces = (
#(1, 2, 12, 11),
#(9, 12, 2, 0),
#(11, 9, 0, 1),
#(5, 4, 10, 12),
#(4, 3, 9, 10),
#(3, 5, 12, 9),
(8, 7, 11, 10),
(7, 5, 9, 11),
(6, 8, 10, 9),
(10, 11, 12))
return verts, faces
def bridgequads(aquad, bquad, verts):
"return faces, aloop, bloop"
ai, bi, _ = min([(ai, bi, (verts[a] - verts[b]).length_squared) for ai, a in enumerate(aquad) for bi, b in enumerate(bquad)], key=lambda x: x[2])
n = len(aquad)
#print([(aquad[(ai+i)%n], aquad[(ai+i+1)%n], bquad[(bi+i+1)%n], bquad[(bi+i)%n]) for i in range(n)], "\n", [aquad[(ai+i)%n] for i in range(n)], "\n", [aquad[(bi+i)%n] for i in range(n)])
#print('bridgequads', aquad, bquad, ai, bi)
return ([(aquad[(ai + i) % n], aquad[(ai + i + 1) % n], bquad[(bi + i + 1) % n], bquad[(bi + i) % n]) for i in range(n)], [aquad[(ai + i) % n] for i in range(n)], [bquad[(bi + i) % n] for i in range(n)])
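# Small example (added for illustration; indices are hypothetical): given
# two quads that index into verts, bridgequads picks the closest pair of
# corners as the common start and walks both loops in step, e.g.
#   faces, aloop, bloop = bridgequads((0, 1, 2, 3), (4, 5, 6, 7), verts)
# returns four side quads stitching loop (0, 1, 2, 3) to loop (4, 5, 6, 7).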
def quadfork(p0, p1, p2, p3, r0, r1, r2, r3):
d1 = p1 - p0
d2 = p2 - p0
d3 = p3 - p0
a = (d3 - d2).normalized()
n = d2.cross(d3).normalized()
pp1 = p0 + d1 / 3
pp2 = p0 + d2 / 3
pp3 = p0 + d3 / 3
v2a = pp2 + (n + a) * r2
v2b = pp2 + (n - a) * r2
v2c = pp2 + (-n - a) * r2
v2d = pp2 + (-n + a) * r2
v3a = pp3 + (n + a) * r3
v3b = pp3 + (n - a) * r3
v3c = pp3 + (-n - a) * r3
v3d = pp3 + (-n + a) * r3
a = d1.cross(n).normalized()
n = a.cross(d1).normalized()
v1a = pp1 + (n + a) * r1
v1b = pp1 + (n - a) * r1
v1c = pp1 + (-n - a) * r1
v1d = pp1 + (-n + a) * r1
#the top of the connecting block consist of two quads
v0a = p0 + (n + a) * r0
v0b = p0 + (n - a) * r0
v0c = p0 + (-n - a) * r0
v0d = p0 + (-n + a) * r0
v0ab = p0 + n * r0
v0cd = p0 - n * r0
#the bottom is a single quad (which means the front and back are 5gons)
d = d1.normalized() * r0 * 0.1
vb0a = v0a + d
vb0b = v0b + d
vb0c = v0c + d
vb0d = v0d + d
verts = [v1a, v1b, v1c, v1d, # 0 1 2 3
v2a, v2b, v2c, v2d, # 4 5 6 7
v3a, v3b, v3c, v3d, # 8 9 10 11
v0a, v0ab, v0b, v0c, v0cd, v0d, # 12 13 14 15 16 17
vb0a, vb0b, vb0c, vb0d] # 18 19 20 21
faces = [(0, 1, 19, 18), # p1->p0 bottom
(1, 2, 20, 19),
(2, 3, 21, 20),
(3, 0, 18, 21),
#(4, 5, 14, 13), # p2 -> p0 top right
#(5, 6, 15, 14),
#(6, 7, 16, 15),
#(7, 4, 13, 16),
(13, 14, 5, 4),
(14, 15, 6, 5),
(15, 16, 7, 6),
(16, 13, 4, 7),
#(8, 9, 13, 12), # p3 -> p0 top left
#(9, 10, 16, 13),
#(10, 11, 17, 16),
#(11, 8, 12, 17),
(12, 13, 9, 8),
(13, 16, 10, 9),
(16, 17, 11, 10),
(17, 12, 8, 11),
#(12, 17, 21, 18), # connecting block
#(14, 15, 20, 19),
#(12, 13, 14, 19, 18),
#(15, 16, 17, 21, 20)]
(12, 17, 21, 18), # connecting block
(19, 20, 15, 14),
(18, 19, 14, 13, 12),
(20, 21, 17, 16, 15)]
return verts, faces
| gpl-2.0 | -6,087,566,832,142,054,000 | 24.351449 | 207 | 0.480492 | false | 2.266602 | false | false | false |
anhnv-3991/VoltDB | lib/python/voltcli/voltdb.d/collect.py | 2 | 3534 | import sys
# This file is part of VoltDB.
# Copyright (C) 2008-2015 VoltDB Inc.
#
# This file contains original code and/or modifications of original code.
# Any modifications made by VoltDB Inc. are licensed under the following
# terms and conditions:
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
@VOLT.Command(
description = 'Collect logs on the current node for problem analysis.',
options = (
VOLT.StringOption (None, '--prefix', 'prefix',
'file name prefix for uniquely identifying collection',
default = 'voltdb_logs'),
VOLT.StringOption (None, '--upload', 'host',
'upload resulting collection to HOST via SFTP',
default = ''),
VOLT.StringOption (None, '--username', 'username',
'user name for SFTP upload',
default = ''),
VOLT.StringOption (None, '--password', 'password',
'password for SFTP upload',
default = ''),
VOLT.BooleanOption(None, '--no-prompt', 'noprompt',
'automatically upload collection (without user prompt)',
default = False),
VOLT.BooleanOption(None, '--dry-run', 'dryrun',
'list the log files without collecting them',
default = False),
VOLT.BooleanOption(None, '--skip-heap-dump', 'skipheapdump',
'exclude heap dump file from collection',
default = False),
VOLT.IntegerOption(None, '--days', 'days',
                       'number of days of files to collect (log and crash files); the current day counts as 1',
default = 14)
),
arguments = (
VOLT.PathArgument('voltdbroot', 'the voltdbroot path', absolute = True)
)
)
def collect(runner):
if int(runner.opts.days) == 0:
print >> sys.stderr, "ERROR: '0' is invalid entry for option --days"
sys.exit(-1)
runner.args.extend(['--voltdbroot='+runner.opts.voltdbroot, '--prefix='+runner.opts.prefix, '--host='+runner.opts.host, '--username='+runner.opts.username, '--password='+runner.opts.password,
'--noprompt='+str(runner.opts.noprompt), '--dryrun='+str(runner.opts.dryrun), '--skipheapdump='+str(runner.opts.skipheapdump), '--days='+str(runner.opts.days)])
runner.java_execute('org.voltdb.utils.Collector', None, *runner.args)
| agpl-3.0 | -7,497,001,605,177,532,000 | 50.970588 | 195 | 0.626203 | false | 4.330882 | false | false | false |
jplusplus/dystopia-tracker | app/api/views.py | 1 | 2135 | from app.core.models import Source, Prediction, Realisation, Category
from rest_framework import generics, permissions, filters
from app.api.serializers import SourceSerializer, PredictionSerializer, RealisationSerializer, CategorySerializer
from app.api.serializers import PredictionCreationSerializer
import app.api.filters
class PredictionList(generics.ListCreateAPIView):
queryset = Prediction.objects.all().order_by('-creation_date')
serializer_class = PredictionSerializer
permission_classes = [
permissions.AllowAny
]
filter_class = app.api.filters.PredictionFilter
def get_serializer_class(self):
if self.request.method == 'GET':
return PredictionSerializer
else:
return PredictionCreationSerializer
class PredictionDetail(generics.RetrieveUpdateDestroyAPIView):
model = Prediction
serializer_class = PredictionSerializer
permission_classes = [
permissions.AllowAny
]
filter_class = app.api.filters.PredictionFilter
class SourceList(generics.ListCreateAPIView):
model = Source
serializer_class = SourceSerializer
permission_classes = [
permissions.AllowAny
]
filter_class = app.api.filters.SourceFilter
class SourceDetail(generics.RetrieveUpdateDestroyAPIView):
model = Source
serializer_class = SourceSerializer
permission_classes = [
permissions.AllowAny
]
class CategoryList(generics.ListAPIView):
model = Category
serializer_class = CategorySerializer
permission_classes = [
permissions.AllowAny
]
class CategoryDetail(generics.RetrieveAPIView):
model = Category
serializer_class = CategorySerializer
permission_classes = [
permissions.AllowAny
]
class RealisationCreate(generics.CreateAPIView):
model = Realisation
serializer_class = RealisationSerializer
permission_classes = [
permissions.AllowAny
]
class RealisationDetail(generics.RetrieveUpdateAPIView):
model = Realisation
serializer_class = RealisationSerializer
permission_classes = [
permissions.AllowAny
] | lgpl-3.0 | -6,725,103,098,441,850,000 | 29.514286 | 113 | 0.738173 | false | 4.621212 | false | false | false |
gurneyalex/odoo | addons/mrp_subcontracting_account/tests/test_subcontracting_account.py | 3 | 3886 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.addons.mrp_subcontracting.tests.common import TestMrpSubcontractingCommon
from odoo.addons.stock_account.tests.test_stockvaluation import _create_accounting_data
from odoo.tests.common import Form
class TestAccountSubcontractingFlows(TestMrpSubcontractingCommon):
def test_subcontracting_account_flow_1(self):
self.stock_input_account, self.stock_output_account, self.stock_valuation_account, self.expense_account, self.stock_journal = _create_accounting_data(self.env)
self.finished.categ_id.property_valuation = 'real_time'
self.finished.write({
'property_account_expense_id': self.expense_account.id,
})
self.finished.categ_id.write({
'property_stock_account_input_categ_id': self.stock_input_account.id,
'property_stock_account_output_categ_id': self.stock_output_account.id,
'property_stock_valuation_account_id': self.stock_valuation_account.id,
'property_stock_journal': self.stock_journal.id,
})
self.stock_location = self.env.ref('stock.stock_location_stock')
self.customer_location = self.env.ref('stock.stock_location_customers')
self.supplier_location = self.env.ref('stock.stock_location_suppliers')
self.uom_unit = self.env.ref('uom.product_uom_unit')
self.env.ref('product.product_category_all').property_cost_method = 'fifo'
self.env.ref('product.product_category_all').property_valuation = 'real_time'
# IN 10@10 comp1 10@20 comp2
move1 = self.env['stock.move'].create({
'name': 'IN 10 units @ 10.00 per unit',
'location_id': self.supplier_location.id,
'location_dest_id': self.env.company.subcontracting_location_id.id,
'product_id': self.comp1.id,
'product_uom': self.uom_unit.id,
'product_uom_qty': 10.0,
'price_unit': 10.0,
})
move1._action_confirm()
move1._action_assign()
move1.move_line_ids.qty_done = 10.0
move1._action_done()
move2 = self.env['stock.move'].create({
'name': 'IN 10 units @ 20.00 per unit',
'location_id': self.supplier_location.id,
'location_dest_id': self.env.company.subcontracting_location_id.id,
'product_id': self.comp2.id,
'product_uom': self.uom_unit.id,
'product_uom_qty': 10.0,
'price_unit': 20.0,
})
move2._action_confirm()
move2._action_assign()
move2.move_line_ids.qty_done = 10.0
move2._action_done()
picking_form = Form(self.env['stock.picking'])
picking_form.picking_type_id = self.env.ref('stock.picking_type_in')
picking_form.partner_id = self.subcontractor_partner1
with picking_form.move_ids_without_package.new() as move:
move.product_id = self.finished
move.product_uom_qty = 1
picking_receipt = picking_form.save()
picking_receipt.move_lines.price_unit = 30.0
picking_receipt.action_confirm()
picking_receipt.move_lines.quantity_done = 1.0
picking_receipt.action_done()
# Finished is made of 1 comp1 and 1 comp2.
# Cost of comp1 = 10
# Cost of comp2 = 20
# --> Cost of finished = 10 + 20 = 30
        # Additional cost = 30 (from the purchase order line or directly set on the stock move here)
# Total cost of subcontracting 1 unit of finished = 30 + 30 = 60
self.assertEqual(picking_receipt.move_lines.stock_valuation_layer_ids.value, 60)
self.assertEqual(picking_receipt.move_lines.product_id.value_svl, 60)
self.assertEqual(picking_receipt.move_lines.stock_valuation_layer_ids.account_move_id.amount_total, 60)
| agpl-3.0 | -6,860,017,461,789,438,000 | 49.467532 | 167 | 0.642306 | false | 3.542388 | true | false | false |
BrainPad/FindYourCandy | webapp/candysorter/models/images/classify.py | 1 | 2701 | # Copyright 2017 BrainPad Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import os
import tensorflow as tf
from trainer.feature_extractor import FeatureExtractor
from trainer.model import ModelParams, TransferModel
logger = logging.getLogger(__name__)
class CandyClassifier(object):
def __init__(self, checkpoint_dir, params_file, inception_model_file):
self.inception_model = None
self.model = None
self.checkpoint_dir = checkpoint_dir
self.params_file = params_file
self.inception_model_file = inception_model_file
@classmethod
def from_config(cls, config):
checkpoint_dir = config.CLASSIFIER_MODEL_DIR
return cls(
checkpoint_dir=checkpoint_dir,
params_file=os.path.join(checkpoint_dir, 'params.json'),
inception_model_file=config.INCEPTION_MODEL_FILE
)
def init(self):
self._load_inception_model()
self._load_transfer_model()
def reload(self):
tf.reset_default_graph()
self._load_transfer_model()
def _load_inception_model(self):
logger.info('Loading inception model...')
self.inception_model = FeatureExtractor(self.inception_model_file)
logger.info('Finished loading inception model.')
def _load_transfer_model(self):
logger.info('Loading transfer model...')
with tf.gfile.FastGFile(self.params_file, 'r') as f:
params = ModelParams.from_json(f.read())
self.model = TransferModel.from_model_params(params)
logger.info('Finished loading transfer model.')
def classify(self, img_bgr):
features = self.inception_model.get_feature_vector(img_bgr)
ckpt = tf.train.get_checkpoint_state(self.checkpoint_dir)
if ckpt is None:
raise IOError('Checkpoints not found.')
checkpoint_path = ckpt.model_checkpoint_path
result = self.model.restore_and_predict(features, checkpoint_path)
return result[0]
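# Hypothetical wiring sketch (paths invented for illustration; mirrors the
# attributes consumed by from_config):
#   clf = CandyClassifier('models/', 'models/params.json', 'inception.pb')
#   clf.init()
#   scores = clf.classify(img_bgr)  # img_bgr: an OpenCV BGR image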
| apache-2.0 | -3,130,514,658,233,544,700 | 36 | 82 | 0.668271 | false | 4.061654 | false | false | false |
eykd/ravel | ravel/loaders.py | 1 | 1313 | import codecs
import attr
from path import Path
from . import exceptions
class BaseLoader:
def load(self, environment, name):
source, is_up_to_date = self.get_source(environment, name)
return environment.compile_rulebook(source, name, is_up_to_date)
def get_source(self, environment, name):
raise NotImplementedError()
@attr.s
class FileSystemLoader(BaseLoader):
base_path = attr.ib(default='.')
extension = attr.ib(default='.ravel')
def get_up_to_date_checker(self, filepath):
filepath = Path(filepath)
try:
mtime = filepath.getmtime()
except OSError:
mtime = 0.0
def is_up_to_date():
try:
print('was %s, now %s' % (mtime, filepath.getmtime()))
return mtime == filepath.getmtime()
except OSError:
return False
return is_up_to_date
def get_source(self, environment, name):
filepath = Path(self.base_path) / (name + self.extension)
if not filepath.exists():
raise exceptions.RulebookNotFound(name)
with codecs.open(filepath, encoding='utf-8') as fi:
source = fi.read()
is_up_to_date = self.get_up_to_date_checker(filepath)
return source, is_up_to_date
| mit | -5,678,360,746,962,672,000 | 25.795918 | 72 | 0.600152 | false | 3.931138 | false | false | false |
feltus/BDSS | client/tests/transfer/mechanisms/test_user_options.py | 1 | 2020 | # Big Data Smart Socket
# Copyright (C) 2016 Clemson University
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import unittest
from unittest.mock import patch
from voluptuous import All, Range
from client.transfer.mechanisms import base
class TestUserInputOptionMechanism(unittest.TestCase):
@patch("builtins.input", return_value="Hello")
def test_user_input_option_prompts(self, mock_input):
opt = base.UserInputOption("option", "Option value?")
val = opt.prompt_for_value()
mock_input.assert_called_once_with("Option value?")
self.assertEqual(val, "Hello")
@patch.object(base, "getpass", return_value="World")
@patch("builtins.input", return_value="Hello")
def test_user_input_option_prompts_with_hidden_input(self, mock_input, mock_getpass):
opt = base.UserInputOption("option", "Option value?", hide_input=True)
val = opt.prompt_for_value()
mock_getpass.assert_called_once_with("Option value?")
self.assertEqual(val, "World")
@patch("builtins.input", side_effect=["a", -1, 5])
def test_prompt_until_valid_value(self, mock_input):
opt = base.UserInputOption("option", "Option value?", validation=All(int, Range(min=4, max=8)))
val = opt.prompt_for_value()
self.assertEqual(mock_input.call_count, 3)
self.assertEqual(val, 5)
| gpl-2.0 | 824,064,339,617,019,800 | 40.22449 | 103 | 0.710891 | false | 3.761639 | true | false | false |
andreykurilin/mlm | mlm/shell.py | 1 | 2502 | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Command-line interface to MLM
"""
import inspect
import pkgutil
import sys
import argparse
import mlm
from mlm import api
from mlm import commands
from mlm.commands import utils
def main(input_args=None):
if input_args is None:
input_args = sys.argv[1:]
# base parser
parser = argparse.ArgumentParser(
prog="mlm",
description=__doc__.strip(),
add_help=True
)
parser.add_argument('-v', '--version',
action='version',
version=mlm.__version__)
parser.add_argument('--config-file', type=str, metavar="<file>",
help="Path to configuration file.")
# all subcommands
subcommands = parser.add_subparsers(help='<subcommands>')
for importer, modname, _ in pkgutil.iter_modules(commands.__path__):
# load all submodules
importer.find_module(modname).load_module(modname)
for group_cls in utils.BaseCommand.__subclasses__():
group_parser = subcommands.add_parser(
group_cls.__name__.lower(),
help=group_cls.__doc__)
subcommand_parser = group_parser.add_subparsers()
for name, callback in inspect.getmembers(
group_cls(), predicate=inspect.ismethod):
command = name.replace('_', '-')
desc = callback.__doc__ or ''
help_message = desc.strip().split('\n')[0]
arguments = getattr(callback, 'args', [])
command_parser = subcommand_parser.add_parser(
command, help=help_message, description=desc)
for (args, kwargs) in arguments:
command_parser.add_argument(*args, **kwargs)
command_parser.set_defaults(func=callback)
# parse and run
args = parser.parse_args(input_args)
args.func(api.API(args.config_file), args)
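# Illustrative command group (an assumed shape, not part of this module):
# main() turns every utils.BaseCommand subclass into a subcommand group and
# every method into a hyphenated subcommand invoked as callback(api, args).
#   class Members(utils.BaseCommand):
#       """Manage members."""
#       def list_all(self, api, args):   # exposed as: mlm members list-all
#           """List all members."""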
if __name__ == "__main__":
main()
| apache-2.0 | -1,780,211,756,740,789,200 | 30.670886 | 78 | 0.621503 | false | 4.149254 | false | false | false |
gioman/QGIS | python/plugins/processing/algs/qgis/SpatialiteExecuteSQL.py | 1 | 2882 | # -*- coding: utf-8 -*-
"""
***************************************************************************
SpatialiteExecuteSQL.py
---------------------
Date : October 2016
Copyright : (C) 2016 by Mathieu Pellerin
Email : nirvn dot asia at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from builtins import str
__author__ = 'Mathieu Pellerin'
__date__ = 'October 2016'
__copyright__ = '(C) 2016, Mathieu Pellerin'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.GeoAlgorithmExecutionException import GeoAlgorithmExecutionException
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterString
from processing.tools import spatialite
from qgis.core import (QgsApplication,
QgsDataSourceUri)
class SpatialiteExecuteSQL(GeoAlgorithm):
DATABASE = 'DATABASE'
SQL = 'SQL'
def icon(self):
return QgsApplication.getThemeIcon("/providerQgis.svg")
def svgIconPath(self):
return QgsApplication.iconPath("providerQgis.svg")
def group(self):
return self.tr('Database')
def name(self):
return 'spatialiteexecutesql'
def displayName(self):
return self.tr('Spatialite execute SQL')
def defineCharacteristics(self):
self.addParameter(ParameterVector(self.DATABASE, self.tr('File Database'), False, False))
self.addParameter(ParameterString(self.SQL, self.tr('SQL query'), '', True))
def processAlgorithm(self, context, feedback):
database = self.getParameterValue(self.DATABASE)
uri = QgsDataSourceUri(database)
        if uri.database() == '':
if '|layerid' in database:
database = database[:database.find('|layerid')]
uri = QgsDataSourceUri('dbname=\'%s\'' % (database))
self.db = spatialite.GeoDB(uri)
sql = self.getParameterValue(self.SQL).replace('\n', ' ')
try:
self.db._exec_sql_and_commit(str(sql))
except spatialite.DbError as e:
raise GeoAlgorithmExecutionException(
self.tr('Error executing SQL:\n{0}').format(str(e)))
| gpl-2.0 | 1,415,196,693,037,728,300 | 36.921053 | 97 | 0.560722 | false | 4.648387 | false | false | false |
ARG-TLQ/Red-DiscordBot | cogs/wordfilter/converter.py | 1 | 1946 | #!/usr/bin/env python3.6
"""Custom v2 to v3 converter for WordFilter
Usage:
------
Copy v2 data into the same directory as this script. This includes filter.json,
settings.json, and whitelist.json.
Outputs:
--------
Saves a converted v3 json file in the same directory with the filename v3data.json.
Original data remains untouched.
"""
import json
UID = "5842647" # When typed on a T9 keyboard: luicogs
BASE = {UID: {"GUILD": {}}}
v3Json = BASE
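# Resulting v3 layout (illustration of the shape built below):
#   {"5842647": {"GUILD": {"<guild id>": {"filters": [...],
#     "channelAllowed": [...], "commandDenied": [...], ...}}}}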
# Convert filters into v3 format.
with open("filter.json") as v2Filters:
print("Converting filter.json...")
filters = json.load(v2Filters)
for key, val in filters.items():
if key not in v3Json[UID]["GUILD"]:
v3Json[UID]["GUILD"][key] = {}
v3Json[UID]["GUILD"][key]["filters"] = val
with open("settings.json") as v2Settings:
print("Converting settings.json...")
settings = json.load(v2Settings)
for key, val in settings.items():
if key not in v3Json[UID]["GUILD"]:
v3Json[UID]["GUILD"][key] = {}
# Merge two dicts together, should have no conflicts.
v3Json[UID]["GUILD"][key] = {**v3Json[UID]["GUILD"][key], **val}
with open("whitelist.json") as v2Whitelist:
print("Converting whitelist.json...")
whitelist = json.load(v2Whitelist)
for key, val in whitelist.items():
if key not in v3Json[UID]["GUILD"]:
v3Json[UID]["GUILD"][key] = {}
v3Json[UID]["GUILD"][key]["channelAllowed"] = val
with open("command_blacklist.json") as v2CmdBlacklist:
print("Converting command_blacklist.json..")
blacklist = json.load(v2CmdBlacklist)
for key, val in blacklist.items():
if key not in v3Json[UID]["GUILD"]:
v3Json[UID]["GUILD"][key] = {}
v3Json[UID]["GUILD"][key]["commandDenied"] = val
with open("v3data.json", "w") as output:
json.dump(v3Json, output, indent=4)
print("Word filter data successfully converted to v3 format!")
| gpl-3.0 | 1,165,456,621,346,796,500 | 32.551724 | 83 | 0.641829 | false | 3.320819 | false | false | false |
ldacosta/geometry2D | geometry/point.py | 1 | 5140 | """Point and Rectangle classes.
This code is in the public domain.
Point -- point with (x,y) coordinates
Rect -- two points, forming a rectangle
Taken from https://wiki.python.org/moin/PointsAndRectangles
"""
import math
from typing import Tuple
class Point:
"""A point identified by (x,y) coordinates.
supports: +, -, *, /, str, repr
length -- calculate length of vector to point from origin
distance_to -- calculate distance between two points
as_tuple -- construct tuple (x,y)
clone -- construct a duplicate
integerize -- convert x & y to integers
floatize -- convert x & y to floats
move_to -- reset x & y
    translate_following -- move (in place) by a 2D vector's dx, dy
slide_xy -- move (in place) +dx, +dy
rotate -- rotate around the origin
rotate_about -- rotate around another point
"""
def __init__(self, x:float=0.0, y:float=0.0):
self.x = x
self.y = y
@classmethod
def from_tuple(cls, pt_as_tuple:Tuple[float,float]):
return cls(x=pt_as_tuple[0],y=pt_as_tuple[1])
def __iter__(self):
return iter((self.x, self.y))
def __getitem__(self, item):
if item == 0:
return self.x
elif item == 1:
return self.y
else:
raise RuntimeError("Index %d does not make sense in a point" % (item))
def __attrs(self):
"""
All attributes in a single representation.
Returns:
A tuple with all attributes.
"""
return (self.x, self.y)
def __eq__(self, other):
return isinstance(other, Point) and self.__attrs() == other.__attrs()
def __hash__(self):
return hash(self.__attrs())
def __add__(self, another_pt):
"""Point(x1+x2, y1+y2)"""
return Point(self.x + another_pt.x, self.y + another_pt.y)
def __sub__(self, another_point):
"""Point(x1-x2, y1-y2)"""
return Point(self.x - another_point.x, self.y - another_point.y)
    def __isub__(self, another_point):
        self.x -= another_point.x
        self.y -= another_point.y
        return self
    def __mul__(self, scalar):
        """Point(x*scalar, y*scalar)"""
        return Point(self.x * scalar, self.y * scalar)
    def __truediv__(self, scalar):
        """Point(x/scalar, y/scalar)"""
        return Point(self.x / scalar, self.y / scalar)
def __str__(self):
return "(%.2f, %.2f)" % (self.x, self.y)
def __repr__(self):
return "%s(%r, %r)" % (self.__class__.__name__, self.x, self.y)
def length(self) -> float:
"""norm of vector (0,0) to this point"""
return math.sqrt(self.x ** 2 + self.y ** 2)
def distance_to(self, another_point) -> float:
"""Calculate the distance between two points."""
return (self - another_point).length()
def as_tuple(self):
"""(x, y)"""
return (self.x, self.y)
def clone(self):
"""Return a full copy of this point."""
return Point(self.x, self.y)
def integerize(self):
"""Convert co-ordinate values to integers."""
self.x = int(round(self.x))
self.y = int(round(self.y))
return self
def floatize(self):
"""Convert co-ordinate values to floats."""
self.x = float(self.x)
self.y = float(self.y)
def move_to(self, x, y):
"""Reset x & y coordinates."""
self.x = x
self.y = y
def translate_following(self, a_vector):
"""
Move to new (x+dx,y+dy).
:param a_vector: Vector 2D I have to follow.
:return: Unit.
"""
self.x = self.x + a_vector.x
self.y = self.y + a_vector.y
return self
def slide_xy(self, dx, dy):
'''Move to new (x+dx,y+dy).
Can anyone think up a better name for this function?
slide? shift? delta? move_by?
'''
self.x = self.x + dx
self.y = self.y + dy
def rotate(self, rad):
"""Rotate counter-clockwise by rad radians.
Positive y goes *up,* as in traditional mathematics.
Interestingly, you can use this in y-down computer graphics, if
you just remember that it turns clockwise, rather than
counter-clockwise.
The new position is returned as a new Point.
"""
a_sinus, a_cosinus = [f(rad) for f in (math.sin, math.cos)]
x, y = (a_cosinus * self.x - a_sinus * self.y, a_sinus * self.x + a_cosinus * self.y)
return Point(x, y)
def rotate_about(self, a_point, theta):
"""Rotate counter-clockwise around a point, by theta degrees.
Positive y goes *up,* as in traditional mathematics.
The new position is returned as a new Point.
"""
result = self.clone()
result.slide_xy(-a_point.x, -a_point.y)
        result = result.rotate(theta)  # rotate() returns a new Point; it does not mutate
result.slide_xy(a_point.x, a_point.y)
return result
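# Minimal usage sketch (not part of the original module):
#   p = Point(1.0, 0.0)
#   q = p.rotate(math.pi / 2)  # ~ Point(0.0, 1.0)
#   p.distance_to(q)           # ~ math.sqrt(2)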
POINT_ZEROZERO = Point(x=0.0, y=0.0)
def average_between(pt1: Point, pt2: Point) -> Point:
"""Returns the point in the 'middle' of the two."""
return Point((pt1.x + pt2.x)/2, (pt1.y + pt2.y)/2)
| mit | -6,745,160,734,486,974,000 | 28.039548 | 93 | 0.560506 | false | 3.383805 | false | false | false |
moshekaplan/PDF-Tools | pdf_js.py | 1 | 2309 | #!/usr/bin/env python
"""
This script extracts all JavaScript from a supplied PDF file.
The script finds all JavaScript by walking the PDF tree and looking for all text
referenced with a JS entry, as specified in Section 8.5 of PDF v1.7.
"""
import sys
import StringIO
import warnings
import PyPDF2
from PyPDF2.generic import DictionaryObject, ArrayObject, IndirectObject
from PyPDF2.utils import PdfReadError
def walk_pdf_tree(node, already_visited=None):
# Indirect objects can refer to each other in a loop.
# Maintain a set of visited nodes to avoid a stack overflow
if already_visited is None:
already_visited = set()
yield node
# Walk through the node's children
if isinstance(node, DictionaryObject):
for k, v in node.iteritems():
for node in walk_pdf_tree(v, already_visited):
yield node
elif isinstance(node, ArrayObject):
for v in node:
for node in walk_pdf_tree(v, already_visited):
yield node
elif isinstance(node, IndirectObject):
idnum = node.idnum
if idnum in already_visited:
pass
else:
already_visited.add(idnum)
# Dereferencing an object can sometimes fail
try:
v = node.getObject()
except PdfReadError:
pass
else:
for node in walk_pdf_tree(v, already_visited):
yield node
def find_js(pdf_object):
js = list()
root = pdf_object.trailer
# Ignore warnings from failed Object dereferences
with warnings.catch_warnings():
warnings.simplefilter("ignore")
for node in walk_pdf_tree(root):
if isinstance(node, DictionaryObject) and '/JS' in node.keys():
js.append(node['/JS'].getData())
return js
def extract_js(fpath):
with open(fpath, 'rb') as fh:
src_pdf_blob = fh.read()
pdf_data = PyPDF2.PdfFileReader(StringIO.StringIO(src_pdf_blob))
js = find_js(pdf_data)
if js:
print "\n\n".join(js)
def main():
if len(sys.argv) < 2:
print "USAGE: %s %s <filename>" % (sys.executable, sys.argv[0])
sys.exit(1)
fpath = sys.argv[1]
extract_js(fpath)
if __name__ == "__main__":
main()
| bsd-2-clause | -4,235,529,645,314,861,600 | 28.227848 | 80 | 0.612386 | false | 3.848333 | false | false | false |
Knio/miru | examples/lights00.py | 1 | 2057 | try:
from psyco import full
except ImportError:
print 'no psyco'
from pyglet import options as pyglet_options
pyglet_options['debug_gl'] = False
import pyglet
from miru.ui import TestWindow
from miru.context import context
from miru import options as miru_options
from miru import camera
from miru import utils
from miru import input
from miru import graphics
from miru import core
from miru.ext import geom
import os
P = os.path.join
# initialize the window
w = TestWindow(680, 400)
w.set_vsync(False)
utils.addFpsDisplay()
context.window = w
context.control = input.SimpleMouseControl()
context.camera.pos += (0,1,2)
context.camera.angle = (10,0,0)
#context.handle.accessible = True
o = graphics.load_wobj(P('docs','demo','alien.obj'))
o.pos += (0,0.95,-0.6)
context.add_object(o)
# Play around with the spot light
context.camera.lights = camera.LightGroup([
camera.DirectionalLight(diffuse=(0.1,0.1,0.1,1)),
camera.PositionalLight(pos=(0,2,0), spot_cutoff=25,
track_target=o, spot_exponent=10, kq=0.1),
camera.PositionalLight(pos=(-0.54,1.3,2.5), diffuse=(0.9,0,0,1)),
camera.PositionalLight(pos=(1.6,1.3,2.7), diffuse=(0,0.9,0,1)),
camera.PositionalLight(pos=(-2.7,1.7,0.3), diffuse=(0,0,0.9,1)),
])
for i in range(1,len(context.camera.lights)):
context.camera.lights[i].debug = True
batch = pyglet.graphics.Batch()
color_group = graphics.ColorGroup((0.5,0.5,0.5,1.0))
objs = []
for x in range(-3, 4):
for z in range(-3, 4):
sphere = geom.Cube(0.7)
geom.transform(sphere, (x * 1.23, 0, z * 1.23))
geom.get_vlist(sphere, batch, color_group)
context.add_object(core.Object(batch))
print """
You should see a small grid of objects with some
colored lights over it. You should be able to click
on the bulb of a light to move it around. The white
light is a spotlight which remains focussed on the
object floating above the grid.
"""
while not w.has_exit:
pyglet.clock.tick()
w.clear()
w.dispatch_events()
context.render()
w.flip()
w.close()
| mit | -892,115,385,782,662,700 | 22.918605 | 69 | 0.691298 | false | 2.829436 | false | false | false |
docusign/docusign-python-client | docusign_esign/models/template_update_summary.py | 1 | 11501 | # coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign. # noqa: E501
OpenAPI spec version: v2.1
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class TemplateUpdateSummary(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'bulk_envelope_status': 'BulkEnvelopeStatus',
'envelope_id': 'str',
'error_details': 'ErrorDetails',
'list_custom_field_update_results': 'list[ListCustomField]',
'lock_information': 'LockInformation',
'purge_state': 'str',
'recipient_update_results': 'list[RecipientUpdateResponse]',
'tab_update_results': 'Tabs',
'text_custom_field_update_results': 'list[TextCustomField]'
}
attribute_map = {
'bulk_envelope_status': 'bulkEnvelopeStatus',
'envelope_id': 'envelopeId',
'error_details': 'errorDetails',
'list_custom_field_update_results': 'listCustomFieldUpdateResults',
'lock_information': 'lockInformation',
'purge_state': 'purgeState',
'recipient_update_results': 'recipientUpdateResults',
'tab_update_results': 'tabUpdateResults',
'text_custom_field_update_results': 'textCustomFieldUpdateResults'
}
def __init__(self, bulk_envelope_status=None, envelope_id=None, error_details=None, list_custom_field_update_results=None, lock_information=None, purge_state=None, recipient_update_results=None, tab_update_results=None, text_custom_field_update_results=None): # noqa: E501
"""TemplateUpdateSummary - a model defined in Swagger""" # noqa: E501
self._bulk_envelope_status = None
self._envelope_id = None
self._error_details = None
self._list_custom_field_update_results = None
self._lock_information = None
self._purge_state = None
self._recipient_update_results = None
self._tab_update_results = None
self._text_custom_field_update_results = None
self.discriminator = None
if bulk_envelope_status is not None:
self.bulk_envelope_status = bulk_envelope_status
if envelope_id is not None:
self.envelope_id = envelope_id
if error_details is not None:
self.error_details = error_details
if list_custom_field_update_results is not None:
self.list_custom_field_update_results = list_custom_field_update_results
if lock_information is not None:
self.lock_information = lock_information
if purge_state is not None:
self.purge_state = purge_state
if recipient_update_results is not None:
self.recipient_update_results = recipient_update_results
if tab_update_results is not None:
self.tab_update_results = tab_update_results
if text_custom_field_update_results is not None:
self.text_custom_field_update_results = text_custom_field_update_results
@property
def bulk_envelope_status(self):
"""Gets the bulk_envelope_status of this TemplateUpdateSummary. # noqa: E501
:return: The bulk_envelope_status of this TemplateUpdateSummary. # noqa: E501
:rtype: BulkEnvelopeStatus
"""
return self._bulk_envelope_status
@bulk_envelope_status.setter
def bulk_envelope_status(self, bulk_envelope_status):
"""Sets the bulk_envelope_status of this TemplateUpdateSummary.
:param bulk_envelope_status: The bulk_envelope_status of this TemplateUpdateSummary. # noqa: E501
:type: BulkEnvelopeStatus
"""
self._bulk_envelope_status = bulk_envelope_status
@property
def envelope_id(self):
"""Gets the envelope_id of this TemplateUpdateSummary. # noqa: E501
The envelope ID of the envelope status that failed to post. # noqa: E501
:return: The envelope_id of this TemplateUpdateSummary. # noqa: E501
:rtype: str
"""
return self._envelope_id
@envelope_id.setter
def envelope_id(self, envelope_id):
"""Sets the envelope_id of this TemplateUpdateSummary.
The envelope ID of the envelope status that failed to post. # noqa: E501
:param envelope_id: The envelope_id of this TemplateUpdateSummary. # noqa: E501
:type: str
"""
self._envelope_id = envelope_id
@property
def error_details(self):
"""Gets the error_details of this TemplateUpdateSummary. # noqa: E501
:return: The error_details of this TemplateUpdateSummary. # noqa: E501
:rtype: ErrorDetails
"""
return self._error_details
@error_details.setter
def error_details(self, error_details):
"""Sets the error_details of this TemplateUpdateSummary.
:param error_details: The error_details of this TemplateUpdateSummary. # noqa: E501
:type: ErrorDetails
"""
self._error_details = error_details
@property
def list_custom_field_update_results(self):
"""Gets the list_custom_field_update_results of this TemplateUpdateSummary. # noqa: E501
# noqa: E501
:return: The list_custom_field_update_results of this TemplateUpdateSummary. # noqa: E501
:rtype: list[ListCustomField]
"""
return self._list_custom_field_update_results
@list_custom_field_update_results.setter
def list_custom_field_update_results(self, list_custom_field_update_results):
"""Sets the list_custom_field_update_results of this TemplateUpdateSummary.
# noqa: E501
:param list_custom_field_update_results: The list_custom_field_update_results of this TemplateUpdateSummary. # noqa: E501
:type: list[ListCustomField]
"""
self._list_custom_field_update_results = list_custom_field_update_results
@property
def lock_information(self):
"""Gets the lock_information of this TemplateUpdateSummary. # noqa: E501
:return: The lock_information of this TemplateUpdateSummary. # noqa: E501
:rtype: LockInformation
"""
return self._lock_information
@lock_information.setter
def lock_information(self, lock_information):
"""Sets the lock_information of this TemplateUpdateSummary.
:param lock_information: The lock_information of this TemplateUpdateSummary. # noqa: E501
:type: LockInformation
"""
self._lock_information = lock_information
@property
def purge_state(self):
"""Gets the purge_state of this TemplateUpdateSummary. # noqa: E501
# noqa: E501
:return: The purge_state of this TemplateUpdateSummary. # noqa: E501
:rtype: str
"""
return self._purge_state
@purge_state.setter
def purge_state(self, purge_state):
"""Sets the purge_state of this TemplateUpdateSummary.
# noqa: E501
:param purge_state: The purge_state of this TemplateUpdateSummary. # noqa: E501
:type: str
"""
self._purge_state = purge_state
@property
def recipient_update_results(self):
"""Gets the recipient_update_results of this TemplateUpdateSummary. # noqa: E501
# noqa: E501
:return: The recipient_update_results of this TemplateUpdateSummary. # noqa: E501
:rtype: list[RecipientUpdateResponse]
"""
return self._recipient_update_results
@recipient_update_results.setter
def recipient_update_results(self, recipient_update_results):
"""Sets the recipient_update_results of this TemplateUpdateSummary.
# noqa: E501
:param recipient_update_results: The recipient_update_results of this TemplateUpdateSummary. # noqa: E501
:type: list[RecipientUpdateResponse]
"""
self._recipient_update_results = recipient_update_results
@property
def tab_update_results(self):
"""Gets the tab_update_results of this TemplateUpdateSummary. # noqa: E501
:return: The tab_update_results of this TemplateUpdateSummary. # noqa: E501
:rtype: Tabs
"""
return self._tab_update_results
@tab_update_results.setter
def tab_update_results(self, tab_update_results):
"""Sets the tab_update_results of this TemplateUpdateSummary.
:param tab_update_results: The tab_update_results of this TemplateUpdateSummary. # noqa: E501
:type: Tabs
"""
self._tab_update_results = tab_update_results
@property
def text_custom_field_update_results(self):
"""Gets the text_custom_field_update_results of this TemplateUpdateSummary. # noqa: E501
# noqa: E501
:return: The text_custom_field_update_results of this TemplateUpdateSummary. # noqa: E501
:rtype: list[TextCustomField]
"""
return self._text_custom_field_update_results
@text_custom_field_update_results.setter
def text_custom_field_update_results(self, text_custom_field_update_results):
"""Sets the text_custom_field_update_results of this TemplateUpdateSummary.
# noqa: E501
:param text_custom_field_update_results: The text_custom_field_update_results of this TemplateUpdateSummary. # noqa: E501
:type: list[TextCustomField]
"""
self._text_custom_field_update_results = text_custom_field_update_results
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(TemplateUpdateSummary, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, TemplateUpdateSummary):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| mit | 8,655,280,657,151,386,000 | 33.537538 | 277 | 0.632901 | false | 4.138539 | false | false | false |
thiderman/network-kitten | kitten/client.py | 1 | 1447 | import json
import logbook
import zmq.green as zmq
from kitten.request import RequestError
class KittenClient(object):
log = logbook.Logger('KittenClient')
timeout = 2000
def send(self, address, request):
self.log.info('Sending request on {1}: {0}', request, address)
socket = self.connect(address)
socket.send_json(request)
self.log.info('Waiting for reply')
events = self.poll_reply(socket)
if not events:
msg = 'Timeout after {0}ms'.format(self.timeout)
self.log.error(msg)
self.close(socket)
raise RequestError('TIMEOUT', msg)
# TODO: Can JSON events come in multiparts? Probably not?
response = events[0][0].recv_json()
self.log.info(response)
self.close(socket)
return response
def close(self, socket):
socket.close()
# TODO: Figure out why destroying the context makes the application
# hang indefinetely.
# self.context.destroy()
# del self.context
def connect(self, address):
self.context = zmq.Context()
socket = self.context.socket(zmq.REQ)
host = 'tcp://{0}'.format(address)
socket.connect(host)
return socket
def poll_reply(self, socket):
poller = zmq.Poller()
poller.register(socket, zmq.POLLIN)
events = poller.poll(self.timeout)
return events
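# Hypothetical usage (address and request payload are made up; the client
# only requires a host:port string and a JSON-serializable request):
#   client = KittenClient()
#   reply = client.send('127.0.0.1:5555', {'method': 'ping'})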
| mit | 4,768,687,102,994,836,000 | 24.839286 | 75 | 0.608155 | false | 4.019444 | false | false | false |
orlox/massive_bins_2015 | 2016_ULX/scripts/NSBH/kick_dist.py | 1 | 2053 | #!/usr/bin/env python
import matplotlib.pyplot as plt
import matplotlib as mpl
from pylab import *
import numpy as np
import sys
sys.path.insert(0, '../')
import kicks
from scipy.stats import maxwell
params = {'backend': 'pdf',
'figure.figsize': [4.3, 2.2],
'font.family':'serif',
'font.size':10,
'font.serif': 'Times Roman',
'axes.titlesize': 'medium',
'axes.labelsize': 'medium',
'legend.fontsize': 8,
'legend.frameon' : False,
'text.usetex': True,
'figure.dpi': 600,
'lines.markersize': 4,
'lines.linewidth': 3,
'lines.antialiased': False,
'path.simplify': False,
'legend.handlelength':3,
'figure.subplot.bottom':0.2,
'figure.subplot.top':0.95,
'figure.subplot.left':0.15,
'figure.subplot.right':0.92}
hexcols = ['#332288', '#88CCEE', '#44AA99', '#117733', '#999933', '#DDCC77',\
'#CC6677', '#882255', '#AA4499', '#661100', '#6699CC', '#AA4466','#4477AA']
mpl.rcParams.update(params)
A=np.array([np.append([vkick],kicks.sample_kick_distribution_P(23,5.5,55,1.4,vdist=lambda x:[float(vkick)], num_v=5, num_theta=400,num_phi=100)) for vkick in range(0,701,5)])
print(A)
print(A[:,0])
print(A[:,1])
print(A[:,2])
fig, axes= plt.subplots(1)
maxw = axes.fill_between(A[:,0],0,maxwell.pdf(A[:,0], scale=265.)/max(maxwell.pdf(A[:,0],scale=265.)),color="b", alpha=0.2, label="Maxwellian, $\\sigma=265~\\rm km~s^{-1}$")
merge, = axes.plot(A[:,0],10*A[:,1], color=hexcols[2],label="GW merge fraction $\\times$ 10")
disrupt, = axes.plot(A[:,0],A[:,2], color=hexcols[8],ls="--", label="Disrupt fraction")
axes.set_xlabel("$v_{\\rm kick}~\\rm[km~s^{-1}]$")
axes.set_ylabel("fraction")
#axes.set_xlim([0,50])
axes.set_ylim([0,1.19])
axes.legend([maxw,merge,disrupt],["Maxwellian, $\\sigma=265~\\rm km~s^{-1}$", "GW merge fraction $\\times$ 10", "Disrupt fraction"], loc="upper left", fontsize=7)
plt.savefig("kick_dist.pdf")
#plt.clf()
#plt.close(plt.gcf())
| gpl-3.0 | 2,113,585,709,847,529,700 | 35.017544 | 174 | 0.594252 | false | 2.748327 | false | false | false |
aniruddha-adhikary/bookit | bookit/bookings/models/booking.py | 1 | 1448 | from django.apps import apps
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django_fsm import FSMField, transition
from model_utils import Choices
class Booking(models.Model):
STATES = Choices(
('REQUESTED', 'requested', _('Requested')),
('APPROVED', 'approved', _('Approved')),
('REJECTED', 'rejected', _('Rejected')),
('CANCELLED', 'cancelled', _('Cancelled'))
)
service = models.ForeignKey(
to='providers.ProviderService'
)
status = FSMField(
default=STATES.requested
)
booked_by = models.ForeignKey(
to=settings.AUTH_USER_MODEL
)
booked_for = models.DateTimeField()
booked_on = models.DateTimeField(
auto_now_add=True,
editable=False
)
last_updated = models.DateTimeField(
auto_now=True
)
@transition(status,
source='REQUESTED',
target='CANCELLED')
def cancel(self):
"""Cancel request"""
@transition(status,
source='REQUESTED',
target='APPROVED')
def approve(self):
"""Approve request"""
Ticket = apps.get_model('bookings', 'Ticket')
Ticket(booking=self).save()
@transition(status,
source='REQUESTED',
target='REJECTED')
def reject(self):
"""Reject request"""
| mit | -1,215,097,377,576,194,600 | 23.542373 | 55 | 0.585635 | false | 4.172911 | false | false | false |
lamerman/shellpy | shellpython/preprocessor.py | 1 | 11269 | #!/usr/bin/env python
import os
import stat
import tempfile
import re
import getpass
import json
spy_file_pattern = re.compile(r'(.*)\.spy$')
shellpy_meta_pattern = re.compile(r'#shellpy-meta:(.*)')
shellpy_encoding_pattern = '#shellpy-encoding'
def preprocess_module(module_path):
"""The function compiles a module in shellpy to a python module, walking through all the shellpy files inside of
the module and compiling all of them to python
:param module_path: The path of module
:return: The path of processed module
"""
for item in os.walk(module_path):
path, dirs, files = item
for file in files:
if spy_file_pattern.match(file):
filepath = os.path.join(path, file)
preprocess_file(filepath, is_root_script=False)
return _translate_to_temp_path(module_path)
def preprocess_file(in_filepath, is_root_script, python_version=None):
"""Coverts a single shellpy file to python
:param in_filepath: The path of shellpy file to be processed
:param is_root_script: Shows whether the file being processed is a root file, which means the one
that user executed
:param python_version: version of python, needed to set correct header for root scripts
:return: The path of python file that was created of shellpy script
"""
new_filepath = spy_file_pattern.sub(r"\1.py", in_filepath)
out_filename = _translate_to_temp_path(new_filepath)
out_folder_path = os.path.dirname(out_filename)
if not is_root_script and not _is_compilation_needed(in_filepath, out_filename):
# TODO: cache root also
# TODO: if you don't compile but it's root, you need to change to exec
return out_filename
if not os.path.exists(out_folder_path):
os.makedirs(out_folder_path, mode=0o700)
header_data = _get_header(in_filepath, is_root_script, python_version)
with open(in_filepath, 'r') as f:
code = f.read()
out_file_data = _add_encoding_to_header(header_data, code)
intermediate = _preprocess_code_to_intermediate(code)
processed_code = _intermediate_to_final(intermediate)
out_file_data += processed_code
with open(out_filename, 'w') as f:
f.write(out_file_data)
in_file_stat = os.stat(in_filepath)
os.chmod(out_filename, in_file_stat.st_mode)
if is_root_script:
os.chmod(out_filename, in_file_stat.st_mode | stat.S_IEXEC)
return out_filename
def _get_username():
"""Returns the name of current user. The function is used in construction of the path for processed shellpy files on
temp file system
:return: The name of current user
"""
try:
n = getpass.getuser()
return n
except:
return 'no_username_found'
def _translate_to_temp_path(path):
"""Compiled shellpy files are stored on temp filesystem on path like this /{tmp}/{user}/{real_path_of_file_on_fs}
Every user will have its own copy of compiled shellpy files. Since we store them somewhere else relative to
the place where they actually are, we need a translation function that would allow us to easily get path
of compiled file
:param path: The path to be translated
:return: The translated path
"""
absolute_path = os.path.abspath(path)
relative_path = os.path.relpath(absolute_path, os.path.abspath(os.sep))
# TODO: this will not work in win where root is C:\ and absolute_in_path
# is on D:\
translated_path = os.path.join(tempfile.gettempdir(), 'shellpy_' + _get_username(), relative_path)
return translated_path
def _is_compilation_needed(in_filepath, out_filepath):
"""Shows whether compilation of input file is required. It may be not required if the output file did not change
:param in_filepath: The path of shellpy file to be processed
:param out_filepath: The path of the processed python file. It may exist or not.
:return: True if compilation is needed, False otherwise
"""
if not os.path.exists(out_filepath):
return True
in_mtime = os.path.getmtime(in_filepath)
with open(out_filepath, 'r') as f:
for i in range(0, 3): # scan only for three first lines
line = f.readline()
line_result = shellpy_meta_pattern.search(line)
if line_result:
meta = line_result.group(1)
meta = json.loads(meta)
if str(in_mtime) == meta['mtime']:
return False
return True
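# The marker scanned for above is written by _get_header; in a compiled file
# it looks like (illustration): #shellpy-meta:{"mtime": "1500000000.0"}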
def _get_header(filepath, is_root_script, python_version):
"""To execute converted shellpy file we need to add a header to it. The header contains needed imports and
required code
:param filepath: A shellpy file that is being converted. It is needed to get modification time of it and save it
to the created python file. Then this modification time will be used to find out whether recompilation is needed
:param is_root_script: Shows whether the file being processed is a root file, which means the one
that user executed
:param python_version: version of python, needed to set correct header for root scripts
:return: data of the header
"""
header_name = 'header_root.tpl' if is_root_script else 'header.tpl'
header_filename = os.path.join(os.path.dirname(__file__), header_name)
with open(header_filename, 'r') as f:
header_data = f.read()
mod_time = os.path.getmtime(filepath)
meta = {'mtime': str(mod_time)}
header_data = header_data.replace('{meta}', json.dumps(meta))
if is_root_script:
executables = {
2: '#!/usr/bin/env python',
3: '#!/usr/bin/env python3'
}
header_data = header_data.replace('#shellpy-python-executable', executables[python_version])
return header_data
def _preprocess_code_to_intermediate(code):
"""Before compiling to actual python code all expressions are converted to universal intermediate form
It is very convenient as it is possible to perform common operations for all expressions
The intermediate form looks like this:
longline_shexe(echo 1)shexe(p)shexe
:param code: code to convert to intermediate form
:return: converted code
"""
processed_code = _process_multilines(code)
processed_code = _process_long_lines(processed_code)
processed_code = _process_code_both(processed_code)
processed_code = _process_code_start(processed_code)
return _escape(processed_code)
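# End-to-end illustration (derived from the patterns in this module; not in
# the original source):
#   shellpy source:    result = `echo 1`
#   intermediate form: result = both_shexe(echo 1)shexe()shexe
#   final python:      result = exe('echo 1'.format(**dict(locals(), **globals())),'')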
def _process_multilines(script_data):
"""Converts a pyshell multiline expression to one line pyshell expression, each line of which is separated
by semicolon. An example would be:
f = `
echo 1 > test.txt
ls -l
`
:param script_data: the string of the whole script
:return: the shellpy script with multiline expressions converted to intermediate form
"""
code_multiline_pattern = re.compile(r'^([^`\n\r]*?)([a-z]*)`\s*?$[\n\r]{1,2}(.*?)`\s*?$', re.MULTILINE | re.DOTALL)
script_data = code_multiline_pattern.sub(r'\1multiline_shexe(\3)shexe(\2)shexe', script_data)
pattern = re.compile(r'multiline_shexe.*?shexe', re.DOTALL)
new_script_data = script_data
for match in pattern.finditer(script_data):
original_str = script_data[match.start():match.end()]
processed_str = re.sub(r'([\r\n]{1,2})', r'; \\\1', original_str)
new_script_data = new_script_data.replace(
original_str, processed_str)
return new_script_data
def _process_long_lines(script_data):
"""Converts to python a pyshell expression that takes more than one line. An example would be:
f = `echo The string \
on several \
lines
:param script_data: the string of the whole script
:return: the shellpy script converted to intermediate form
"""
code_long_line_pattern = re.compile(r'([a-z]*)`(((.*?\\\s*?$)[\n\r]{1,2})+(.*$))', re.MULTILINE)
new_script_data = code_long_line_pattern.sub(r'longline_shexe(\2)shexe(\1)shexe', script_data)
return new_script_data
def _process_code_both(script_data):
"""Converts to python a pyshell script that has ` symbol both in the beginning of expression and in the end.
An example would be:
f = `echo 1`
:param script_data: the string of the whole script
:return: the shellpy script converted to intermediate form
"""
code_both_pattern = re.compile(r'([a-z]*)`(.*?)`')
new_script_data = code_both_pattern.sub(r'both_shexe(\2)shexe(\1)shexe', script_data)
return new_script_data
def _process_code_start(script_data):
"""Converts to python a pyshell script that has ` symbol only in the beginning. An example would be:
f = `echo 1
:param script_data: the string of the whole script
:return: the shellpy script converted to intermediate form
"""
code_start_pattern = re.compile(r'^([^\n\r`]*?)([a-z]*)`([^`\n\r]+)$', re.MULTILINE)
new_script_data = code_start_pattern.sub(r'\1start_shexe(\3)shexe(\2)shexe', script_data)
return new_script_data
def _escape(script_data):
"""Escapes shell commands
:param script_data: the string of the whole script
:return: escaped script
"""
pattern = re.compile(r'[a-z]*_shexe.*?shexe', re.DOTALL)
new_script_data = script_data
for match in pattern.finditer(script_data):
original_str = script_data[match.start():match.end()]
if original_str.find('\'') != -1:
processed_str = original_str.replace('\'', '\\\'')
new_script_data = new_script_data.replace(
original_str, processed_str)
return new_script_data
def _intermediate_to_final(script_data):
"""All shell blocks are first compiled to intermediate form. This part of code converts the intermediate
to final python code
:param script_data: the string of the whole script
:return: python script ready to be executed
"""
intermediate_pattern = re.compile(r'[a-z]*_shexe\((.*?)\)shexe\((.*?)\)shexe', re.MULTILINE | re.DOTALL)
final_script = intermediate_pattern.sub(r"exe('\1'.format(**dict(locals(), **globals())),'\2')", script_data)
return final_script
def _add_encoding_to_header(header_data, script_data):
"""PEP-0263 defines a way to specify python file encoding. If this encoding is present in first
two lines of a shellpy script it will then be moved to the top generated output file
:param script_data: the string of the whole script
:return: the script with the encoding moved to top, if it's present
"""
encoding_pattern = re.compile(r'^(#[-*\s]*coding[:=]\s*([-\w.]+)[-*\s]*)$')
# we use \n here instead of os.linesep since \n is universal as it is present in all OSes
# when \r\n returned by os.linesep may not work if you run against unix files from win
first_two_lines = script_data.split('\n')[:2]
for line in first_two_lines:
encoding = encoding_pattern.search(line)
if encoding is not None:
break
if not encoding:
return header_data
else:
new_header_data = header_data.replace(shellpy_encoding_pattern, encoding.group(1))
return new_header_data
| bsd-3-clause | 8,372,486,215,485,364,000 | 36.563333 | 120 | 0.663235 | false | 3.71057 | false | false | false |
viftodi/snotes | snotes_persistence.py | 1 | 3549 | empty = "empty"
class Entry:
def __init__(self,
creation_timestamp=None,
update_timestamp=None,
tags=None,
value=None):
self.creation_timestamp = creation_timestamp
self.update_timestamp = update_timestamp
if tags is None:
self.tags = []
else:
self.tags = tags
self.value = value
def merge_with(self, other):
merge_into(self.tags, other.tags)
self.update_timestamp = other.update_timestamp
@staticmethod
def from_string(s, all_tags):
entry = Entry()
words = s.split(' ', 3)
creation_timestamp = float(words[0])
update_timestamp = float(words[1])
if words[2] != empty:
tags = deserialize_tags(words[2], all_tags)
else:
tags = []
value = words[3]
return Entry(creation_timestamp, update_timestamp, tags, value)
def to_string(self, all_tags):
tags_s = None
if self.tags == []:
tags_s = empty
else:
tags_s = serialize_tags(self.tags, all_tags)
return "{0} {1} {2} {3}".format(self.creation_timestamp,
self.update_timestamp,
tags_s,
self.value)
class Journal:
def __init__(self, tags=None, entries=None):
if tags is None:
self.tags = []
else:
self.tags = tags
if entries is None:
self.entries = []
else:
self.entries = entries
def merge_tags(self, tags):
merge_into(self.tags, tags)
def add_tag(self, tag):
self.tags.append(tag)
def add_entry(self, entry):
self.entries.append(entry)
def add_or_merge_entry(self, entry):
self.merge_tags(entry.tags)
match = filter(lambda e: e.value == entry.value, self.entries)
if match:
match[0].merge_with(entry)
else:
self.add_entry(entry)
def to_file(self, file_name):
with open(file_name, 'w') as storage:
storage.write(' '.join(self.tags))
storage.write('\n')
for entry in self.entries:
storage.write(entry.to_string(self.tags))
storage.write('\n')
def get_entries(self, tag_filter, sort):
filtered_entries = filter(tag_filter, self.entries)
return sorted(filtered_entries, key=sort, reverse=True)
@staticmethod
def from_file(file_name):
journal = Journal()
with open(file_name, 'r') as storage:
line = storage.readline().rstrip()
if line != '':
journal.tags = line.split(' ')
line = storage.readline().rstrip()
while line != '':
entry = Entry.from_string(line, journal.tags)
journal.add_entry(entry)
line = storage.readline().rstrip()
return journal
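# On-disk layout written by to_file and parsed by from_file (illustration):
#   line 1:   work home          <- space-separated tag names
#   line 2+:  1500000000.0 1500000000.0 0,1 buy milk
# where "0,1" are indices into line 1's tag list ("empty" when untagged).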
def filter_tags_inclusive(tags1, tags2):
if not tags1:
return True
for tag in tags1:
if tag in tags2:
return True
return False
def serialize_tags(tags, all_tags):
return ','.join(map(lambda tag: str(all_tags.index(tag)), tags))
def deserialize_tags(s, all_tags):
return map(lambda v: all_tags[v], map(int, s.split(',')))
def merge_into(lst1, lst2):
for v in lst2:
if v not in lst1:
lst1.append(v)
| mit | 6,676,937,256,555,736,000 | 26.3 | 71 | 0.524937 | false | 3.992126 | false | false | false |
djlauk/cala | cala/cala.py | 1 | 1042 | """
cala: The court allocation application
Copyright (C) 2017 Daniel J. Lauk <[email protected]>
cala is open sourced under the terms of the MIT license.
For details see LICENSE.md.
"""
import os
import sys
import tempfile
from flask import Flask, session, redirect, url_for, escape, request, abort, make_response
app = Flask(__name__)
app.config.from_object(__name__)
app.config.update(dict(
DATABASE=os.path.join(tempfile.gettempdir(), 'cala.sqlite'),
PASSWORD_SALT='development salt',
SECRET_KEY='development key',
FIRST_DAY_OF_WEEK=1, # 1 = Monday ... 7 = Sunday
LAST_DAY_OF_WEEK=5, # 1 = Monday ... 7 = Sunday
MIN_PASSWORD_LENGTH=6,
MIN_PASSWORD_CLASSES=3,
SHORT_NOTICE_START='08:00', # short notice games can be booked past this time
MAX_FREE_GAMES_PER_WEEK=1,
BOOKING_POINTS_PER_WEEK=3,
BOOKING_POINTS_WINDOW=(5, 3) # (number of games, number of weeks)
))
app.config.from_envvar('CALA_SETTINGS', silent=True)
import cala.views
import cala.database
cala.database.init_db()
| mit | 3,757,820,997,018,096,000 | 27.944444 | 90 | 0.698656 | false | 3.011561 | false | false | false |
UGentPortaal/django-ldapdb-archived | settings.py | 2 | 3162 | # Django settings for django-ldapdb project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'ldapdb.db',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
},
'ldap': {
'ENGINE': 'ldapdb.backends.ldap',
'NAME': 'ldap://',
'USER': 'cn=admin,dc=nodomain',
'PASSWORD': 'test',
}
}
DATABASE_ROUTERS = ['ldapdb.router.Router']
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'some_random_secret_key'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'ldapdb',
'examples',
'django.contrib.admin',
)
LDAPDB_BIND_DN="cn=admin,dc=nodomain"
LDAPDB_BIND_PASSWORD="test"
LDAPDB_SERVER_URI="ldap://"
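# A sketch (not part of the original settings) of how the 'ldap' database
# above is typically consumed: an ldapdb model that the DATABASE_ROUTERS
# entry routes to the LDAP backend. The field and class names follow
# django-ldapdb's examples app and are assumptions here.
#
#   import ldapdb.models
#   from ldapdb.models.fields import CharField
#
#   class LdapUser(ldapdb.models.Model):
#       base_dn = "ou=people,dc=nodomain"
#       object_classes = ['inetOrgPerson']
#       cn = CharField(db_column='cn', primary_key=True)
#       sn = CharField(db_column='sn')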
| bsd-3-clause | -931,346,338,319,461,800 | 29.114286 | 88 | 0.693232 | false | 3.529018 | false | false | false |
shridharmishra4/tamtam | Jam/RythmGenerator.py | 2 | 3063 | import random
import common.Util.InstrumentDB as InstrumentDB
import common.Config as Config
from common.Util.CSoundNote import CSoundNote
from common.Generation.GenerationConstants import GenerationConstants
from GenRythm import GenRythm
instrumentDB = InstrumentDB.getRef()
def generator( instrument, nbeats, density, regularity, reverbSend ):
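    # One bar of drum notes is generated per pitch-complexity stream:
    # `regularity` selects the stream (see the bottom of this function),
    # `density` and `regularity` shape the onset rhythm, and each onset's
    # gain depends on its beat position (bar start loudest, beat
    # boundaries next, off-beats softest).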
makeRythm = GenRythm()
noteDuration = GenerationConstants.DOUBLE_TICK_DUR / 2
trackId = 0
pan = 0.5
attack = 0.005
decay = 0.095
filterType = 0
filterCutoff = 1000
tied = False
mode = 'mini'
def makePitchSequence(length, drumPitch):
pitchSequence = []
append = pitchSequence.append
        max_index = len(drumPitch) - 1
        for _ in range(length):
            append(drumPitch[random.randint(0, max_index)])
return pitchSequence
def makeGainSequence( onsetList ):
gainSequence = []
append = gainSequence.append
for onset in onsetList:
if onset == 0:
gain = random.uniform(GenerationConstants.GAIN_MID_MAX_BOUNDARY, GenerationConstants.GAIN_MAX_BOUNDARY)
elif ( onset % Config.TICKS_PER_BEAT) == 0:
gain = random.uniform(GenerationConstants.GAIN_MID_MIN_BOUNDARY, GenerationConstants.GAIN_MID_MAX_BOUNDARY)
else:
gain = random.uniform(GenerationConstants.GAIN_MIN_BOUNDARY, GenerationConstants.GAIN_MID_MIN_BOUNDARY)
append(gain)
return gainSequence
def pageGenerate( regularity, drumPitch ):
barLength = Config.TICKS_PER_BEAT * nbeats
#print 'pageGenerate drumPitch[0] ', drumPitch[0]
currentInstrument = instrumentDB.instNamed[ instrument ].kit[ drumPitch[0] ]
rythmSequence = makeRythm.drumRythmSequence(currentInstrument, nbeats, density, regularity)
pitchSequence = makePitchSequence(len(rythmSequence), drumPitch )
gainSequence = makeGainSequence(rythmSequence)
trackNotes = []
        for i in range(len(rythmSequence)):
trackNotes.append( CSoundNote( rythmSequence[i], pitchSequence[i], gainSequence[i],
pan, noteDuration, trackId,
instrumentDB.instNamed[instrument].instrumentId, attack,
decay, reverbSend, filterType, filterCutoff, tied, mode))
return trackNotes
##################################################################################
# begin generate()
if regularity > 0.75:
streamOfPitch = GenerationConstants.DRUM_COMPLEXITY1
elif regularity > 0.5:
streamOfPitch = GenerationConstants.DRUM_COMPLEXITY2
elif regularity > 0.25:
streamOfPitch = GenerationConstants.DRUM_COMPLEXITY3
else:
streamOfPitch = GenerationConstants.DRUM_COMPLEXITY4
trackNotes = []
for drumPitch in streamOfPitch:
trackNotes.append(pageGenerate( regularity, drumPitch ))
return trackNotes
| gpl-2.0 | 3,517,159,044,149,858,000 | 37.2875 | 123 | 0.63761 | false | 3.906888 | false | false | false |
knossos-project/knossos_aam | knossos_aam_backend/aam_interaction.py | 1 | 13163 | """
Set of functions for interacting (reading / writing) with the AAM. Business
logic goes here.
"""
__author__ = 'Fabian Svara'
import re
import zipfile
from cStringIO import StringIO
from django.db import transaction
from django.utils import timezone
from general_utilities.versions import compare_version
from knossos_utils.skeleton import Skeleton
import checks
import models
import view_helpers
class NonEmptyWork(Exception):
pass
def delete_submission(s):
if s.worktime:
s.work.worktime = s.work.worktime - s.worktime
s.work.save()
s.delete()
def get_active_work(em):
"""
Return active works for employee em.
Parameters
----------
em : Employee instance
Returns
-------
active_work : list of Work instances
"""
active_work = models.Work.objects.filter(
employee=em,
is_final=False,
)
active_work = list(active_work)
active_work = sorted(active_work, key=lambda x: x.pk)
return active_work
def get_completed_work(em):
"""
Return completed works for employee em.
Parameters
----------
em : Employee instance
Returns
-------
completed_work : list of Work instances
"""
completed_work = models.Work.objects.filter(
employee=em,
is_final=True,
)
completed_work = list(completed_work)
return completed_work
def get_available_tasks(em, count=1):
"""
Return available tasks for employee em.
Parameters
----------
em : Employee instance
count : int
Number of Tasks per category to return
Returns
-------
    available_tasks_by_cat : dict of str -> list of Task instances
        Maps category name to list of Tasks available in that category
        for employee em, where the tasks within the same category are
        sorted by descending priority
    available_tasks : list of Task instances
        Task instances sorted by descending priority
    Both return values are None if the employee has no project.
"""
available_tasks_by_cat = {}
available_tasks = []
if em.project is None:
return None, None
for curCategory in em.project.taskcategory_set.all():
cur_tasks = curCategory.task_set.filter(
is_active=True, priority__gt=-1).exclude(employee=em)
if len(cur_tasks) > 0:
cur_tasks = list(cur_tasks)
cur_tasks = sorted(
cur_tasks, key=lambda x: x.priority, reverse=True)
available_tasks_by_cat[curCategory] = cur_tasks[0:count]
available_tasks.extend(cur_tasks)
available_tasks = sorted(
available_tasks, key=lambda x: x.priority, reverse=True)
return available_tasks_by_cat, available_tasks
def reset_task(task, username):
s = models.Submission.objects.filter(
employee__user__username=username,
work__task__name=task)
s.delete()
w = models.Work.objects.get(employee__user__username__exact=username,
task__name=task)
w.worktime = 0.
w.is_final = False
w.latestsubmit = None
w.last_submission = None
w.save()
def unfinalize_work(work_id):
w = models.Work.objects.get(pk=work_id)
w.is_final = False
w.save()
def cancel_task(task, username):
w = models.Work.objects.get(
task__name=task,
employee__user__username=username, )
if not w.submission_set.all():
w.delete()
else:
raise NonEmptyWork('Submissions exist for this Work. Not deleting.')
def choose_task(employee, task_id):
active_work = models.Work.objects.filter(
employee=employee, is_final=False)
if not len(active_work) == 0:
raise view_helpers.TooManyActiveTasks()
task = models.Task.objects.get(pk=task_id)
if task.target_coverage > task.current_coverage:
models.Work.objects.create(
started=timezone.now(),
task=models.Task.objects.get(pk=task_id),
employee=employee,
is_final=False, )
else:
raise view_helpers.UserRace()
return
def submit(employee, submit_file, submit_comment, submit_is_final,
submit_work_id, skip_checks=False):
"""Parses the submitted file, extracts the worktime and tests the nml.
For submissions which are done on worktime tasks, the submission
is created without any file.
For regular submissions, the file name is checked on length.
It is checked if the nml file was saved and created in the
current version of Knossos.
Parameters:
----------
employee: Employee object
Employee related to the submission
submit_file: file object
submitted nml file
submit comment: string
comment which was submitted together with the submission
submit_is_final: bool
True for final submission
submit_work_id: integer
id of the work related to this submission
Returns:
----------
incremental_worktime: float
calculated worktime on this submission
work: Work object
automatic_worktime: bool
True if worktime should be calculated
Raises:
----------
InvalidSubmission:
if the filename is longer than 200 characters
if the file was created/saved in an old version of Knossos
if the worktime is lower than the one of the previous submission
ImportError:
if a check could not be imported from the Checks file
DoesNotExist:
if the Work object is not found
ParseError:
if the file cannot be opened by the Skeleton class.
"""
if len(submit_file.name) > 200:
raise view_helpers.InvalidSubmission(
'The maximal file name length for submissions is '
'200 character.')
work = models.Work.objects.get(pk=submit_work_id)
# testing for .k.zip is problematic, just do zip - django itself removes
# the k sometimes (e.g. when changing the filename of task files
# on uploading them by adding random chars)
if submit_file.name.endswith('.zip'):
fp = StringIO(submit_file.read())
zipper = zipfile.ZipFile(fp, 'r')
if 'annotation.xml' not in zipper.namelist():
raise Exception('k.zip broken.')
skeleton_file_as_string = zipper.read('annotation.xml')
else:
skeleton_file_as_string = submit_file.read()
checks_to_run = re.split('\W', work.task.checks)
checks_to_run = [x for x in checks_to_run if x]
if checks_to_run and not skip_checks:
check_fns = dict()
for cur_check in checks_to_run:
exec ('from knossos_aam_backend.checks import {0}'.format(cur_check))
cur_check_fn = locals()[cur_check]
check_fns[cur_check] = cur_check_fn
skeleton = Skeleton()
skeleton.fromNmlString(skeleton_file_as_string,
use_file_scaling=True)
# Keyword arguments for check functions
#
kwargs = {'skeleton': skeleton,
'work': work,
'employee': employee,
'submit_file': submit_file,
'submit_comment': submit_comment,
'submit_work_id': submit_work_id,
'submit_is_final': submit_is_final,
'submit_file_as_string': skeleton_file_as_string, }
# Check whether the knossos version is high enough
version = skeleton.get_version()
# Has work time tracking
if compare_version(version['saved'], (4, 1, 2)) == '<':
raise view_helpers.InvalidSubmission(
"This tracing was saved in a version "
"of Knossos that is too old and incompatible with "
"knossos_aam. Please upgrade to version 4.1.2, "
"available "
"from www.knossostool.org, save the file again in "
"that version, and resubmit.")
else:
# All fine, newest version.
pass
if 'automatic_worktime' not in checks_to_run:
incremental_worktime = None
auto_worktime = False
output = checks.automatic_worktime(**kwargs)
else:
auto_worktime = True
output = checks.automatic_worktime(**kwargs)
if type(output) == str:
raise view_helpers.InvalidSubmission(output)
else:
incremental_worktime = output
del check_fns['automatic_worktime']
# Here is the part where the tests are done
#
for cur_check in check_fns:
            output = check_fns[cur_check](**kwargs)
if type(output) == str:
raise view_helpers.InvalidSubmission(output)
if 'automatic_worktime' in checks_to_run and incremental_worktime:
work.worktime = work.worktime + incremental_worktime
work.save()
else:
incremental_worktime = None
auto_worktime = False
# Send e-mail if comment is added to submission.
if submit_comment:
subject = 'Comment on Submission of Task {0} Task from {1}'.format(work.task.name, employee.user.username)
attachments = [(skeleton_file_as_string, submit_file.name)]
# todo get mailing to work again
# mail_notify('[email protected]', subject, submit_comment,
# attachments=attachments, reply_to=work.employee.user.email)
s = models.Submission.objects.create(
employee=employee,
date=timezone.now(),
work=work,
comment=submit_comment,
is_final=submit_is_final,
worktime=incremental_worktime,
original_filename=submit_file.name[0:200],
datafile=submit_file, )
s.save()
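# Note on check functions: each name listed in work.task.checks is imported
# from knossos_aam_backend.checks and called with the kwargs dict assembled
# in submit() above; returning a string signals a validation error shown to
# the submitter, any other return value counts as success. A minimal sketch
# of such a check (hypothetical, not part of this module):
#
#   def my_check(skeleton, work, employee, **kwargs):
#       if looks_wrong(skeleton):  # looks_wrong is a hypothetical helper
#           return 'Human-readable error message for the submitter.'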
def get_monthly_worktime_for_submissions(submission_set):
""" Calculate how much of the work time has been spent in different months
Parameters:
----------
submission_set: QuerySet(Submission)
Returns:
----------
set {by_month_per_task, by_month_totals}
by_month_per_task: { year: { month: { task: [worktime, is_final] } } }
by_month_totals: { year: { month: [worktime, is_final] } }
"""
by_month_per_task = {}
by_month_totals = {}
s = submission_set.order_by('date')
for curs in s:
year = curs.date.year
month = curs.date.month
task = curs.work.task
incomplete_time = False
if curs.worktime is None:
cur_worktime = 0.
incomplete_time = True
else:
cur_worktime = curs.worktime
if year not in by_month_per_task:
by_month_per_task[year] = {}
by_month_totals[year] = {}
if month not in by_month_per_task[year]:
by_month_per_task[year][month] = {}
# Second item in tuple indicates whether the worktime
# is incomplete, i.e. work was performed on tasks
# for which worktime is not automatically computed
by_month_totals[year][month] = [0, False]
if task not in by_month_per_task[year][month]:
by_month_per_task[year][month][task] = [0, False]
if incomplete_time:
by_month_per_task[year][month][task][1] = True
by_month_totals[year][month][1] = True
by_month_per_task[year][month][task][0] = \
by_month_per_task[year][month][task][0] + cur_worktime
by_month_totals[year][month][0] = \
by_month_totals[year][month][0] + cur_worktime
return {'by_month_per_task': by_month_per_task,
'by_month_totals': by_month_totals, }
def get_monthly_worktime_for_work(w):
return get_monthly_worktime_for_submissions(w.submission_set)
def get_employee_info(emp):
work = get_active_work(emp)
info = {"name": " ".join([emp.user.first_name, emp.user.last_name]),
"username": " ".join([emp.user.username]),
"project": emp.project.name}
if len(work) > 0:
work = work[0]
info["task_name"] = work.task.name
info["work_time"] = work.worktime
info["last_submit"] = work.last_submission.datafile
return info
def get_employees_current_work():
emp_set = {}
for emp in models.Employee.objects.all():
emp_set[emp] = get_employee_info(emp)
return emp_set
def get_employee_infos_in_project(proj):
employees = models.Employee.objects.filter(project=proj)
emp_infos = []
for emp in employees:
emp_infos.append(get_employee_info(emp))
return emp_infos
def move_employees_to_project(employees, new_project):
with transaction.atomic():
for employee in employees:
employee.project = new_project
employee.save()
| gpl-2.0 | 6,620,236,176,597,662,000 | 27.057395 | 114 | 0.588088 | false | 4.048908 | false | false | false |
Jonekee/chromium.src | chrome/common/extensions/docs/server2/document_renderer.py | 85 | 4614 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
from document_parser import ParseDocument
from platform_util import ExtractPlatformFromURL
from third_party.json_schema_compiler.model import UnixName
class DocumentRenderer(object):
'''Performs document-level rendering such as the title, references,
and table of contents: pulling that data out of the document, then
replacing the $(title), $(ref:...) and $(table_of_contents) tokens with them.
This can be thought of as a parallel to TemplateRenderer; while
TemplateRenderer is responsible for interpreting templates and rendering files
within the template engine, DocumentRenderer is responsible for interpreting
higher-level document concepts like the title and TOC, then performing string
replacement for them. The syntax for this replacement is $(...) where ... is
the concept. Currently title and table_of_contents are supported.
'''
def __init__(self, table_of_contents_renderer, platform_bundle):
self._table_of_contents_renderer = table_of_contents_renderer
self._platform_bundle = platform_bundle
def _RenderLinks(self, document, path):
''' Replaces all $(ref:...) references in |document| with html links.
References have two forms:
$(ref:api.node) - Replaces the reference with a link to node on the
API page. The title is set to the name of the node.
$(ref:api.node Title) - Same as the previous form, but title is set
to "Title".
'''
START_REF = '$(ref:'
END_REF = ')'
MAX_REF_LENGTH = 256
new_document = []
# Keeps track of position within |document|
cursor_index = 0
start_ref_index = document.find(START_REF)
while start_ref_index != -1:
end_ref_index = document.find(END_REF, start_ref_index)
if (end_ref_index == -1 or
end_ref_index - start_ref_index > MAX_REF_LENGTH):
end_ref_index = document.find(' ', start_ref_index)
logging.error('%s:%s has no terminating ) at line %s' % (
path,
document[start_ref_index:end_ref_index],
document.count('\n', 0, end_ref_index)))
new_document.append(document[cursor_index:end_ref_index + 1])
else:
ref = document[start_ref_index:end_ref_index]
ref_parts = ref[len(START_REF):].split(None, 1)
# Guess the api name from the html name, replacing '_' with '.' (e.g.
# if the page is app_window.html, guess the api name is app.window)
api_name = os.path.splitext(os.path.basename(path))[0].replace('_', '.')
title = ref_parts[0] if len(ref_parts) == 1 else ref_parts[1]
platform = ExtractPlatformFromURL(path)
        if platform is None:
          logging.error('Cannot resolve reference without a platform.')
          # Skip past this reference so the loop still makes progress;
          # a bare 'continue' here would never advance start_ref_index.
          new_document.append(document[cursor_index:end_ref_index + 1])
          cursor_index = end_ref_index + 1
          start_ref_index = document.find(START_REF, cursor_index)
          continue
ref_dict = self._platform_bundle.GetReferenceResolver(
platform).SafeGetLink(ref_parts[0],
namespace=api_name,
title=title,
path=path)
new_document.append(document[cursor_index:start_ref_index])
new_document.append('<a href=%s/%s>%s</a>' % (
self._platform_bundle._base_path + platform,
ref_dict['href'],
ref_dict['text']))
cursor_index = end_ref_index + 1
start_ref_index = document.find(START_REF, cursor_index)
new_document.append(document[cursor_index:])
return ''.join(new_document)
def Render(self, document, path, render_title=False):
''' |document|: document to be rendered.
|path|: request path to the document.
|render_title|: boolean representing whether or not to render a title.
'''
# Render links first so that parsing and later replacements aren't
# affected by $(ref...) substitutions
document = self._RenderLinks(document, path)
parsed_document = ParseDocument(document, expect_title=render_title)
toc_text, toc_warnings = self._table_of_contents_renderer.Render(
parsed_document.sections)
# Only 1 title and 1 table of contents substitution allowed; in the common
# case, save necessarily running over the entire file.
if parsed_document.title:
document = document.replace('$(title)', parsed_document.title, 1)
return (document.replace('$(table_of_contents)', toc_text, 1),
parsed_document.warnings + toc_warnings)
| bsd-3-clause | -8,379,112,430,894,674,000 | 40.196429 | 80 | 0.647378 | false | 4.083186 | false | false | false |
wuher/diablo | diablo/mappers/xmlrpcmapper.py | 1 | 1127 | # -*- coding: utf-8 -*-
# xmlrpcmapper.py ---
#
# Created: Wed Apr 11 15:40:26 2012 (-0600)
# Author: Patrick Hull
#
import xmlrpclib
from diablo.datamapper import DataMapper
from diablo import http
class XmlRpcMapper(DataMapper):
"""XML-RPC mapper
The mapper must be set using the format arg or explicitly in the
resource, otherwise XmlMapper will be used for content-type text/xml.
"""
content_type = 'text/xml'
def __init__(self, methodresponse=True, allow_none=True):
self.methodresponse = methodresponse
self.allow_none = allow_none
def _format_data(self, data, charset):
try:
return xmlrpclib.dumps((data,),
methodresponse=self.methodresponse,
allow_none=self.allow_none,
encoding=charset)
except TypeError, err:
raise http.InternalServerError('unable to encode data')
def _parse_data(self, data, charset):
try:
return xmlrpclib.loads(data)
except ValueError:
raise http.BadRequest('unable to parse data')
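# Round-trip sketch (illustrative): xmlrpclib.dumps wraps a single value in
# a <methodResponse> document, which xmlrpclib.loads parses back:
#
#   mapper = XmlRpcMapper()
#   doc = mapper._format_data({'answer': 42}, 'utf-8')
#   params, method = xmlrpclib.loads(doc)  # params == ({'answer': 42},), method is None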
| mit | 6,107,612,546,182,512,000 | 26.487805 | 73 | 0.618456 | false | 4.098182 | false | false | false |
huard/scipy-work | scipy/interpolate/fitpack2.py | 1 | 21289 | """
fitpack --- curve and surface fitting with splines
fitpack is based on a collection of Fortran routines DIERCKX
by P. Dierckx (see http://www.netlib.org/dierckx/) transformed
to double routines by Pearu Peterson.
"""
# Created by Pearu Peterson, June,August 2003
__all__ = [
'UnivariateSpline',
'InterpolatedUnivariateSpline',
'LSQUnivariateSpline',
'BivariateSpline',
'LSQBivariateSpline',
'SmoothBivariateSpline',
'RectBivariateSpline']
import warnings
from numpy import zeros, concatenate, alltrue, ravel, all, diff
import dfitpack
################ Univariate spline ####################
_curfit_messages = {1:"""
The required storage space exceeds the available storage space, as
specified by the parameter nest: nest too small. If nest is already
large (say nest > m/2), it may also indicate that s is too small.
The approximation returned is the weighted least-squares spline
according to the knots t[0],t[1],...,t[n-1]. (n=nest) the parameter fp
gives the corresponding weighted sum of squared residuals (fp>s).
""",
2:"""
A theoretically impossible result was found during the iteration
process for finding a smoothing spline with fp = s: s too small.
There is an approximation returned but the corresponding weighted sum
of squared residuals does not satisfy the condition abs(fp-s)/s < tol.""",
3:"""
The maximal number of iterations maxit (set to 20 by the program)
allowed for finding a smoothing spline with fp=s has been reached: s
too small.
There is an approximation returned but the corresponding weighted sum
of squared residuals does not satisfy the condition abs(fp-s)/s < tol.""",
10:"""
Error on entry, no approximation returned. The following conditions
must hold:
xb<=x[0]<x[1]<...<x[m-1]<=xe, w[i]>0, i=0..m-1
if iopt=-1:
xb<t[k+1]<t[k+2]<...<t[n-k-2]<xe"""
}
class UnivariateSpline(object):
""" Univariate spline s(x) of degree k on the interval
[xb,xe] calculated from a given set of data points
(x,y).
Can include least-squares fitting.
See also:
    splrep, splev, sproot, splint, spalde - an older wrapping of FITPACK
BivariateSpline - a similar class for bivariate spline interpolation
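    Example (illustrative usage, assuming only numpy alongside this module):
      >>> import numpy as np
      >>> x = np.linspace(0, 10, 50)
      >>> y = np.sin(x) + 0.1*np.random.randn(50)
      >>> s = UnivariateSpline(x, y, k=3, s=1.0)    # smoothing cubic spline
      >>> s(np.linspace(0, 10, 200))                # evaluate on a fine grid
      >>> s.roots()                                 # zeros (cubic splines only)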
"""
def __init__(self, x, y, w=None, bbox = [None]*2, k=3, s=None):
"""
Input:
x,y - 1-d sequences of data points (x must be
in strictly ascending order)
Optional input:
w - positive 1-d sequence of weights
bbox - 2-sequence specifying the boundary of
the approximation interval.
By default, bbox=[x[0],x[-1]]
k=3 - degree of the univariate spline.
s - positive smoothing factor defined for
estimation condition:
sum((w[i]*(y[i]-s(x[i])))**2,axis=0) <= s
Default s=len(w) which should be a good value
if 1/w[i] is an estimate of the standard
deviation of y[i].
"""
#_data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
data = dfitpack.fpcurf0(x,y,k,w=w,
xb=bbox[0],xe=bbox[1],s=s)
if data[-1]==1:
# nest too small, setting to maximum bound
data = self._reset_nest(data)
self._data = data
self._reset_class()
def _reset_class(self):
data = self._data
n,t,c,k,ier = data[7],data[8],data[9],data[5],data[-1]
self._eval_args = t[:n],c[:n],k
if ier==0:
# the spline returned has a residual sum of squares fp
# such that abs(fp-s)/s <= tol with tol a relative
# tolerance set to 0.001 by the program
pass
elif ier==-1:
# the spline returned is an interpolating spline
self._set_class(InterpolatedUnivariateSpline)
elif ier==-2:
# the spline returned is the weighted least-squares
# polynomial of degree k. In this extreme case fp gives
# the upper bound fp0 for the smoothing factor s.
self._set_class(LSQUnivariateSpline)
else:
# error
if ier==1:
self._set_class(LSQUnivariateSpline)
message = _curfit_messages.get(ier,'ier=%s' % (ier))
warnings.warn(message)
def _set_class(self, cls):
self._spline_class = cls
if self.__class__ in (UnivariateSpline, InterpolatedUnivariateSpline,
LSQUnivariateSpline):
self.__class__ = cls
else:
# It's an unknown subclass -- don't change class. cf. #731
pass
def _reset_nest(self, data, nest=None):
n = data[10]
if nest is None:
k,m = data[5],len(data[0])
nest = m+k+1 # this is the maximum bound for nest
else:
assert n<=nest,"nest can only be increased"
t,c,fpint,nrdata = data[8].copy(),data[9].copy(),\
data[11].copy(),data[12].copy()
t.resize(nest)
c.resize(nest)
fpint.resize(nest)
nrdata.resize(nest)
args = data[:8] + (t,c,n,fpint,nrdata,data[13])
data = dfitpack.fpcurf1(*args)
return data
def set_smoothing_factor(self, s):
""" Continue spline computation with the given smoothing
factor s and with the knots found at the last call.
"""
data = self._data
if data[6]==-1:
warnings.warn('smoothing factor unchanged for'
'LSQ spline with fixed knots')
return
args = data[:6] + (s,) + data[7:]
data = dfitpack.fpcurf1(*args)
if data[-1]==1:
# nest too small, setting to maximum bound
data = self._reset_nest(data)
self._data = data
self._reset_class()
def __call__(self, x, nu=None):
""" Evaluate spline (or its nu-th derivative) at positions x.
Note: x can be unordered but the evaluation is more efficient
if x is (partially) ordered.
"""
if nu is None:
return dfitpack.splev(*(self._eval_args+(x,)))
return dfitpack.splder(nu=nu,*(self._eval_args+(x,)))
def get_knots(self):
""" Return the positions of (boundary and interior)
knots of the spline.
"""
data = self._data
k,n = data[5],data[7]
return data[8][k:n-k]
def get_coeffs(self):
"""Return spline coefficients."""
data = self._data
k,n = data[5],data[7]
return data[9][:n-k-1]
def get_residual(self):
"""Return weighted sum of squared residuals of the spline
approximation: sum ((w[i]*(y[i]-s(x[i])))**2,axis=0)
"""
return self._data[10]
def integral(self, a, b):
""" Return definite integral of the spline between two
given points.
"""
return dfitpack.splint(*(self._eval_args+(a,b)))
def derivatives(self, x):
""" Return all derivatives of the spline at the point x."""
d,ier = dfitpack.spalde(*(self._eval_args+(x,)))
assert ier==0,`ier`
return d
def roots(self):
""" Return the zeros of the spline.
Restriction: only cubic splines are supported by fitpack.
"""
k = self._data[5]
if k==3:
z,m,ier = dfitpack.sproot(*self._eval_args[:2])
assert ier==0,`ier`
return z[:m]
raise NotImplementedError,\
'finding roots unsupported for non-cubic splines'
class InterpolatedUnivariateSpline(UnivariateSpline):
""" Interpolated univariate spline approximation. Identical to
UnivariateSpline with less error checking.
"""
def __init__(self, x, y, w=None, bbox = [None]*2, k=3):
"""
Input:
x,y - 1-d sequences of data points (x must be
in strictly ascending order)
Optional input:
w - positive 1-d sequence of weights
bbox - 2-sequence specifying the boundary of
the approximation interval.
By default, bbox=[x[0],x[-1]]
k=3 - degree of the univariate spline.
"""
#_data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
self._data = dfitpack.fpcurf0(x,y,k,w=w,
xb=bbox[0],xe=bbox[1],s=0)
self._reset_class()
class LSQUnivariateSpline(UnivariateSpline):
""" Weighted least-squares univariate spline
approximation. Appears to be identical to UnivariateSpline with
more error checking.
"""
def __init__(self, x, y, t, w=None, bbox = [None]*2, k=3):
"""
Input:
x,y - 1-d sequences of data points (x must be
in strictly ascending order)
t - 1-d sequence of the positions of user-defined
interior knots of the spline (t must be in strictly
ascending order and bbox[0]<t[0]<...<t[-1]<bbox[-1])
Optional input:
w - positive 1-d sequence of weights
bbox - 2-sequence specifying the boundary of
the approximation interval.
By default, bbox=[x[0],x[-1]]
k=3 - degree of the univariate spline.
"""
#_data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
xb=bbox[0]
xe=bbox[1]
if xb is None: xb = x[0]
if xe is None: xe = x[-1]
t = concatenate(([xb]*(k+1),t,[xe]*(k+1)))
n = len(t)
if not alltrue(t[k+1:n-k]-t[k:n-k-1] > 0,axis=0):
raise ValueError,\
'Interior knots t must satisfy Schoenberg-Whitney conditions'
data = dfitpack.fpcurfm1(x,y,k,t,w=w,xb=xb,xe=xe)
self._data = data[:-3] + (None,None,data[-1])
self._reset_class()
################ Bivariate spline ####################
_surfit_messages = {1:"""
The required storage space exceeds the available storage space: nxest
or nyest too small, or s too small.
The weighted least-squares spline corresponds to the current set of
knots.""",
2:"""
A theoretically impossible result was found during the iteration
process for finding a smoothing spline with fp = s: s too small or
badly chosen eps.
Weighted sum of squared residuals does not satisfy abs(fp-s)/s < tol.""",
3:"""
the maximal number of iterations maxit (set to 20 by the program)
allowed for finding a smoothing spline with fp=s has been reached:
s too small.
Weighted sum of squared residuals does not satisfy abs(fp-s)/s < tol.""",
4:"""
No more knots can be added because the number of b-spline coefficients
(nx-kx-1)*(ny-ky-1) already exceeds the number of data points m:
either s or m too small.
The weighted least-squares spline corresponds to the current set of
knots.""",
5:"""
No more knots can be added because the additional knot would (quasi)
coincide with an old one: s too small or too large a weight to an
inaccurate data point.
The weighted least-squares spline corresponds to the current set of
knots.""",
10:"""
Error on entry, no approximation returned. The following conditions
must hold:
xb<=x[i]<=xe, yb<=y[i]<=ye, w[i]>0, i=0..m-1
If iopt==-1, then
xb<tx[kx+1]<tx[kx+2]<...<tx[nx-kx-2]<xe
yb<ty[ky+1]<ty[ky+2]<...<ty[ny-ky-2]<ye""",
-3:"""
The coefficients of the spline returned have been computed as the
minimal norm least-squares solution of a (numerically) rank deficient
system (deficiency=%i). If deficiency is large, the results may be
inaccurate. Deficiency may strongly depend on the value of eps."""
}
class BivariateSpline(object):
""" Bivariate spline s(x,y) of degrees kx and ky on the rectangle
[xb,xe] x [yb, ye] calculated from a given set of data points
(x,y,z).
See also:
bisplrep, bisplev - an older wrapping of FITPACK
UnivariateSpline - a similar class for univariate spline interpolation
    SmoothBivariateSpline - to create a BivariateSpline through the
               given points
    LSQBivariateSpline - to create a BivariateSpline using weighted
               least-squares fitting
"""
def get_residual(self):
""" Return weighted sum of squared residuals of the spline
approximation: sum ((w[i]*(z[i]-s(x[i],y[i])))**2,axis=0)
"""
return self.fp
def get_knots(self):
""" Return a tuple (tx,ty) where tx,ty contain knots positions
of the spline with respect to x-, y-variable, respectively.
The position of interior and additional knots are given as
t[k+1:-k-1] and t[:k+1]=b, t[-k-1:]=e, respectively.
"""
return self.tck[:2]
def get_coeffs(self):
""" Return spline coefficients."""
return self.tck[2]
def __call__(self,x,y,mth='array'):
""" Evaluate spline at positions x,y."""
if mth=='array':
tx,ty,c = self.tck[:3]
kx,ky = self.degrees
z,ier = dfitpack.bispev(tx,ty,c,kx,ky,x,y)
assert ier==0,'Invalid input: ier='+`ier`
return z
raise NotImplementedError
def ev(self, xi, yi):
"""
Evaluate spline at points (x[i], y[i]), i=0,...,len(x)-1
"""
tx,ty,c = self.tck[:3]
kx,ky = self.degrees
zi,ier = dfitpack.bispeu(tx,ty,c,kx,ky,xi,yi)
assert ier==0, 'Invalid input: ier='+`ier`
return zi
def integral(self, xa, xb, ya, yb):
"""
Evaluate the integral of the spline over area [xa,xb] x [ya,yb].
Parameters
----------
xa, xb : float
The end-points of the x integration interval.
ya, yb : float
The end-points of the y integration interval.
Returns
-------
integ : float
The value of the resulting integral.
"""
tx,ty,c = self.tck[:3]
kx,ky = self.degrees
return dfitpack.dblint(tx,ty,c,kx,ky,xa,xb,ya,yb)
class SmoothBivariateSpline(BivariateSpline):
""" Smooth bivariate spline approximation.
See also:
bisplrep, bisplev - an older wrapping of FITPACK
UnivariateSpline - a similar class for univariate spline interpolation
    LSQBivariateSpline - to create a BivariateSpline using weighted
               least-squares fitting
"""
def __init__(self, x, y, z, w=None,
bbox = [None]*4, kx=3, ky=3, s=None, eps=None):
"""
Input:
x,y,z - 1-d sequences of data points (order is not
important)
Optional input:
w - positive 1-d sequence of weights
bbox - 4-sequence specifying the boundary of
the rectangular approximation domain.
By default, bbox=[min(x,tx),max(x,tx),
min(y,ty),max(y,ty)]
kx,ky=3,3 - degrees of the bivariate spline.
s - positive smoothing factor defined for
estimation condition:
sum((w[i]*(z[i]-s(x[i],y[i])))**2,axis=0) <= s
Default s=len(w) which should be a good value
if 1/w[i] is an estimate of the standard
deviation of z[i].
eps - a threshold for determining the effective rank
of an over-determined linear system of
equations. 0 < eps < 1, default is 1e-16.
"""
xb,xe,yb,ye = bbox
nx,tx,ny,ty,c,fp,wrk1,ier = dfitpack.surfit_smth(x,y,z,w,
xb,xe,yb,ye,
kx,ky,s=s,
eps=eps,lwrk2=1)
if ier in [0,-1,-2]: # normal return
pass
else:
message = _surfit_messages.get(ier,'ier=%s' % (ier))
warnings.warn(message)
self.fp = fp
self.tck = tx[:nx],ty[:ny],c[:(nx-kx-1)*(ny-ky-1)]
self.degrees = kx,ky
class LSQBivariateSpline(BivariateSpline):
""" Weighted least-squares spline approximation.
See also:
bisplrep, bisplev - an older wrapping of FITPACK
UnivariateSpline - a similar class for univariate spline interpolation
    SmoothBivariateSpline - to create a BivariateSpline through the
               given points
"""
def __init__(self, x, y, z, tx, ty, w=None,
bbox = [None]*4,
kx=3, ky=3, eps=None):
"""
Input:
x,y,z - 1-d sequences of data points (order is not
important)
tx,ty - strictly ordered 1-d sequences of knots
coordinates.
Optional input:
w - positive 1-d sequence of weights
bbox - 4-sequence specifying the boundary of
the rectangular approximation domain.
By default, bbox=[min(x,tx),max(x,tx),
min(y,ty),max(y,ty)]
kx,ky=3,3 - degrees of the bivariate spline.
eps - a threshold for determining the effective rank
of an over-determined linear system of
equations. 0 < eps < 1, default is 1e-16.
"""
nx = 2*kx+2+len(tx)
ny = 2*ky+2+len(ty)
tx1 = zeros((nx,),float)
ty1 = zeros((ny,),float)
tx1[kx+1:nx-kx-1] = tx
ty1[ky+1:ny-ky-1] = ty
xb,xe,yb,ye = bbox
tx1,ty1,c,fp,ier = dfitpack.surfit_lsq(x,y,z,tx1,ty1,w,\
xb,xe,yb,ye,\
kx,ky,eps,lwrk2=1)
if ier>10:
tx1,ty1,c,fp,ier = dfitpack.surfit_lsq(x,y,z,tx1,ty1,w,\
xb,xe,yb,ye,\
kx,ky,eps,lwrk2=ier)
if ier in [0,-1,-2]: # normal return
pass
else:
if ier<-2:
deficiency = (nx-kx-1)*(ny-ky-1)+ier
message = _surfit_messages.get(-3) % (deficiency)
else:
message = _surfit_messages.get(ier,'ier=%s' % (ier))
warnings.warn(message)
self.fp = fp
self.tck = tx1,ty1,c
self.degrees = kx,ky
class RectBivariateSpline(BivariateSpline):
""" Bivariate spline approximation over a rectangular mesh.
    Can be used for both smoothing and interpolation of data.
See also:
SmoothBivariateSpline - a smoothing bivariate spline for scattered data
bisplrep, bisplev - an older wrapping of FITPACK
UnivariateSpline - a similar class for univariate spline interpolation
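    Example (illustrative, assuming only numpy):
      >>> import numpy as np
      >>> x = np.linspace(0, 4, 9)
      >>> y = np.linspace(0, 3, 8)
      >>> z = np.cos(x)[:, None] * np.sin(y)        # shape (x.size, y.size)
      >>> rbs = RectBivariateSpline(x, y, z, s=0)   # s=0 -> interpolation
      >>> rbs(np.linspace(0, 4, 20), np.linspace(0, 3, 15))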
"""
def __init__(self, x, y, z,
bbox = [None]*4, kx=3, ky=3, s=0):
"""
Input:
x,y - 1-d sequences of coordinates in strictly ascending order
z - 2-d array of data with shape (x.size,y.size)
Optional input:
bbox - 4-sequence specifying the boundary of
the rectangular approximation domain.
By default, bbox=[min(x,tx),max(x,tx),
min(y,ty),max(y,ty)]
kx,ky=3,3 - degrees of the bivariate spline.
s - positive smoothing factor defined for
estimation condition:
sum((w[i]*(z[i]-s(x[i],y[i])))**2,axis=0) <= s
Default s=0 which is for interpolation
"""
x,y = ravel(x),ravel(y)
if not all(diff(x) > 0.0):
raise TypeError,'x must be strictly increasing'
if not all(diff(y) > 0.0):
raise TypeError,'y must be strictly increasing'
if not ((x.min() == x[0]) and (x.max() == x[-1])):
raise TypeError, 'x must be strictly ascending'
if not ((y.min() == y[0]) and (y.max() == y[-1])):
raise TypeError, 'y must be strictly ascending'
if not x.size == z.shape[0]:
raise TypeError,\
'x dimension of z must have same number of elements as x'
if not y.size == z.shape[1]:
raise TypeError,\
'y dimension of z must have same number of elements as y'
z = ravel(z)
xb,xe,yb,ye = bbox
nx,tx,ny,ty,c,fp,ier = dfitpack.regrid_smth(x,y,z,
xb,xe,yb,ye,
kx,ky,s)
if ier in [0,-1,-2]: # normal return
pass
else:
message = _surfit_messages.get(ier,'ier=%s' % (ier))
warnings.warn(message)
self.fp = fp
self.tck = tx[:nx],ty[:ny],c[:(nx-kx-1)*(ny-ky-1)]
self.degrees = kx,ky
| bsd-3-clause | -7,880,119,224,775,439,000 | 37.016071 | 79 | 0.542722 | false | 3.706948 | false | false | false |
pcubillos/pytips | pytips/tips.py | 1 | 4945 | # Copyright (c) 2015-2019 Patricio Cubillos and contributors.
# pytips is open-source software under the MIT license (see LICENSE).
__all__ = ["tips", "iso", "molID", "molname", "to_file"]
import sys
import os
import numpy as np
from numpy.core.numeric import isscalar
libdir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "lib")
sys.path.append(libdir)
import ctips as ct
_molname = np.array(["", "H2O", "CO2", "O3", "N2O", "CO",
"CH4", "O2", "NO", "SO2", "NO2",
"NH3", "HNO3", "OH", "HF", "HCl",
"HBr", "HI", "ClO", "OCS", "H2CO",
"HOCl", "N2", "HCN", "CH3Cl", "H2O2",
"C2H2", "C2H6", "PH3", "COF2", "SF6",
"H2S", "HCOOH", "HO2", "O", "ClONO2",
"NO+", "HOBr", "C2H4", "CH3OH", "CH3Br",
"CH3CN", "CF4", "C4H2", "HC3N", "H2",
"CS", "SO3", "C2N2", "SO", "C3H4",
"CH3", "CS2"])
def tips(molID, isoID, temp):
"""
Evaluate the partition function for the given isotope(s) at the given
temperature(s). This is a wrapper of ctips.tips.
Parameters
----------
molID: Scalar or iterable
The molecular ID as given by HITRAN 2012.
isoID: Scalar or iterable
The isotope ID (AFGL) as given by HITRAN 2012.
temp: Scalar or iterable
Temperature a which to evaluate the partition function.
Notes
-----
  - molID and isoID are cast into integer ndarrays.
  - temp is cast into a double ndarray.
  - If the arguments have different sizes, the code repeats the
    length-one ones to match; incompatible sizes abort with an error.
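  Example
  -------
  >>> import pytips as p
  >>> iso_co2 = p.iso(2)[0]    # first CO2 isotope (AFGL notation)
  >>> # molID and isoID are scalars here and broadcast over temp:
  >>> Z = p.tips(2, iso_co2, [70.0, 150.0, 300.0])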
"""
# Check scalar vs iterable, turn into iterable:
if isscalar(molID):
molID = [molID]
if isscalar(isoID):
isoID = [isoID]
if isscalar(temp):
temp = [temp]
# Turn them numpy arrays:
molID = np.asarray(molID, np.int)
isoID = np.asarray(isoID, np.int)
temp = np.asarray(temp, np.double)
# Set them to the same size:
if len(isoID) != len(temp):
if len(isoID) == 1:
isoID = np.repeat(isoID, len(temp))
elif len(temp) == 1:
temp = np.repeat(temp, len(isoID))
else:
      sys.exit("Error: isoID and temp have incompatible sizes.")
if len(molID) != len(isoID):
if len(molID) != 1:
      sys.exit("Error: molID and isoID have incompatible sizes.")
molID = np.repeat(molID, len(isoID))
return ct.tips(molID, isoID, temp)
def iso(mID):
"""
Get the list of isotope IDs for the given molecule ID.
Parameters
----------
mID: String or integer
Molecule name (if string) or molecule ID.
Return
------
isoID: 1D integer ndarray
Isotopes ID for molecule mID.
"""
if isinstance(mID, str):
    # Convert string to index if necessary:
return ct.iso(int(molID(mID)))
return ct.iso(int(mID))
def molID(mname):
"""
Get the molecule ID for the requested molecule.
Parameters
----------
mname: String
Name of the molecule.
Return
------
mID: Integer
The molecule's ID.
"""
if mname not in _molname:
print("Molecule '{:s}' is not in list.".format(mname))
return None
return np.where(_molname == mname)[0][0]
def molname(mID):
"""
Get the molecule name for the requested molecule ID.
Parameters
----------
mID: Integer
The molecule's ID.
Return
------
mname: String
Name of the molecule.
"""
if (mID < 1) or (mID > 52):
print("Molecule ID '{:d}' is invalid.".format(mID))
return None
return _molname[mID]
def to_file(filename, molname, temp):
"""
Compute partition-function values for all isotopes of a given
molecule over a temperature array, and save to file.
Parameters
----------
filename: String
Output partition-function file.
molname: String
Name of the molecule.
temp: 1D float ndarray
Array of temperatures.
Example
-------
  >>> import numpy as np
  >>> import pytips as p
>>> temp = np.linspace(70, 3000, 294)
>>> molname = "CO2"
>>> p.to_file("CO2_tips.dat", molname, temp)
"""
# Compute partition function:
isoID = iso(molname)
niso = len(isoID)
ntemp = len(temp)
data = np.zeros((niso, ntemp), np.double)
for i in np.arange(niso):
data[i] = tips(molID(molname), isoID[i], temp)
# Save to file:
with open(filename, "w") as fout:
fout.write(
"# Tabulated {:s} partition-function data from TIPS.\n\n".format(molname))
fout.write("@ISOTOPES\n ")
for j in np.arange(niso):
fout.write(" {:10s}".format(str(isoID[j])))
fout.write("\n\n")
fout.write("# Temperature (K), partition function for each isotope:\n")
fout.write("@DATA\n")
for i in np.arange(ntemp):
fout.write(" {:7.1f} ".format(temp[i]))
for j in np.arange(niso):
fout.write(" {:10.4e}".format(data[j,i]))
fout.write("\n")
| mit | -1,915,433,626,671,041,500 | 25.72973 | 78 | 0.571082 | false | 3.121843 | false | false | false |
maoterodapena/pysouliss | souliss/Typicals.py | 1 | 9295 | import logging
import struct
_LOGGER = logging.getLogger(__name__)
typical_types = {
0x11: {
"desc": "T11: ON/OFF Digital Output with Timer Option", "size": 1,
"name": "Switch Timer",
"state_desc": { 0x00: "off",
0x01: "on"}
},
0x12: {"desc": "T12: ON/OFF Digital Output with AUTO mode",
"size": 1,
"name": "Switch auto",
"state_desc": { 0x00: "off",
0x01: "on",
0xF0: "on/auto",
0xF1: "off/auto"
}
},
0x13: {"desc": "T13: Digital Input Value",
"size": 1,
"state_desc": { 0x00: "off",
0x01: "on"}
},
0x14: {"desc": "T14: Pulse Digital Output",
"size": 1,
"name": "Switch",
"state_desc": { 0x00: "off",
0x01: "on"}
},
0x15: {"desc": "T15: RGB Light",
"size": 2,
"state_desc": { 0x00: "off",
0x01: "on"}
},
0x16: {"desc": "T16: RGB LED Strip",
"size": 4,
"state_desc": { 0x00: "on",
0x01: "on"}
},
0x18: {"desc": "T18: ON/OFF Digital Output (Step Relay)",
"size": 1,
"state_desc": { 0x00: "off",
0x01: "on"}
},
0x19: {"desc": "T19: Single Color LED Strip",
"size": 2,
"state_desc": { 0x00: "off",
0x01: "on"}
},
0x1A: {"desc": "T1A: Digital Input Pass Through",
"size": 1,
"state_desc": { 0x00: "off",
0x01: "on"}
},
0x1B: {"desc": "T1B: Position Constrained ON/OFF Digital Output", "size": 1},
0x21: {"desc": "T21: Motorized devices with limit switches", "size": 1},
0x22: {"desc": "T22: Motorized devices with limit switches and middle position", "size": 1},
0x31: {"desc": "T31: Temperature control with cooling and heating mode", "size": 5},
0x32: {"desc": "T32: Air Conditioner", "size": 2},
0x41: {"desc": "T41: Anti-theft integration -Main-", "size": 1},
0x42: {"desc": "T42: Anti-theft integration -Peer-", "size": 1},
0x51: {"desc": "T51: Analog input, half-precision floating point",
"size": 2,
"units": "units"},
0x52: {"desc": "T52: Temperature measure (-20, +50) C",
"size": 2,
"units": "C"},
0x53: {"desc": "T53: Humidity measure (0, 100) ",
"size": 2,
"units": "%"},
0x54: {"desc": "T54: Light Sensor (0, 40) kLux",
"size": 2,
"units": "kLux"},
0x55: {"desc": "T55: Voltage (0, 400) V",
"size": 2,
"units": "V"},
0x56: {"desc": "T56: Current (0, 25) A",
"size": 2,
"units": "A"},
0x57: {"desc": "T57: Power (0, 6500) W",
"size": 2,
"units": "W"},
0x58: {"desc": "T58: Pressure measure (0, 1500) hPa",
"size": 2,
"units": "hPa"},
0x61: {"desc": "T61: Analog setpoint, half-precision floating point", "size": 2},
0x62: {"desc": "T62: Temperature measure (-20, +50) C", "size": 2},
0x63: {"desc": "T63: Humidity measure (0, 100) ", "size": 2},
0x64: {"desc": "T64: Light Sensor (0, 40) kLux", "size": 2},
0x65: {"desc": "T65: Voltage (0, 400) V", "size": 2},
0x66: {"desc": "T66: Current (0, 25) A", "size": 2},
0x67: {"desc": "T67: Power (0, 6500) W", "size": 2},
0x68: {"desc": "T68: Pressure measure (0, 1500) hPa", "size": 2}
}
class Typical(object):
def __init__(self, ttype):
self.ttype = ttype
self.description = typical_types[ttype]['desc']
self.size = typical_types[ttype]['size']
        self.slot = -1  # undefined until assigned to a slot
        self.node = -1  # undefined until assigned to a node
# inital state. It will be overwritten with the first update
self.state = b'\x00\x00\x00\x00\x00\x00\x00'
self.listeners = []
def add_listener(self, callback):
self.listeners.append(callback)
@staticmethod
def factory_type(ttype):
if ttype in [0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x18, 0x19, 0x1A, 0x1B]:
return TypicalT1n(ttype)
elif ttype in [0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58]:
return TypicalT5n(ttype)
else:
return TypicalNotImplemented(ttype)
def update(self, value):
        value = value[:self.size]
        if value != self.state:
            # log before overwriting self.state, so the old and new values
            # in the message actually differ (also supply the missing node arg)
            _LOGGER.info("Node %d: Typical %d - %s updated from %s to %s" % (
                         self.node,
                         self.index,
                         self.description,
                         ':'.join("{:02x}".format(c) for c in self.state[:self.size]),
                         ':'.join("{:02x}".format(c) for c in value[:self.size])))
            self.state = value
            self.state_description = value
for listener in self.listeners:
listener(self)
"""
if self.mqtt:
# TODO: este self....
print("Publico mi nuevo estado %s" + self.state)
self.mqttc.publish('souliss/%s/%s/state' % (self.device_class, self.name), self.state)
"""
"""
def publish(self, mqttc):
if self.mqtt:
self.mqttc = mqttc
self.device_class = typical_types[self.ttype]['mqtt']
mqttc.publish('souliss/%s/%s/config' % (self.device_class, self.name),
'{"name" : "' + self.friendly_name + '", ' +
'"payload_on": "01", ' +
'"payload_off": "00", ' +
'"optimistic": false, ' +
'"retain": true, ' +
'"command_topic": "souliss/%s/%s/set", "state_topic": "souliss/%s/%s/state"}' \
% (self.device_class, self.name, self.device_class, self.name))
#'{"name" : "once,", "payload_on": "0", "payload_off": "1", "optimistic": false, "retain": true, "state_topic": "souliss/switch/%s", "command_topic": "souliss/switch/%s/set"}' % (self.name, self.name))
#mqttc.subscribe("souliss/%s/%s/#" % (self.device_class, self.name))
#mqttc.subscribe("souliss/switch/%s" % self.name)
else:
print('WARNING: I do not know mqtt device for ' + self.description)
"""
def set_node_slot_index(self, node, slot, index):
self.node = node
self.slot = slot
self.index = index
def to_dict(self):
return {'ddesc': self.description,
'slo': self.slot,
'typ': self.ttype}
class TypicalT1n(Typical):
def __init__(self, ttype):
super(TypicalT1n,self).__init__(ttype)
self.state_desc = typical_types[ttype]['state_desc']
def update(self, value):
value = value[:self.size]
if value != self.state:
self.state = value
if self.size > 1: # Raw description for Typicals T15, T16 and T19
self.state_description = ':'.join("{:02x}".format(c) for c in self.state)
else:
if ord(value) in self.state_desc.keys():
self.state_description = self.state_desc[ord(value)]
else:
_LOGGER.warning("Unknow value!")
self.state_description = "Unknow value!"
_LOGGER.info("Node %d: Typical %d - %s updated to %s" % (self.node, self.index,
self.description,
self.state_description))
for listener in self.listeners:
listener(self)
def send_command(self, command):
# TODO: Handle different T1 behaviour
if command == 0x01: # Toggle
if self.state == chr(1):
self.update(chr(0))
else:
self.update(chr(1))
        elif command == 0x02:  # OnCmd
            self.update(chr(1))
        elif command == 0x04:  # OffCmd
            self.update(chr(0))
else:
_LOGGER.debug('Command %x not implemented' % command)
class TypicalT5n(Typical):
def __init__(self, ttype):
super(TypicalT5n,self).__init__(ttype)
self.units= typical_types[ttype]['units']
def update(self, value):
value = value[:self.size]
if value != self.state:
self.state_description = struct.unpack('e', value)[0]
self.state = value
_LOGGER.info("Node %d: Typical %d - %s updated to %s %s" % (self.node, self.index,
self.description,
self.state_description,
self.units))
for listener in self.listeners:
listener(self)
class TypicalNotImplemented(Typical):
def __init__(self, ttype):
_LOGGER.warning('Typical %x not implemented' % ttype)
super(TypicalNotImplemented,self).__init__(ttype)
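# Minimal usage sketch (illustrative, not part of the original module):
#
#   t = Typical.factory_type(0x52)             # T52: temperature measure
#   t.set_node_slot_index(node=0, slot=2, index=0)
#   t.add_listener(lambda typ: print(typ.state_description))
#   t.update(struct.pack('e', 21.5))           # half-float payload -> 21.5 C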
| mit | -5,909,111,028,891,820,000 | 38.553191 | 226 | 0.477569 | false | 3.506224 | false | false | false |
Nic30/hwtLib | hwtLib/amba/axis_comp/frame_join/input_reg.py | 1 | 6892 | from hwt.code import If
from hwt.code_utils import rename_signal
from hwt.hdl.types.bits import Bits
from hwt.hdl.types.defs import BIT
from hwt.hdl.types.struct import HStruct
from hwt.interfaces.std import VectSignal, Signal
from hwt.interfaces.utils import addClkRstn
from hwt.pyUtils.arrayQuery import iter_with_last
from hwt.serializer.mode import serializeParamsUniq
from hwt.synthesizer.hObjList import HObjList
from hwt.synthesizer.interface import Interface
from hwt.synthesizer.param import Param
from hwt.synthesizer.unit import Unit
from hwtLib.amba.axis import AxiStream
from pyMathBitPrecise.bit_utils import mask
class UnalignedJoinRegIntf(Interface):
"""
.. hwt-autodoc::
"""
def _config(self):
AxiStream._config(self)
def _declr(self):
self.data = VectSignal(self.DATA_WIDTH)
self.keep = VectSignal(self.DATA_WIDTH // 8)
if self.USE_STRB:
self.strb = VectSignal(self.DATA_WIDTH // 8)
self.relict = Signal()
self.last = Signal()
@serializeParamsUniq
class FrameJoinInputReg(Unit):
"""
Pipeline of registers for AxiStream with keep mask and flushing
.. hwt-autodoc::
"""
def _config(self):
self.REG_CNT = Param(2)
AxiStream._config(self)
self.USE_KEEP = True
def _declr(self):
assert self.USE_KEEP
addClkRstn(self)
with self._paramsShared():
self.dataIn = AxiStream()
self.regs = HObjList(
UnalignedJoinRegIntf()._m()
for _ in range(self.REG_CNT))
self.keep_masks = HObjList(
VectSignal(self.DATA_WIDTH // 8)
for _ in range(self.REG_CNT)
)
# used to shift whole register pipeline using input keep_mask
self.ready = Signal()
if self.ID_WIDTH or self.USER_WIDTH or self.DEST_WIDTH:
raise NotImplementedError("It is not clear how id/user/dest"
" should be managed between the frames")
def _impl(self):
mask_t = Bits(self.DATA_WIDTH // 8, force_vector=True)
data_fieds = [
(Bits(self.DATA_WIDTH), "data"),
(mask_t, "keep"), # valid= keep != 0
(BIT, "relict"), # flag for partially consumed word
(BIT, "last"), # flag for end of frame
]
if self.USE_STRB:
data_fieds.append((mask_t, "strb"),
)
data_t = HStruct(*data_fieds)
# regs[0] connected to output as first, regs[-1] connected to input
regs = [
self._reg(f"r{r_i:d}", data_t, def_val={"keep": 0,
"last": 0,
"relict": 0})
for r_i in range(self.REG_CNT)
]
ready = self.ready
keep_masks = self.keep_masks
fully_consumed_flags = []
for i, r in enumerate(regs):
_fully_consumed = (r.keep & keep_masks[i])._eq(0)
if i == 0:
_fully_consumed = _fully_consumed & self.ready
fully_consumed_flags.append(rename_signal(self, _fully_consumed, f"r{i:d}_fully_consumed"))
for i, (is_first_on_input_r, r) in enumerate(iter_with_last(regs)):
keep_mask_all = mask(r.keep._dtype.bit_length())
prev_keep_mask = self._sig(f"prev_keep_mask_{i:d}_tmp", r.keep._dtype)
prev_last_mask = self._sig(f"prev_last_mask_{i:d}_tmp")
is_empty = rename_signal(self, r.keep._eq(0) & ~(r.last & r.relict), f"r{i:d}_is_empty")
if is_first_on_input_r:
                # this register is connected directly to dataIn
r_prev = self.dataIn
If(r_prev.valid,
prev_keep_mask(keep_mask_all),
prev_last_mask(1)
).Else(
# flush (invalid input but the data can be dispersed
# in registers so we need to collapse it)
prev_keep_mask(0),
prev_last_mask(0),
)
if self.REG_CNT > 1:
next_r = regs[i - 1]
next_empty = next_r.keep._eq(0) & ~(next_r.relict & next_r.last)
else:
next_empty = 0
whole_pipeline_shift = (ready & (regs[0].keep & self.keep_masks[0])._eq(0))
r_prev.ready(is_empty # last input reg empty
| whole_pipeline_shift
| next_empty)
else:
r_prev = regs[i + 1]
prev_last_mask(1)
If(is_empty,
# flush
prev_keep_mask(keep_mask_all),
).Else(
prev_keep_mask(keep_masks[i + 1]),
)
data_drive = [r.data(r_prev.data), ]
if self.USE_STRB:
data_drive.append(r.strb(r_prev.strb))
fully_consumed = fully_consumed_flags[i]
if i == 0:
# last register in path
If((ready & fully_consumed) | is_empty,
*data_drive,
r.keep(r_prev.keep & prev_keep_mask),
r.last(r_prev.last & prev_last_mask),
r.relict(
r_prev.valid & r_prev.keep._eq(0)
if is_first_on_input_r else
                            # [TODO] potentially it should not be keep[0] but first keep with 1
r_prev.relict | (r_prev.last & (r_prev.keep[0] & ~keep_masks[i + 1][0] & ~fully_consumed_flags[i + 1]))
)
).Elif(ready,
r.keep(r.keep & keep_masks[i]),
r.relict(1), # became relict if there is some 1 in keep (== not fully consumed)
)
else:
next_fully_consumed = fully_consumed_flags[i - 1]
next_r = regs[i - 1]
next_is_empty = next_r.keep._eq(0) & ~(next_r.relict & next_r.last)
if is_first_on_input_r:
is_relict = r_prev.valid & r_prev.keep._eq(0)
else:
prev_fully_consumed = fully_consumed_flags[i + 1]
is_relict = r_prev.relict | ~prev_fully_consumed
If((ready & next_fully_consumed) | is_empty | next_is_empty,
*data_drive,
r.keep(r_prev.keep & prev_keep_mask),
r.last(r_prev.last & prev_last_mask),
r.relict(is_relict)
)
for rout, rin in zip(self.regs, regs):
rout.data(rin.data)
if self.USE_STRB:
rout.strb(rin.strb)
rout.keep(rin.keep)
rout.relict(rin.relict)
rout.last(rin.last)
| mit | -7,346,553,896,573,574,000 | 37.288889 | 127 | 0.506965 | false | 3.612159 | false | false | false |
rasathus/pigredients | pigredients/core/bitman.py | 1 | 2901 | class BitSequence(object):
def __init__(self, byte_data, word_length=8):
self.value = byte_data
self.bits_per_word = word_length
def _bitLen(self):
# Warning this does not take into account leading zeros, ie. 0000000010000000, you probbaly want to be using len(self)
length = 0
temp_val = self.value
while (temp_val):
temp_val >>= 1
length += 1
return(length)
def twos_complement(self):
val = int(self)
bits = len(self)
try:
if self[bits-1] != 0:
val = val - (1<<bits)
except ValueError:
# trying to negative shift zero.
pass
return val
def __getitem__(self, val):
return int(bin(self.value & (1 << val))[2:][0])
def __setitem__(self, key, val):
try:
bool(val)
except:
raise TypeError("Possible bit values should evaluate to True of False, not %s" % val)
if val:
# set bit 'key' to 1
self.value |= 1 << key
else:
# set bit 'key' to 0
self.value &= ~(1 << key)
def __len__(self):
# work out how many words are needed to represent the value, and return this number of bits as its length
if (self._bitLen() % self.bits_per_word) == 0 :
return int(self._bitLen())
else:
return int(self._bitLen() + self.bits_per_word - (self._bitLen() % self.bits_per_word))
def __str__(self):
return "0b%s" % bin(self.value)[2:].zfill(len(self))
def __int__(self):
return self.value
def __iter__(self):
for bit in range(len(self)):
yield self[bit]
def __invert__(self):
return ~int(self)
def __abs__(self):
return int(self)
if __name__ == '__main__':
bitseq = BitSequence(0b0000000010101010)
print bitseq
bitseq = BitSequence(0b0000100010101010)
print bitseq
print "First : %d Second : %d" % (bitseq[0], bitseq[1])
bitseq[0] = 1
bitseq[1] = 1
print "First : %d Second : %d Twentieth : %d" % (bitseq[0], bitseq[1], bitseq[20])
print bitseq
bitseq[0] = True
bitseq[1] = False
bitseq[5] = None
bitseq[6] = 1
bitseq[7] = 1
bitseq[20] = 1
print "First : %d Second : %d" % (bitseq[0], bitseq[1])
print bitseq
bitseq1 = BitSequence(0b01)
bitseq2 = BitSequence(0b10)
print "Equal : %s" % bitseq1 == bitseq2
print "Not Equal : %s" % bitseq1 != bitseq2
print "%d Greater than %d : %s" % (bitseq1, bitseq2, bitseq1 > bitseq2)
print "%d Less than %d : %s" % (bitseq1, bitseq2, bitseq1 < bitseq2)
print "len(sequence) : %d" % len(bitseq)
print "Printing bit sequece ..."
for bit in bitseq:
print bit
| mit | 3,311,757,226,960,893,000 | 27.722772 | 126 | 0.525681 | false | 3.42503 | false | false | false |
nOkuda/classtm | classtm/classifier.py | 1 | 3458 | """Classifiers"""
import os
import subprocess
import numpy as np
FILE_DIR = os.path.dirname(os.path.abspath(__file__))
SVM_DIR = os.path.join(FILE_DIR, 'svm_light')
SVM_LEARN = os.path.join(SVM_DIR, 'svm_learn')
SVM_CLASSIFY = os.path.join(SVM_DIR, 'svm_classify')
class TSVM:
"""Transductive support vector machine"""
def __init__(self, varname, classorder):
self.outdir = varname+'_tsvm'
if len(varname) >= 86:
            raise Exception('Output name prefix is too long: ' + varname)
os.makedirs(self.outdir, exist_ok=True)
self.train_prefix = os.path.join(self.outdir, 'train')
self.model_prefix = os.path.join(self.outdir, 'model')
self.test_name = os.path.join(self.outdir, 'test.dat')
self.pred_prefix = os.path.join(self.outdir, 'pred')
self.classorder = classorder
self.orderedclasses = [0] * len(self.classorder)
for key, val in self.classorder.items():
self.orderedclasses[val] = key
def _train_name(self, label):
return self.train_prefix+'_'+str(label)+'.dat'
def _model_name(self, label):
return self.model_prefix+'_'+str(label)
def _pred_name(self, label):
return self.pred_prefix+'_'+str(label)
def _write_feats(self, ofh, feats):
"""Writes the features into the data file"""
# expecting feats to be a csr row
for col, datum in zip(feats.indices[feats.indptr[0]:feats.indptr[1]],
feats.data[feats.indptr[0]:feats.indptr[1]]):
ofh.write(str(col+1))
ofh.write(':')
ofh.write(str(datum))
ofh.write(' ')
def fit(self, features, labels):
"""Call SVMLight for transductive SVM training
features must be a csr matrix
"""
for label_type in self.classorder:
train_file = self._train_name(label_type)
with open(train_file, 'w') as ofh:
for feats, label in zip(features, labels):
if label_type == 'unknown':
ofh.write('0 ')
elif label_type == label:
ofh.write('+1 ')
else:
ofh.write('-1 ')
self._write_feats(ofh, feats)
ofh.write('\n')
subprocess.run(
[
SVM_LEARN,
train_file,
self._model_name(label_type)])
def predict(self, features):
"""Call SVMLight for transductive SVM prediction"""
with open(self.test_name, 'w') as ofh:
for feats in features:
ofh.write('0 ')
self._write_feats(ofh, feats)
ofh.write('\n')
predictions = []
for label_type in self.classorder:
pred_name = self._pred_name(label_type)
subprocess.run(
[
SVM_CLASSIFY,
self.test_name,
self._model_name(label_type),
pred_name])
tmp = []
with open(pred_name) as ifh:
for line in ifh:
line = line.strip()
tmp.append(float(line))
predictions.append(tmp)
predictions = np.argmax(np.array(predictions), axis=0)
return np.array([self.orderedclasses[a] for a in predictions])
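if __name__ == '__main__':
    # Minimal usage sketch (assumptions: the SVMLight binaries are built and
    # scipy is installed; 'unknown' marks the unlabeled/transductive rows).
    from scipy.sparse import csr_matrix
    clf = TSVM('demo', {'spam': 0, 'ham': 1, 'unknown': 2})
    feats = csr_matrix([[1.0, 0.0], [0.0, 1.0], [0.5, 0.5]])
    clf.fit(feats, ['spam', 'ham', 'unknown'])
    print(clf.predict(feats))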
| gpl-3.0 | -5,612,169,472,402,530,000 | 35.020833 | 77 | 0.525448 | false | 3.846496 | false | false | false |
maxalbert/sumatra | sumatra/pfi.py | 5 | 1224 | #!/usr/bin/env python
"""
Obtain platform information from every node of a cluster.
This script should be placed somewhere on the user's path.
:copyright: Copyright 2006-2015 by the Sumatra team, see doc/authors.txt
:license: BSD 2-clause, see LICENSE for details.
"""
from __future__ import unicode_literals
from mpi4py import MPI
import platform
import socket
from datetime import datetime
TIMESTAMP_FORMAT = "%Y%m%d-%H%M%S"
MPI_ROOT = 0
comm = MPI.Comm.Get_parent()
rank = comm.Get_rank()
network_name = platform.node()
bits, linkage = platform.architecture()
platform_information = {
network_name: dict(architecture_bits=bits,
architecture_linkage=linkage,
machine=platform.machine(),
network_name=network_name,
ip_addr=socket.gethostbyname(network_name),
processor=platform.processor(),
release=platform.release(),
system_name=platform.system(),
version=platform.version(),
clock=datetime.now().strftime(TIMESTAMP_FORMAT))
}
comm.send(platform_information, dest=MPI_ROOT, tag=rank)
comm.Disconnect()
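# Parent-side sketch (assumption -- not part of this script): the launcher
# would spawn this file on n nodes and collect one message per node, e.g.
#   comm = MPI.COMM_SELF.Spawn(sys.executable, args=['pfi.py'], maxprocs=n)
#   info = [comm.recv(source=i, tag=i) for i in range(n)]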
| bsd-2-clause | -8,951,961,362,114,423,000 | 29.6 | 72 | 0.634804 | false | 4.163265 | false | false | false |
aglitke/vdsm | tests/sslTests.py | 2 | 11912 | #
# Copyright 2012 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
import os
import re
import socket
import tempfile
import threading
import subprocess
import errno
import testrunner
from vdsm import SecureXMLRPCServer
class SSLServerThread(threading.Thread):
"""A very simple server thread.
This server waits for SSL connections in a serial
fashion and then echoes whatever the client sends.
"""
def __init__(self, server):
threading.Thread.__init__(self)
self.server = server
self.stop = threading.Event()
def run(self):
# It is important to set a timeout in the server thread to be
# able to check periodically the stop flag:
self.server.settimeout(1)
# Accept client connections:
while not self.stop.isSet():
try:
client, address = self.server.accept()
client.settimeout(1)
try:
while True:
data = client.recv(1024)
if data:
client.sendall(data)
else:
break
except:
# We don't care about exceptions here, only on the
# client side:
pass
finally:
client.close()
except:
# Nothing to do here, we will check the stop flag in the
# next iteration of the loop:
pass
def shutdown(self):
        # Note that this doesn't stop the thread immediately, it just
        # indicates that stopping is requested; the thread will stop
        # with the next iteration of the accept loop:
self.stop.set()
class SSLTests(testrunner.VdsmTestCase):
"""Tests of SSL communication"""
def setUp(self):
"""Prepares to run the tests.
The preparation consist on creating temporary files containing
the keys and certificates and starting a thread that runs a
simple SSL server.
"""
# Save the key to a file:
with tempfile.NamedTemporaryFile(delete=False) as tmp:
tmp.write(KEY)
self.keyfile = tmp.name
# Save the certificate to a file:
with tempfile.NamedTemporaryFile(delete=False) as tmp:
tmp.write(CERTIFICATE)
self.certfile = tmp.name
# Create the server socket:
self.server = socket.socket()
self.server = SecureXMLRPCServer.SSLServerSocket(
raw=self.server,
keyfile=self.keyfile,
certfile=self.certfile,
ca_certs=self.certfile)
self.address = self.tryBind(ADDRESS)
self.server.listen(5)
# Start the server thread:
self.thread = SSLServerThread(self.server)
self.thread.start()
def tryBind(self, address):
ipadd, port = address
while True:
try:
self.server.bind((ipadd, port))
return (ipadd, port)
except socket.error as ex:
if ex.errno == errno.EADDRINUSE:
port += 1
if port > 65535:
raise socket.error(
errno.EADDRINUSE,
"Can not find available port to bind")
else:
raise
def tearDown(self):
"""Release the resources used by the tests.
        Removes the temporary files containing the keys and certificates,
stops the server thread and closes the server socket.
"""
# Delete the temporary files:
os.remove(self.keyfile)
os.remove(self.certfile)
# Stop the server thread and wait for it to finish:
self.thread.shutdown()
self.thread.join()
del self.thread
# Close the server socket:
self.server.shutdown(socket.SHUT_RDWR)
self.server.close()
del self.server
def runSClient(self, args=None, input=None):
"""This method runs the OpenSSL s_client command.
        The address parameter is a tuple containing the address
of the host and the port number that will be used to
build the -connect option of the command.
The args parameter is the list of additional parameters
to pass to the command.
The input parameter is the data that will be piped to the
standard input of the command.
The method returns a tuple containing the exit code of the
command and the data generated in the standard output.
"""
command = [
"openssl",
"s_client",
"-connect", "%s:%d" % self.address,
]
if args:
command += args
print("command=%s" % command)
process = subprocess.Popen(command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = process.communicate(input)
rc = process.wait()
print("rc=%d" % rc)
print("out=%s" % out)
print("err=%s" % err)
return rc, out
def extractField(self, name, text):
"""
Extracts the value of one of the informative fields provided in
the output of the s_client command.
The name parameter is the name of the field, for example
Session-ID for the SSL session identifier.
The text parameter should be the output of the execution of the
s_client command.
Returns the value of the given field or None if that field can't
        be found in the provided output of the s_client command.
"""
pattern = r"^\s*%s\s*:\s*(?P<value>[^\s]*)\s*$" % name
expression = re.compile(pattern, flags=re.MULTILINE)
match = expression.search(text)
if not match:
return None
value = match.group("value")
print("%s=%s" % (name, value))
return value
def testConnectWithoutCertificateFails(self):
"""
Verify that the connection without a client certificate
fails.
"""
rc, _ = self.runSClient()
self.assertNotEquals(rc, 0)
def testConnectWithCertificateSucceeds(self):
"""
Verify that the connection with a valid client certificate
works correctly.
"""
rc, _ = self.runSClient([
"-cert", self.certfile,
"-key", self.keyfile,
])
self.assertEquals(rc, 0)
def testSessionIsCached(self):
"""
Verify that SSL the session identifier is preserved when
connecting two times without stopping the server.
"""
# Create a temporary file to store the session details:
sessionDetailsFile = tempfile.NamedTemporaryFile(delete=False)
# Connect first time and save the session to a file:
rc, out = self.runSClient([
"-cert", self.certfile,
"-key", self.keyfile,
"-sess_out", sessionDetailsFile.name,
])
self.assertEquals(rc, 0)
# Get the session id from the output of the command:
firstSessionId = self.extractField("Session-ID", out)
self.assertTrue(firstSessionId is not None)
# Connect second time using the saved session file:
rc, out = self.runSClient([
"-cert", self.certfile,
"-key", self.keyfile,
"-sess_in", sessionDetailsFile.name,
])
self.assertEquals(rc, 0)
# Get the session id again:
secondSessionId = self.extractField("Session-ID", out)
self.assertTrue(secondSessionId is not None)
# Remove the temporary file used to store the session details,
# as we don't need it any longer:
os.remove(sessionDetailsFile.name)
# Compare the session ids:
self.assertEquals(secondSessionId, firstSessionId)
# The address of the tests server:
ADDRESS = ("127.0.0.1", 8443)
# Private key used for the tests:
KEY = """
-----BEGIN PRIVATE KEY-----
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDapPcHwCWYsfiH
pJ/tXpcSZsa6ocJZaL3HF/mFxiO4/7za6lP0Vdtln4CwCzqAfUJKQhCHNyYUvZsf
Eylr0U30MQzhynq8+F5co5f2RNzz93aL7cjEUQMK2YaShLxz7o/QdoNSnT8sJ3TO
P16VEcpngoBD/nDXxNf0HekwhENYz4K2Hqol0xcGY6x8cJoXNybBPheVGTl6wy+r
W9YPuL0gR2/GgyVT1UP0EBGebkvza+eVaenrp0qrMiEQMDAOeNq3mu6ueOUo03Hn
xaEqxrToYv0eBbpF2Z469uJXaLP/NmcT1GUbFqP3H+/Js68HwxCEqb1kKGiG8E58
hSHHM95ZAgMBAAECggEAeMU2TmmsWrOze/lK/WqKN/fdPamsGIbqjNaJVYMkqENa
pfFZflUOYwu/oX4SSnbl7u6fApFLz5kL3hZPguaSEJgnbXDSax8lwDX88mMHSRsf
uBsYEphM/ek5lCUNk1vqxFMyJqgFBPamZmZKcDzreFF1WBlra0OnpYgADnSAXsT7
HcQDkSe1s1YuuRYYUuRc5KYhrQ5P3AHCJ++w7QK7wZbo/5iQuVuuytMBbCWFNH06
K+fEqZRB9wXg9ubvvbcAlX579QL2HRZl5GvhSP+2Jah/zoTndXAKVVWWx8L1ohKg
aAOxWGFy4f47BQwmkafZVYIGsfudEK4Dmf6UmwvVIQKBgQDw8r5ihTHuXLuyBtwy
J+Pn//zY1FKJcANshvFgQtrfbmLiulXDtvaiitdkQj8HyTeEtgtuGt5mnE5uKm8N
MV9eSU2FyuyazwlemI4XYdQWtcw+ZBh7K3u6/QjqDJfNjVDnv7S2VS9DDs8Ga7r4
fanecGfQ6ni5Mqxb2OAlOcBYRwKBgQDoTYmR35Lo/qkJ6Mm+8IljdvN3iAgqkO67
b6WhjkTwgO/Y+zGfQ/W2PbPsVWc1f3IBYvKmArvMDB5PZ9HyzIg27OxCyhjbLmvb
kEPjQF6f+FOb4h4yo9i2dBJucFAKrHMHiqH24Hlf3WOordxX9lY37M0fwpg2kZIM
ConIt/4EXwKBgDIXtV8UI+pTWy5K4NKImogsHywREEvEfuG8OEhz/b7/2w0aAiSb
UDFAvkD4yNPckG9FzaCJc31Pt7qNleLfRd17TeOn6YLR0jfZbYkM7KQADcNW2gQZ
aTLZ0lWeYpz4aT6VC4Pwt8+wL3g9Q3TP41X8dojnhkuybkT2FLuIgyWXAoGAMJUW
skU5qjSoEYR3vND9Sqnz3Qm7+3r4EocU8qaYUFwGzTArfo1t88EPwdtSjGOs6hFR
gdqMf+4A4MZrqAWSbzo5ZvZxIFWjBPY03G/32ijLA4zUl+6gQfggaqxecP0DyY36
tXDYsW3Ri9Ngg5znByck9wFxZ+glzRLfIfUo0K0CgYEAkogcGLKGb5zdwAXuUVQK
ftftLEARqs/gMA1cItxurtho0JUxYaaKgSICB7MQPEuTtdUNqCkeu9S838dbyfL7
gGdsZ26Can3IAyQv7+3DObvB376T4LD8Mp/ZHvOpeZQQ9O4ngadteRcBaCcd78Ij
VSgxeSvBewtCS1FnILwgXJ4=
-----END PRIVATE KEY-----
"""
# This is the certificate used for the tests, and it expires in Sep 26
# 2022, so don't be surprised if by that date the test starts failing:
CERTIFICATE = """
-----BEGIN CERTIFICATE-----
MIIC8zCCAdugAwIBAgIBADANBgkqhkiG9w0BAQUFADAUMRIwEAYDVQQDDAkxMjcu
MC4wLjEwHhcNMTIwOTI4MTcyMzE3WhcNMjIwOTI2MTcyMzE3WjAUMRIwEAYDVQQD
DAkxMjcuMC4wLjEwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDapPcH
wCWYsfiHpJ/tXpcSZsa6ocJZaL3HF/mFxiO4/7za6lP0Vdtln4CwCzqAfUJKQhCH
NyYUvZsfEylr0U30MQzhynq8+F5co5f2RNzz93aL7cjEUQMK2YaShLxz7o/QdoNS
nT8sJ3TOP16VEcpngoBD/nDXxNf0HekwhENYz4K2Hqol0xcGY6x8cJoXNybBPheV
GTl6wy+rW9YPuL0gR2/GgyVT1UP0EBGebkvza+eVaenrp0qrMiEQMDAOeNq3mu6u
eOUo03HnxaEqxrToYv0eBbpF2Z469uJXaLP/NmcT1GUbFqP3H+/Js68HwxCEqb1k
KGiG8E58hSHHM95ZAgMBAAGjUDBOMB0GA1UdDgQWBBR0dTG068xPsrXKDD6r6Ne+
8RQghzAfBgNVHSMEGDAWgBR0dTG068xPsrXKDD6r6Ne+8RQghzAMBgNVHRMEBTAD
AQH/MA0GCSqGSIb3DQEBBQUAA4IBAQCoY1bFkafDv3HIS5rBycVL0ghQV2ZgQzAj
sCZ47mgUVZKL9DiujRUFtzrMRhBBfyeT0Bv8zq+eijhGmjp8WqyRWDIwHoQwxHmD
EoQhAMR6pXvjZdYI/vwHJK5u0hADQZJ+zZp77m/p95Ds03l/g/FZHbCdISTTJnXw
t6oeDZzz/dQSAiuyAa6+0tdu2GNF8OkR5c7W+XmL797soiT1uYMgwIYQjM1NFkKN
vGc0b16ODiPvsB0bo+USw2M0grjsJEC0dN/GBgpFHO4oKAodvEWGGxANSHAXoD0E
bh5L7zBhjgag+o+ol2PDNZMrJlFvw8xzhQyvofx2h7H+mW0Uv6Yr
-----END CERTIFICATE-----
"""
| gpl-2.0 | 846,182,912,940,857,900 | 34.879518 | 79 | 0.670165 | false | 3.011884 | true | false | false |
JanMalte/django-sortable-listview | example_project/simple_blog/settings.py | 1 | 4779 | import os
DEBUG = True
TEMPLATE_DEBUG = DEBUG
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(os.path.dirname(__file__), 'simple_blog.sqlite'),
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '^x+vb79pfnjn@3ozbjwl&#xlo^_sybox877z24-*1cokdx%0ex'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'simple_blog.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'simple_blog.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'simple_blog',
'sortable_listview',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| mit | 2,895,065,102,437,082,000 | 32.1875 | 88 | 0.70203 | false | 3.673328 | false | false | false |
GentlemanBrewing/ADCLibraries-MCP3424 | ADCDACPi/demo-dacsinewave.py | 1 | 3735 | #!/usr/bin/python3
from ABE_ADCDACPi import ADCDACPi
import time
import math
"""
================================================
ABElectronics ADCDAC Pi 2-Channel ADC, 2-Channel DAC | DAC sine wave generator demo
Version 1.0 Created 29/02/2015
run with: python3 demo-dacsinewave.py
================================================
# this demo uses the set_dac_raw method to generate a sine wave from a
# predefined set of values
"""
adcdac = ADCDACPi()
DACLookup_FullSine_12Bit = \
[2048, 2073, 2098, 2123, 2148, 2174, 2199, 2224,
2249, 2274, 2299, 2324, 2349, 2373, 2398, 2423,
2448, 2472, 2497, 2521, 2546, 2570, 2594, 2618,
2643, 2667, 2690, 2714, 2738, 2762, 2785, 2808,
2832, 2855, 2878, 2901, 2924, 2946, 2969, 2991,
3013, 3036, 3057, 3079, 3101, 3122, 3144, 3165,
3186, 3207, 3227, 3248, 3268, 3288, 3308, 3328,
3347, 3367, 3386, 3405, 3423, 3442, 3460, 3478,
3496, 3514, 3531, 3548, 3565, 3582, 3599, 3615,
3631, 3647, 3663, 3678, 3693, 3708, 3722, 3737,
3751, 3765, 3778, 3792, 3805, 3817, 3830, 3842,
3854, 3866, 3877, 3888, 3899, 3910, 3920, 3930,
3940, 3950, 3959, 3968, 3976, 3985, 3993, 4000,
4008, 4015, 4022, 4028, 4035, 4041, 4046, 4052,
4057, 4061, 4066, 4070, 4074, 4077, 4081, 4084,
4086, 4088, 4090, 4092, 4094, 4095, 4095, 4095,
4095, 4095, 4095, 4095, 4094, 4092, 4090, 4088,
4086, 4084, 4081, 4077, 4074, 4070, 4066, 4061,
4057, 4052, 4046, 4041, 4035, 4028, 4022, 4015,
4008, 4000, 3993, 3985, 3976, 3968, 3959, 3950,
3940, 3930, 3920, 3910, 3899, 3888, 3877, 3866,
3854, 3842, 3830, 3817, 3805, 3792, 3778, 3765,
3751, 3737, 3722, 3708, 3693, 3678, 3663, 3647,
3631, 3615, 3599, 3582, 3565, 3548, 3531, 3514,
3496, 3478, 3460, 3442, 3423, 3405, 3386, 3367,
3347, 3328, 3308, 3288, 3268, 3248, 3227, 3207,
3186, 3165, 3144, 3122, 3101, 3079, 3057, 3036,
3013, 2991, 2969, 2946, 2924, 2901, 2878, 2855,
2832, 2808, 2785, 2762, 2738, 2714, 2690, 2667,
2643, 2618, 2594, 2570, 2546, 2521, 2497, 2472,
2448, 2423, 2398, 2373, 2349, 2324, 2299, 2274,
2249, 2224, 2199, 2174, 2148, 2123, 2098, 2073,
2048, 2023, 1998, 1973, 1948, 1922, 1897, 1872,
1847, 1822, 1797, 1772, 1747, 1723, 1698, 1673,
1648, 1624, 1599, 1575, 1550, 1526, 1502, 1478,
1453, 1429, 1406, 1382, 1358, 1334, 1311, 1288,
1264, 1241, 1218, 1195, 1172, 1150, 1127, 1105,
1083, 1060, 1039, 1017, 995, 974, 952, 931,
910, 889, 869, 848, 828, 808, 788, 768,
749, 729, 710, 691, 673, 654, 636, 618,
600, 582, 565, 548, 531, 514, 497, 481,
465, 449, 433, 418, 403, 388, 374, 359,
345, 331, 318, 304, 291, 279, 266, 254,
242, 230, 219, 208, 197, 186, 176, 166,
156, 146, 137, 128, 120, 111, 103, 96,
88, 81, 74, 68, 61, 55, 50, 44,
39, 35, 30, 26, 22, 19, 15, 12,
10, 8, 6, 4, 2, 1, 1, 0,
0, 0, 1, 1, 2, 4, 6, 8,
10, 12, 15, 19, 22, 26, 30, 35,
39, 44, 50, 55, 61, 68, 74, 81,
88, 96, 103, 111, 120, 128, 137, 146,
156, 166, 176, 186, 197, 208, 219, 230,
242, 254, 266, 279, 291, 304, 318, 331,
345, 359, 374, 388, 403, 418, 433, 449,
465, 481, 497, 514, 531, 548, 565, 582,
600, 618, 636, 654, 673, 691, 710, 729,
749, 768, 788, 808, 828, 848, 869, 889,
910, 931, 952, 974, 995, 1017, 1039, 1060,
1083, 1105, 1127, 1150, 1172, 1195, 1218, 1241,
1264, 1288, 1311, 1334, 1358, 1382, 1406, 1429,
1453, 1478, 1502, 1526, 1550, 1575, 1599, 1624,
1648, 1673, 1698, 1723, 1747, 1772, 1797, 1822,
1847, 1872, 1897, 1922, 1948, 1973, 1998, 2023]
while True:
for val in DACLookup_FullSine_12Bit:
adcdac.set_dac_raw(1, val)
| mit | -9,149,330,796,570,357,000 | 40.966292 | 83 | 0.57992 | false | 2.207447 | false | false | false |
rdebroiz/presto | data_model.py | 1 | 5544 | import re
import logging
from pprint import pformat
from scope import Scope
import settings
try:
import path
except ImportError:
logging.critical("Presto requiered path.py to be installed, "
"checkout requirement.txt.")
raise
# char to escape in a regular expression to be taken as literal.
TO_ESCAPE_FOR_RE = r"()[]{}*+?|.^$\\"
# char to escaped inside [] in a regular expression to be taken as literal.
TO_ESCAPE_INSIDE_BRACKET_FOR_RE = r"\^\-\]\\"
def escape_reserved_re_char(string):
"""
Escape with a backslash characters reserved by regular expressions
in the given string.
"""
# first escape all char that have to be escaped inside []
# (we're actually putting them inside [])
to_escape = re.sub("(?P<char>[" + TO_ESCAPE_INSIDE_BRACKET_FOR_RE + "])",
r"\\\g<char>",
TO_ESCAPE_FOR_RE)
return re.sub("(?P<char>[" + to_escape + "])",
r"\\\g<char>",
string)
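# e.g. escape_reserved_re_char(r"a+b(c)") returns r"a\+b\(c\)"
# (illustrative call).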
class MetaDataModel(type):
"""
Meta class for DataModel.
    Used to have a 'class property' behavior for the
    _files, _root and _scopes class attributes,
i.e. they can't be modified outside DataModel.
"""
@property
def files(cls):
return cls._files
@files.setter
def files(self, value):
self._files = value
@property
def root(cls):
return cls._root
@root.setter
def root(self, value):
self._root = value
@property
def scopes(cls):
return cls._scopes
@scopes.setter
def scopes(self, value):
self._scopes = value
@property
def document_path(self):
return self._document_path
@document_path.setter
def document_path(self, value):
self._document_path = value
class DataModelError(Exception):
pass
class DataModel(metaclass=MetaDataModel):
_files = None
_root = None
_scopes = None
_document_path = None
def __init__(self, yaml_doc, yaml_doc_dir, scope_to_override):
# Check if the class has already been setup.
if(DataModel.files is not None and DataModel.root is not None and
DataModel.scopes is not None):
logging.warn("DataModel have already been setup:\nroot: %s"
"\n%s files\n%s scopes", DataModel.root,
len(DataModel.scopes), len(DataModel.scopes))
DataModel.document_path = yaml_doc_dir
# Change helpers class instance attribut so all instances of Evaluators
# will use it as helpers
from evaluator import Evaluator
        # update yaml_doc with scope_to_override before setting helpers.
# if scope_to_override in yaml_doc:
yaml_doc.update(scope_to_override)
Evaluator.set_helpers(yaml_doc)
try:
DataModel._set_root(yaml_doc['__ROOT__'])
except KeyError:
logging.error("configuration file must have a '__ROOT__' "
"attribute.")
except (OSError, KeyError, TypeError):
logging.critical("unable to build data model. "
"bad key: '__ROOT__'")
raise
try:
if(DataModel.scopes is None):
DataModel.scopes = dict()
scope_dict = yaml_doc['__SCOPES__']
# check if scope to override are ok.
for scope in scope_to_override:
if scope not in scope_dict:
logging.critical("Unable to find overrided scope '" +
settings.FAIL + "{}".format(scope) +
settings.ENDCBOLD)
raise
scope_dict.update(scope_to_override)
DataModel._make_scopes(scope_dict)
logging.debug("Scopes:\n%s", pformat(DataModel.scopes))
except KeyError:
logging.error("configuration file must have a '__SCOPES__' "
"attribute.")
logging.critical("unable to build data model. "
"bad key: '__SCOPES__'")
raise DataModelError()
@classmethod
def _set_root(cls, root):
from evaluator import Evaluator
evltr = Evaluator()
root = evltr.evaluate(root)
cls.root = path.Path(root).abspath()
try:
cls.files = sorted(cls.root.walkfiles())
logging.debug("files:\n%s", pformat(cls.files))
except OSError:
logging.error("no such directory: ('%s')", cls.root)
raise
@classmethod
def _make_scopes(cls, peers):
from evaluator import Evaluator
evltr = Evaluator()
for key in peers:
name = key
try:
expression = evltr.evaluate(peers[key])
except (TypeError, KeyError):
logging.critical("Error in __SCOPES__ definition for {0}"
"".format(key))
raise
values = set()
for f in cls.files:
try:
match = re.search(r".*?" + expression, f)
except re.error:
logging.critical("bad regular expression '%s' for %s: ",
key, expression)
raise
if(match):
values.add(escape_reserved_re_char(match.group(0)))
cls.scopes[name] = Scope(name, expression, sorted(values))
| gpl-2.0 | 2,314,902,069,117,421,600 | 32 | 79 | 0.546356 | false | 4.314397 | false | false | false |
felixzhao/BookDigger | GetBookList/queryBookListByTag.py | 1 | 1587 | #encoding:UTF-8
import requests
import time
import datetime
import sys
from bs4 import BeautifulSoup
import re
def getInfo(soup):
print('in get info.')
result = ''
    for dl_tag in soup.findAll('dl'):
        title = dl_tag.find('a', {'class': 'title'}).string
        print(title)
        result += title + '\n'
return result
def getPage(url_path):
print(url_path + ' ' + 'datetime? : ' + str(datetime.datetime.now()))
try:
        print('Starting to process a new page.')
response = requests.get(url_path)
if response.status_code != 200:
print("\n!!! 网络访问返回异常,异常代码:" + str(response.status_code) + " !!!\n")
        print('Finished fetching page content.')
        time.sleep(1)
        return response.text
except Exception:
print('get Exception.')
print(sys.exc_info())
pass
def getList(url_path, fout):
url_path += '?start='
for i in range(0,100):
p = url_path + str(i * 15)
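        # e.g. for i == 1 this yields .../book?start=15 -- douban paginates
        # 15 items per page (illustrative URL shape).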
responseText = getPage(p)
soup = BeautifulSoup(responseText)
r = getInfo(soup)
print(r)
fout.write(r)
fout.write('\n')
fout.write(' >>>>>> the ' + str(i) + ' page. <<<<<<\n')
fout.write('\n')
if __name__ == '__main__':
    name = '关于儿童文学的书'  # "books about children's literature"; kept verbatim as the output filename
root = 'http://www.douban.com/tag/%E5%84%BF%E7%AB%A5%E6%96%87%E5%AD%A6/book'
fout = open('../rating_out/' + name + '.txt','w')
getList(root, fout)
fout.close()
| apache-2.0 | 7,360,206,904,995,574,000 | 24.542373 | 80 | 0.521566 | false | 3.001992 | false | false | false |
jigarkb/CTCI | LeetCode/281-M-ZigzagIterator.py | 2 | 1454 | # Given two 1d vectors, implement an iterator to return their elements alternately.
#
# For example, given two 1d vectors:
#
# v1 = [1, 2]
# v2 = [3, 4, 5, 6]
# By calling next repeatedly until hasNext returns false, the order of elements returned by next should be:
# [1, 3, 2, 4, 5, 6].
#
# Follow up: What if you are given k 1d vectors? How well can your code be extended to such cases?
#
# Clarification for the follow up question - Update (2015-09-18):
# The "Zigzag" order is not clearly defined and is ambiguous for k > 2 cases.
# If "Zigzag" does not look right to you, replace "Zigzag" with "Cyclic". For example, given the following input:
#
# [1,2,3]
# [4,5,6,7]
# [8,9]
# It should return [1,4,8,2,5,9,3,6,7].
class ZigzagIterator(object):
def __init__(self, v1, v2):
"""
Initialize your data structure here.
:type v1: List[int]
:type v2: List[int]
"""
self.count = 0
self.all_lists = [(len(v), iter(v)) for v in (v1, v2) if v]
def next(self):
"""
:rtype: int
"""
n, v = self.all_lists.pop(0)
if n > 1:
self.all_lists.append((n - 1, v))
return v.next()
def hasNext(self):
"""
:rtype: bool
"""
return bool(self.all_lists)
# Note:
# Maintain a (length, iterator) tuple for each of v1 and v2, popping one off
# the front and appending it back to the end while it still has elements left.
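# Illustrative trace (sketch): v1=[1,2], v2=[3,4,5,6] starts the queue as
# [(2, iter(v1)), (4, iter(v2))]; repeated next() calls then yield
# 1, 3, 2, 4, 5, 6.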
| mit | -3,134,418,321,462,703,600 | 28.08 | 117 | 0.594223 | false | 3.073996 | false | false | false |
morganoh/Python_With_Kirk | week4_programs/rtr2_change_buffer_take2.py | 1 | 1465 | #!/usr/bin/env python
""" use paramiko to send cmd to RTR2 """
import paramiko
import time
from getpass import getpass
MAX_BUFFER = 65535
def prepare_buffer(rtr2):
if rtr2.recv_ready():
# print 'buffer is full'
return rtr2.recv(MAX_BUFFER)
def disable_paging(rtr2):
cmd = 'terminal length 0 \n'
rtr2.send(cmd)
time.sleep(1)
prepare_buffer(rtr2)
def send_cmd(rtr2, cmd):
#print cmd
rtr2.send(cmd)
time.sleep(2)
if rtr2.recv_ready():
return rtr2.recv(MAX_BUFFER)
else:
print 'buffer is empty'
def main():
"""
    set up the paramiko connection and send the commands
"""
ip_addr = '50.76.53.27'
port = 8022
username = 'pyclass'
password = getpass()
remote_conn_pre = paramiko.SSHClient()
remote_conn_pre.load_system_host_keys()
remote_conn_pre.connect(ip_addr, port=port, username=username, password=password, look_for_keys=False, allow_agent=False)
rtr2 = remote_conn_pre.invoke_shell()
prepare_buffer(rtr2)
disable_paging(rtr2)
cmd = 'show run | inc logging \n'
output = send_cmd(rtr2, cmd)
print output
cmd = 'conf t \n'
send_cmd(rtr2, cmd)
cmd = 'logging buffered 30000 \n'
send_cmd(rtr2, cmd)
cmd = 'exit \n'
send_cmd(rtr2, cmd)
cmd = 'wr \n'
send_cmd(rtr2, cmd)
cmd = 'show run | inc logging \n'
output = send_cmd(rtr2, cmd)
print output
if __name__ == "__main__":
main()
| apache-2.0 | -1,131,112,839,962,067,300 | 20.865672 | 125 | 0.607509 | false | 3.008214 | false | false | false |
ccqpein/what_to_eat | UI.py | 1 | 1530 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
try:
from PySide import QtCore, QtGui
except ImportError:
print("没有Qt模块,将在命令行操作")
os.system("python " + sys.path[0] + "/main.py")
os._exit(0)
from StoreData import store_list, all_store, store_init, add_new_store
from main import main
# The Qt Designer .ui loading approach below has been abandoned
'''
qtCreatorFile = sys.path[0] + "/UIView.ui" # Enter file here.
Ui_MainWindow, QtBaseClass = uic.loadUiType(qtCreatorFile)'''
from UIView import Ui_MainWindow
class MyApp(QtGui.QMainWindow, Ui_MainWindow):
"""define UI init"""
def __init__(self):
QtGui.QMainWindow.__init__(self)
Ui_MainWindow.__init__(self)
self.setupUi(self)
self.viewlist()
def viewlist(self):
ss = "\n".join([st[1] for st in store_list])
self.StoreListView.setText(ss)
class what_to_eat(MyApp):
"""操作部分"""
def __init__(self):
super(what_to_eat, self).__init__()
self.pushButton.clicked.connect(self.result)
self.pushButton_2.clicked.connect(self.add)
def result(self):
self.resultBrowser.setText(main(1).decode("utf-8"))
def add(self):
a = store_init()
a.name = self.newst.toPlainText()
a.address = self.newadd.toPlainText()
if a.name:
add_new_store(a)
else:
pass
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
window = what_to_eat()
window.show()
sys.exit(app.exec_()) | mit | -4,852,610,911,294,674,000 | 24.40678 | 70 | 0.608144 | false | 3.140461 | false | false | false |
blubberdiblub/eztemplate | eztemplate/engines/string_formatter_engine.py | 1 | 3124 | #!/usr/bin/env python
"""Provide the standard Python string.Formatter engine."""
from __future__ import absolute_import
from __future__ import print_function
import string
try:
basestring
except NameError:
basestring = str
from . import Engine
class MissingField(object):
"""Represent a missing field for unprocessed output."""
def __init__(self, field_name):
"""Initialize field name."""
self.field_name = field_name
self.conversion = None
self.format_spec = None
def __str__(self):
"""Yield representation as close to original spec as possible."""
return '{%s%s%s}' % (
self.field_name,
'!' + self.conversion if self.conversion else '',
':' + self.format_spec if self.format_spec else '',
)
class FormatterWrapper(string.Formatter):
"""Wrap string.Formatter.
Handle only a mapping and provide tolerance.
"""
def __init__(self, tolerant=False, **kwargs):
"""Initialize FormatterWrapper."""
super(FormatterWrapper, self).__init__(**kwargs)
self.tolerant = tolerant
def get_value(self, key, args, kwargs):
"""Get value only from mapping and possibly convert key to string."""
if (self.tolerant and
not isinstance(key, basestring) and
key not in kwargs):
key = str(key)
return kwargs[key]
def get_field(self, field_name, args, kwargs):
"""Create a special value when field missing and tolerant."""
try:
obj, arg_used = super(FormatterWrapper, self).get_field(
field_name, args, kwargs)
except (KeyError, IndexError, AttributeError):
if not self.tolerant:
raise
obj = MissingField(field_name)
arg_used = field_name
return obj, arg_used
def convert_field(self, value, conversion):
"""When field missing, store conversion specifier."""
if isinstance(value, MissingField):
if conversion is not None:
value.conversion = conversion
return value
return super(FormatterWrapper, self).convert_field(value, conversion)
def format_field(self, value, format_spec):
"""When field missing, return original spec."""
if isinstance(value, MissingField):
if format_spec is not None:
value.format_spec = format_spec
return str(value)
return super(FormatterWrapper, self).format_field(value, format_spec)
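# Illustrative behaviour (sketch): with tolerant=True a missing field is
# echoed back verbatim instead of raising, e.g.
#   FormatterWrapper(tolerant=True).vformat('{a} {b!r:>6}', (), {'a': 1})
#   # -> '1 {b!r:>6}'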
class StringFormatter(Engine):
"""String.Formatter engine."""
handle = 'string.Formatter'
def __init__(self, template, tolerant=False, **kwargs):
"""Initialize string.Formatter."""
super(StringFormatter, self).__init__(**kwargs)
self.template = template
self.formatter = FormatterWrapper(tolerant=tolerant)
def apply(self, mapping):
"""Apply a mapping of name-value-pairs to a template."""
return self.formatter.vformat(self.template, None, mapping)
| mit | -9,180,447,834,391,272,000 | 28.471698 | 77 | 0.607234 | false | 4.501441 | false | false | false |
e-gun/HipparchiaServer | server/_deprecated/_vectors/wordbaggers.py | 1 | 4877 | # -*- coding: utf-8 -*-
"""
HipparchiaServer: an interface to a database of Greek and Latin texts
Copyright: E Gunderson 2016-21
License: GNU GENERAL PUBLIC LICENSE 3
(see LICENSE in the top level directory of the distribution)
"""
from collections import deque
from server.dbsupport.miscdbfunctions import resultiterator
from server.dbsupport.tablefunctions import assignuniquename
from server.hipparchiaobjects.connectionobject import ConnectionObject
def buildwordbags(searchobject, morphdict: dict, sentences: list) -> deque:
"""
return the bags after picking which bagging method to use
:param searchobject:
:param morphdict:
:param sentences:
:return:
"""
searchobject.poll.statusis('Building bags of words')
baggingmethods = {'flat': buildflatbagsofwords,
'alternates': buildbagsofwordswithalternates,
'winnertakesall': buildwinnertakesallbagsofwords,
'unlemmatized': buidunlemmatizedbagsofwords}
bagofwordsfunction = baggingmethods[searchobject.session['baggingmethod']]
bagsofwords = bagofwordsfunction(morphdict, sentences)
return bagsofwords
def buildwinnertakesallbagsofwords(morphdict, sentences) -> deque:
"""
turn a list of sentences into a list of list of headwords
here we figure out which headword is the dominant homonym
then we just use that term
esse ===> sum
esse =/=> edo
assuming that it is faster to do this 2x so you can do a temp table query rather than iterate into DB
not tested/profiled, though...
:param morphdict:
:param sentences:
:return:
"""
# PART ONE: figure out who the "winners" are going to be
bagsofwords = buildflatbagsofwords(morphdict, sentences)
allheadwords = {w for bag in bagsofwords for w in bag}
dbconnection = ConnectionObject(readonlyconnection=False)
dbconnection.setautocommit()
dbcursor = dbconnection.cursor()
rnd = assignuniquename(6)
tqtemplate = """
CREATE TEMPORARY TABLE temporary_headwordlist_{rnd} AS
SELECT headwords AS hw FROM unnest(ARRAY[{allwords}]) headwords
"""
qtemplate = """
SELECT entry_name, total_count FROM {db}
WHERE EXISTS
(SELECT 1 FROM temporary_headwordlist_{rnd} temptable WHERE temptable.hw = {db}.entry_name)
"""
tempquery = tqtemplate.format(rnd=rnd, allwords=list(allheadwords))
dbcursor.execute(tempquery)
	# NB: a temporary table is used here instead of
	# psycopg2.extras.execute_values
	# (https://www.psycopg.org/docs/extras.html#psycopg2.extras.execute_values)
query = qtemplate.format(rnd=rnd, db='dictionary_headword_wordcounts')
dbcursor.execute(query)
results = resultiterator(dbcursor)
	rankedheadwords = {r[0]: r[1] for r in results}
# PART TWO: let the winners take all
bagsofwords = deque()
for s in sentences:
lemattized = deque()
for word in s:
# [('x', 4), ('y', 5), ('z', 1)]
try:
				possibilities = sorted([(item, rankedheadwords[item]) for item in morphdict[word]], key=lambda x: x[1])
# first item of last tuple is the winner
lemattized.append(possibilities[-1][0])
except KeyError:
pass
if lemattized:
bagsofwords.append(lemattized)
return bagsofwords
def buidunlemmatizedbagsofwords(morphdict, sentences) -> deque:
"""
you wasted a bunch of cycles generating the morphdict, now you will fail to use it...
what you see is what you get...
:param morphdict:
:param sentences:
:return:
"""
bagsofwords = sentences
return bagsofwords
def buildflatbagsofwords(morphdict, sentences) -> deque:
"""
turn a list of sentences into a list of list of headwords
here we put alternate possibilities next to one another:
ϲυγγενεύϲ ϲυγγενήϲ
in buildbagsofwordswithalternates() we have one 'word':
ϲυγγενεύϲ·ϲυγγενήϲ
:param morphdict:
:param sentences:
:return:
"""
bagsofwords = deque()
for s in sentences:
lemattized = deque()
for word in s:
try:
# WARNING: we are treating homonymns as if 2+ words were there instead of just one
# 'rectum' will give you 'rectus' and 'rego'; 'res' will give you 'reor' and 'res'
# this necessarily distorts the vector space
lemattized.append([item for item in morphdict[word]])
except KeyError:
pass
# flatten
bagsofwords.append([item for sublist in lemattized for item in sublist])
return bagsofwords
def buildbagsofwordswithalternates(morphdict, sentences) -> deque:
"""
buildbagsofwords() in rudimentaryvectormath.py does this but flattens rather than
joining multiple possibilities
here we have one 'word':
ϲυγγενεύϲ·ϲυγγενήϲ
there we have two:
ϲυγγενεύϲ ϲυγγενήϲ
:param morphdict:
:param sentences:
:return:
"""
bagsofwords = deque()
for s in sentences:
lemmatizedsentence = deque()
for word in s:
try:
lemmatizedsentence.append('·'.join(morphdict[word]))
except KeyError:
pass
bagsofwords.append(lemmatizedsentence)
return bagsofwords
| gpl-3.0 | 1,389,683,665,494,826,200 | 24.294737 | 108 | 0.731586 | false | 3.063098 | false | false | false |
Sticklyman1936/workload-automation | wlauto/workloads/apklaunch/__init__.py | 3 | 2431 | # Copyright 2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=attribute-defined-outside-init
import os
from wlauto import Workload, Parameter
from wlauto import File
from wlauto.exceptions import ConfigError
from wlauto.utils.android import ApkInfo
class ApkLaunchWorkload(Workload):
name = 'apklaunch'
description = '''
Installs and runs a .apk file, waits wait_time_seconds, and tests if the app
has started successfully.
'''
supported_platforms = ['android']
parameters = [
        Parameter('apk_file', description='Name of the .apk file to run', mandatory=True),
Parameter('uninstall_required', kind=bool, default=False,
description='Set to true if the package should be uninstalled'),
Parameter('wait_time_seconds', kind=int, default=0,
description='Seconds to wait before testing if the app is still alive')
]
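    # Illustrative agenda entry (sketch; parameter names as declared above):
    #   workloads:
    #     - name: apklaunch
    #       params:
    #         apk_file: example.apk
    #         wait_time_seconds: 5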
def setup(self, context):
apk_file = context.resolver.get(File(self, self.apk_file))
self.package = ApkInfo(apk_file).package # pylint: disable=attribute-defined-outside-init
self.logger.info('Installing {}'.format(apk_file))
return self.device.install(apk_file)
def run(self, context):
self.logger.info('Starting {}'.format(self.package))
self.device.execute('am start -W {}'.format(self.package))
self.logger.info('Waiting {} seconds'.format(self.wait_time_seconds))
self.device.sleep(self.wait_time_seconds)
def update_result(self, context):
app_is_running = bool([p for p in self.device.ps() if p.name == self.package])
context.result.add_metric('ran_successfully', app_is_running)
def teardown(self, context):
if self.uninstall_required:
self.logger.info('Uninstalling {}'.format(self.package))
self.device.execute('pm uninstall {}'.format(self.package))
| apache-2.0 | 2,307,262,621,845,389,300 | 38.852459 | 98 | 0.691074 | false | 3.972222 | false | false | false |
M4telight/pymlbadge | pyml_badge.py | 1 | 4777 | import ugfx
import badge
import wifi
import network
from time import sleep
import usocket as socket
state_map = {
'up': 0,
'down': 1,
'left': 2,
'right': 3,
'a': 4,
'b': 5,
'start': 8,
'select': 9,
}
states = [0 for _ in range(14)]
def handle_key(id, pressed):
states[id] = pressed
connection.send_key_states(states)
def handle_up(pressed):
    handle_key(state_map['up'], int(pressed))
def handle_down(pressed):
    handle_key(state_map['down'], int(pressed))
def handle_left(pressed):
    handle_key(state_map['left'], int(pressed))
def handle_right(pressed):
    handle_key(state_map['right'], int(pressed))
def handle_a(pressed):
    handle_key(state_map['a'], int(pressed))
def handle_b(pressed):
    handle_key(state_map['b'], int(pressed))
def handle_start(pressed):
    handle_key(state_map['start'], int(pressed))
def handle_select(pressed):
    handle_key(state_map['select'], int(pressed))
def connect_to_wifi(ssid='pymlbadge', password='pymlbadge'):
show_message("Waiting for wifi...")
wlan = network.WLAN(network.STA_IF)
if not wlan.active() or not wlan.isconnected():
wlan.active(True)
print('connecting to:', ssid)
wlan.connect(ssid, password)
while not wlan.isconnected():
sleep(0.1)
print('network config:', wlan.ifconfig())
show_message("Connected")
def init_badge():
badge.init()
ugfx.init()
wifi.init()
connect_to_wifi()
def show_message(message):
ugfx.clear(ugfx.WHITE)
ugfx.string(10, 10, message, "Roboto_Regular12", 0)
ugfx.flush()
class Connection:
def __init__(self, listen_port, control_addr, control_port):
self.uid = None
self.listen_port = listen_port
self.listen_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
self.listen_sock.setblocking(False)
# self.listen_sock.bind(('0.0.0.0', self.listen_port))
addr = socket.getaddrinfo('0.0.0.0', listen_port)
self.listen_sock.bind(addr[0][-1])
self.control_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
self.control_dest = []
while len(self.control_dest) == 0:
self.control_dest = socket.getaddrinfo(control_addr, control_port)
self.control_dest = self.control_dest[0][-1]
print("registering")
self.register()
def ready(self):
return self.uid is not None
def register(self):
command = '/controller/new/{port}'.format(port=self.listen_port)
try:
self.control_sock.sendto(command.encode('utf-8'), self.control_dest)
except Exception as ex:
print("failed to register controller: {}".format(ex))
def handle_read(self, data):
data = data.decode('utf-8')
if '/' not in data: # bad, malicous data!!
return
command, data = data.rsplit('/', 1)
if command.startswith('/uid'):
self.handle_uid(data)
elif command.startswith('/rumble'):
# self.handle_rumble(data)
pass
elif command.startswith('/message'):
# self.handle_message(data)
pass
elif command.startswith('/download'):
# self.handle_download(data)
pass
elif command.startswith('/play'):
# self.handle_play(data)
pass
def handle_uid(self, data):
self.uid = data
print("Got UID {}".format(data))
self.init_inputs()
def start_listening(self):
self.listening = True
self._listener_loop()
def stop_listening(self):
self.listening = False
def _listener_loop(self):
while self.listening:
try:
data, addr = self.listen_sock.recvfrom(1024)
self.handle_read(data)
except:
pass
sleep(0.01)
def init_inputs(self):
print("initializing input callbacks")
ugfx.input_init()
ugfx.input_attach(ugfx.JOY_UP, handle_up)
ugfx.input_attach(ugfx.JOY_DOWN, handle_down)
ugfx.input_attach(ugfx.JOY_LEFT, handle_left)
ugfx.input_attach(ugfx.JOY_RIGHT, handle_right)
        ugfx.input_attach(ugfx.BTN_A, handle_a)
        ugfx.input_attach(ugfx.BTN_B, handle_b)
        ugfx.input_attach(ugfx.BTN_SELECT, handle_select)
        ugfx.input_attach(ugfx.BTN_START, handle_start)
def ping(self):
command = '/controller/{uid}/ping/{port}'.format(
uid=self.uid,
            port=self.listen_port
)
        self.control_sock.sendto(command.encode('utf-8'), self.control_dest)
def send_key_states(self, states):
command = '/controller/{uid}/states/{states}'.format(
uid=self.uid, states=''.join(map(str, states)))
self.listen_sock.sendto(command.encode('utf-8'), self.control_dest)
init_badge()
destination = 'control.ilexlux.xyz'
show_message("Connecting to {}".format(destination))
connection = Connection(1338, destination, 1338)
connection.start_listening()
| gpl-3.0 | -9,105,801,987,702,029,000 | 26.773256 | 80 | 0.597027 | false | 3.446609 | false | false | false |
quamis/pyBackup | pyBackup/Writer/Backup/Writer.py | 1 | 1486 | import os, shutil
import logging
class Writer(object):
def __init__(self, backupBasePath, sourceBackupBasePath, sourceSourceBasePath, id):
self.backupBasePath = backupBasePath
self.sourceBackupBasePath = sourceBackupBasePath
self.sourceSourceBasePath = sourceSourceBasePath
self.id = id
def initialize(self):
logging.info("writer initialize")
def destroy(self):
self.commit()
def commit(self):
pass
def getFilePathInSource(self, npath):
return "%s%s/%s" % (self.backupBasePath, self.id, npath.replace(self.sourceSourceBasePath, '', 1))
def getFilePathInBackup(self, npath):
return self.sourceBackupBasePath + npath.replace(self.sourceSourceBasePath, '', 1)
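    # Illustrative mapping (assumed paths): with backupBasePath='/bak/',
    # sourceBackupBasePath='/bak/current/', sourceSourceBasePath='/home/u/'
    # and id='42', a source path '/home/u/f.txt' maps to '/bak/42/f.txt'
    # in the backup and is copied from '/bak/current/f.txt'.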
def getDestinationFilePathToContent(self, p):
return p
def updateFile(self, opath, npath):
dst = self.getFilePathInSource(npath.path)
src = self.getFilePathInBackup(npath.path)
self.mkdir(dst)
shutil.copyfile(src, dst)
def deleteFile(self, opath):
dst = self.getFilePathInSource(opath.path)
src = self.getFilePathInBackup(opath.path)
self.mkdir(dst)
shutil.copyfile(src, dst)
def mkdir(self, fpath):
dname = os.path.dirname(fpath)+'/'
if not os.path.isdir(dname):
os.makedirs(dname)
| gpl-2.0 | -4,966,824,692,588,552,000 | 29.659574 | 106 | 0.604307 | false | 4.049046 | false | false | false |
G4brym/Ecomerce | Main/utilities/general.py | 1 | 1889 | from Main.models.categories import Category, Product_category
from Main.models.products import Product
from Main.utilities.logs import logError
from django.utils import timezone
import datetime
def getCategoryFromProduct(product):
category = Product_category.objects.filter(product_id=product["id"])[0].category.get_as_dict()
category2 = None
category1 = None
category0 = None
try:
if category["level_depth"] == 2:
category2 = category
category1 = Category.objects.get(id=category["parent_id"]).get_as_dict()
category0 = Category.objects.get(id=category2["parent_id"]).get_as_dict()
elif category["level_depth"] == 1:
category1 = category
category0 = Category.objects.get(id=category1["parent_id"]).get_as_dict()
else:
category0 = category
except:
logError("category not found on product page id:" + str(product["id"]))
return {"category0": category}
return {
"category2": category2,
"category1": category1,
"category0": category0
}
def getDailyDeals():
    # TODO: modify to get the actual deals
products = Product.objects.filter(daily_deal=True)[:5]
tmp = []
end = (timezone.localtime(timezone.now()) + datetime.timedelta(days=1)).strftime('%m/%d/%Y 00:00:00')
for prod in products:
tmp.append(prod.get_as_big_dict())
return {
"products": tmp,
"endtime": end
}
def getLayoutCategories():
categories = Category.objects.filter(active=True, level_depth=0).order_by('position')
final_categories = []
for category in categories:
final_categories.append(category.get_as_dict())
return final_categories
def getMainDict():
return {
"layoutCategories": getLayoutCategories()
} | apache-2.0 | -3,354,790,142,302,766,600 | 29 | 105 | 0.627316 | false | 4.027719 | false | false | false |
hurricup/intellij-community | python/helpers/python-skeletons/pytest/__init__.py | 1 | 8866 | """Skeleton for 'pytest'.
Project: pytest 2.6.4 <http://pytest.org/latest/>
Skeleton by: Bruno Oliveira <[email protected]>
Exposing everything that can be extracted from `pytest_namespace` hook
in standard pytest modules, using original docstrings.
"""
# _pytest.genscript
def freeze_includes():
"""
Returns a list of module names used by py.test that should be
included by cx_freeze.
"""
# _pytest.main
class collect:
class Item:
""" a basic test invocation item. Note that for a single function
there might be multiple test invocation items.
"""
class Collector:
""" Collector instances create children through collect()
and thus iteratively build a tree.
"""
class File:
""" base class for collecting tests from a file. """
class Session:
"""
"""
# _pytest.python
class Module:
""" Collector for test classes and functions. """
class Class:
""" Collector for test methods. """
class Instance:
"""
"""
class Function:
""" a Function Item is responsible for setting up and executing a
Python test function.
"""
class Generator:
"""
"""
@staticmethod
def _fillfuncargs(function):
""" fill missing funcargs for a test function. """
# _pytest.mark
class mark:
def __getattr__(self, item):
"""
This class may have any attribute, so this method should exist
"""
pass
@staticmethod
def skipif(condition, reason=None):
"""skip the given test function if eval(condition) results in a True
value.
Optionally specify a reason for better reporting.
Evaluation happens within the module global context.
Example: ``skipif('sys.platform == "win32"')`` skips the test if
we are on the win32 platform.
see http://pytest.org/latest/skipping.html
"""
@staticmethod
def xfail(condition=None, reason=None, raises=None, run=True, strict=False):
"""mark the the test function as an expected failure if eval(condition)
has a True value.
Optionally specify a reason for better reporting and run=False if
you don't even want to execute the test function.
See http://pytest.org/latest/skipping.html
"""
@staticmethod
def parametrize(argnames, argvalues):
"""call a test function multiple times passing in different arguments
in turn.
:type argnames: str | list[str]
:param argvalues: generally needs to be a list of values if argnames
specifies only one name or a list of tuples of values if
argnames specifies multiple names.
Example: @parametrize('arg1', [1,2]) would lead to two calls of the
decorated test function, one with arg1=1 and another with arg1=2.
see http://pytest.org/latest/parametrize.html for more info
and examples.
"""
@staticmethod
def usefixtures(*fixturenames):
"""mark tests as needing all of the specified fixtures.
see http://pytest.org/latest/fixture.html#usefixtures
"""
@staticmethod
def tryfirst(f):
"""mark a hook implementation function such that the plugin machinery
will try to call it first/as early as possible.
"""
@staticmethod
def trylast(f):
"""mark a hook implementation function such that the plugin machinery
will try to call it last/as late as possible.
"""
@staticmethod
def hookwrapper(f):
"""A hook wrapper is a generator function which yields exactly once.
When pytest invokes hooks it first executes hook wrappers and passes
the same arguments as to the regular hooks.
"""
# _pytest.pdb
def set_trace():
""" invoke PDB set_trace debugging, dropping any IO capturing. """
# _pytest.python
def raises(ExpectedException, *args, **kwargs):
""" assert that a code block/function call raises @ExpectedException and
raise a failure exception otherwise.
:type ExpectedException: T
This helper produces a ``py.code.ExceptionInfo()`` object.
If using Python 2.5 or above, you may use this function as a
context manager::
>>> with raises(ZeroDivisionError):
... 1/0
Or you can specify a callable by passing a to-be-called lambda::
>>> raises(ZeroDivisionError, lambda: 1/0)
<ExceptionInfo ...>
or you can specify an arbitrary callable with arguments::
>>> def f(x): return 1/x
...
>>> raises(ZeroDivisionError, f, 0)
<ExceptionInfo ...>
>>> raises(ZeroDivisionError, f, x=0)
<ExceptionInfo ...>
A third possibility is to use a string to be executed::
>>> raises(ZeroDivisionError, "f(0)")
<ExceptionInfo ...>
Performance note:
-----------------
Similar to caught exception objects in Python, explicitly clearing
local references to returned ``py.code.ExceptionInfo`` objects can
help the Python interpreter speed up its garbage collection.
Clearing those references breaks a reference cycle
(``ExceptionInfo`` --> caught exception --> frame stack raising
the exception --> current frame stack --> local variables -->
``ExceptionInfo``) which makes Python keep all objects referenced
from that cycle (including all local variables in the current
frame) alive until the next cyclic garbage collection run. See the
official Python ``try`` statement documentation for more detailed
information.
"""
def fixture(scope="function", params=None, autouse=False, ids=None):
""" (return a) decorator to mark a fixture factory function.
This decorator can be used (with or or without parameters) to define
a fixture function. The name of the fixture function can later be
referenced to cause its invocation ahead of running tests: test
modules or classes can use the pytest.mark.usefixtures(fixturename)
marker. Test functions can directly use fixture names as input
arguments in which case the fixture instance returned from the fixture
function will be injected.
:arg scope: the scope for which this fixture is shared, one of
"function" (default), "class", "module", "session".
:arg params: an optional list of parameters which will cause multiple
invocations of the fixture function and all of the tests
using it.
:arg autouse: if True, the fixture func is activated for all tests that
can see it. If False (the default) then an explicit
reference is needed to activate the fixture.
:arg ids: list of string ids each corresponding to the params
so that they are part of the test id. If no ids are provided
they will be generated automatically from the params.
"""
def yield_fixture(scope="function", params=None, autouse=False, ids=None):
""" (return a) decorator to mark a yield-fixture factory function
(EXPERIMENTAL).
This takes the same arguments as :py:func:`pytest.fixture` but
expects a fixture function to use a ``yield`` instead of a ``return``
statement to provide a fixture. See
http://pytest.org/en/latest/yieldfixture.html for more info.
"""
# _pytest.recwarn
def deprecated_call(func, *args, **kwargs):
""" assert that calling ``func(*args, **kwargs)``
triggers a DeprecationWarning.
"""
# _pytest.runner
def exit(msg):
""" exit testing process as if KeyboardInterrupt was triggered. """
exit.Exception = Exception
def skip(msg=""):
""" skip an executing test with the given message. Note: it's usually
better to use the pytest.mark.skipif marker to declare a test to be
skipped under certain conditions like mismatching platforms or
dependencies. See the pytest_skipping plugin for details.
"""
skip.Exception = Exception
def fail(msg="", pytrace=True):
""" explicitely fail an currently-executing test with the given Message.
:arg pytrace: if false the msg represents the full failure information
and no python traceback will be reported.
"""
fail.Exception = Exception
def importorskip(modname, minversion=None):
""" return imported module if it has at least "minversion" as its
    __version__ attribute. If no minversion is specified a skip
    is only triggered if the module cannot be imported.
Note that version comparison only works with simple version strings
like "1.2.3" but not "1.2.3.dev1" or others.
"""
# _pytest.skipping
def xfail(reason=""):
""" xfail an executing test or setup functions with the given reason.
"""
| apache-2.0 | -3,909,746,662,439,518,000 | 30.439716 | 80 | 0.655651 | false | 4.584281 | true | false | false |
muraliselva10/cloudkitty | cloudkitty/storage/sqlalchemy/alembic/versions/3eecce93ff43_create_invoice_details_table.py | 1 | 1278 | """create invoice_details table
Revision ID: 3eecce93ff43
Revises: 792b438b663
Create Date: 2016-03-29 22:16:01.022645
"""
# revision identifiers, used by Alembic.
revision = '3eecce93ff43'
down_revision = '792b438b663'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table('invoice_details',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('invoice_date', sa.DateTime(), nullable=False),
sa.Column('invoice_period_from', sa.DateTime(), nullable=False),
sa.Column('invoice_period_to', sa.DateTime(), nullable=False),
sa.Column('tenant_id', sa.String(length=255), nullable=False),
sa.Column('tenant_name', sa.String(length=255), nullable=False),
sa.Column('invoice_id', sa.String(length=255), nullable=False),
sa.Column('invoice_data', sa.Text(), nullable=False),
sa.Column('total_cost', sa.Numeric(precision=13,scale=2), nullable=True),
sa.Column('paid_cost', sa.Numeric(precision=13,scale=2), nullable=True),
sa.Column('balance_cost', sa.Numeric(precision=13,scale=2), nullable=True),
sa.Column('payment_status', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('id'),
mysql_charset='utf8',
mysql_engine='InnoDB')
def downgrade():
op.drop_table('invoice_details')
| apache-2.0 | 1,116,278,782,699,685,900 | 35.514286 | 79 | 0.701095 | false | 3.219144 | false | false | false |
project-rig/nengo_spinnaker | nengo_spinnaker/regions/keyspaces.py | 1 | 6030 | from six import iteritems
import struct
from .region import Region
class KeyspacesRegion(Region):
"""A region of memory which represents data formed from a list of
:py:class:`~rig.bitfield.BitField` instances representing SpiNNaker routing
keys.
Each "row" represents a keyspace, and each "column" is formed by getting
the result of a function applied to the keyspace. Each field will be one
    word long, and all keyspaces are expected to be 32 bits long.
"""
def __init__(self, signals_and_arguments,
fields=list(),
partitioned_by_atom=False,
prepend_num_keyspaces=False):
"""Create a new region representing keyspace information.
Parameters
----------
signals_and_arguments : [(Signal, **kwargs), ...]
A list of tuples of Signals (which contain/refer to keyspaces) and
arguments used to construct the keyspace from the signal. The
keyspaces will (eventually) be generated by calling
            ``signal.keyspace(**kwargs)``.
fields : iterable
An iterable of callables which will be called on each key and must
            return an appropriately sized bytestring representing the data to
            write to memory. The appropriate size is the number of bytes
            required to represent a full key or mask (e.g., 4 bytes for 32-bit
            keyspaces).
partitioned_by_atom : bool
If True then one set of fields will be written out per atom, if
False then fields for all keyspaces are written out regardless of
the vertex slice.
prepend_num_keyspaces : bool
Prepend a word containing the number of keyspaces to the region
data when it is written out.
"""
# Save the keyspaces, fields and partitioned status
self.signals_and_arguments = list(signals_and_arguments)
self.fields = list(fields)
self.partitioned = partitioned_by_atom
self.prepend_num_keyspaces = prepend_num_keyspaces
self.bytes_per_field = 4
def sizeof(self, vertex_slice):
"""Get the size of a slice of this region in bytes.
See :py:meth:`.region.Region.sizeof`
"""
# Get the size from representing the fields
if not self.partitioned:
n_keys = len(self.signals_and_arguments)
else:
assert vertex_slice.stop < len(self.signals_and_arguments) + 1
n_keys = vertex_slice.stop - vertex_slice.start
pp_size = 0 if not self.prepend_num_keyspaces else 4
return self.bytes_per_field * n_keys * len(self.fields) + pp_size
def write_subregion_to_file(self, fp, vertex_slice=None, **field_args):
"""Write the data contained in a portion of this region out to file.
"""
data = b''
# Get a slice onto the keys
if self.partitioned:
assert vertex_slice.stop < len(self.signals_and_arguments) + 1
key_slice = vertex_slice if self.partitioned else slice(None)
# Write the prepends
if self.prepend_num_keyspaces:
nks = len(self.signals_and_arguments[key_slice])
data += struct.pack("<I", nks)
# For each key fill in each field
for signal, kwargs in self.signals_and_arguments[key_slice]:
ks = signal.keyspace(**kwargs) # Construct the keyspace
for field in self.fields:
data += struct.pack("<I", field(ks, **field_args))
# Write out
fp.write(data)
# NOTE: This closure intentionally tries to look like a class.
# TODO: Neaten this docstring.
def KeyField(maps={}, field=None, tag=None):
"""Create new field for a :py:class:`~KeyspacesRegion` that will fill in
specified fields of the key and will then write out a key.
Parameters
----------
maps : dict
A mapping from keyword-argument of the field to the field of the key
that this value should be inserted into.
field : string or None
The field to get the key or None for all fields.
For example:
ks = Keyspace()
ks.add_field(i)
# ...
kf = KeyField(maps={'subvertex_index': 'i'})
k = Keyspace()
kf(k, subvertex_index=11)
Will return the key with the 'i' key set to 11.
"""
key_field = field
def key_getter(keyspace, **kwargs):
# Build a set of fields to fill in
fills = {}
for (kwarg, field) in iteritems(maps):
fills[field] = kwargs[kwarg]
# Build the key with these fills made
key = keyspace(**fills)
return key.get_value(field=key_field, tag=tag)
return key_getter
# NOTE: This closure intentionally tries to look like a class.
def MaskField(**kwargs):
"""Create a new field for a :py:class:`~.KeyspacesRegion` that will write
out a mask value from a keyspace.
Parameters
----------
field : string
The name of the keyspace field to store the mask for.
tag : string
The name of the keyspace tag to store the mask for.
Raises
------
TypeError
If both or neither field and tag are specified.
Returns
-------
function
A function which can be used in the `fields` argument to
:py:class:`~.KeyspacesRegion` that will include a specified mask in the
region data.
"""
# Process the arguments
field = kwargs.get("field")
tag = kwargs.get("tag")
# Create the field method
if field is not None and tag is None:
def mask_getter(keyspace, **kwargs):
return keyspace.get_mask(field=field)
return mask_getter
elif tag is not None and field is None:
def mask_getter(keyspace, **kwargs):
return keyspace.get_mask(tag=tag)
return mask_getter
else:
raise TypeError("MaskField expects 1 argument, "
"either 'field' or 'tag'.")
| mit | -3,928,575,192,786,206,700 | 33.655172 | 79 | 0.615755 | false | 4.285714 | false | false | false |
olduvaihand/ProjectEuler | src/python/problem023.py | 1 | 2055 | # -*- coding: utf-8 -*-
# ProjectEuler/src/python/problem023.py
#
# Non-abundant sums
# =================
# Published on Friday, 2nd August 2002, 06:00 pm
#
# A perfect number is a number for which the sum of its proper divisors is
# exactly equal to the number. For example, the sum of the proper divisors of
# 28 would be 1 + 2 + 4 + 7 + 14 = 28, which means that 28 is a perfect number.
# A number n is called deficient if the sum of its proper divisors is less than
# n and it is called abundant if this sum exceeds n. As 12 is the smallest
# abundant number, 1 + 2 + 3 + 4 + 6 = 16, the smallest number that can be
# written as the sum of two abundant numbers is 24. By mathematical analysis,
# it can be shown that all integers greater than 28123 can be written as the
# sum of two abundant numbers. However, this upper limit cannot be reduced any
# further by analysis even though it is known that the greatest number that
# cannot be expressed as the sum of two abundant numbers is less than this
# limit. Find the sum of all the positive integers which cannot be written as
# the sum of two abundant numbers.
import util
MINIMUM_ABUNDANT_NUMBER = 12
def is_abundant(n):
divisors = util.proper_divisors(n)
return sum(divisors) > n
def calculate_abundant_numbers(max_n):
return [n for n in range(1, max_n) if is_abundant(n)]
def non_abundant_sums(max_n):
sums = []
abundant_numbers = calculate_abundant_numbers(max_n)
abundant_set = set(abundant_numbers)
    for i in range(1, max_n+1):
        for a in abundant_numbers:
            difference = i - a
            if difference < MINIMUM_ABUNDANT_NUMBER:
                # abundant_numbers is ascending, so no remaining a can give
                # a valid pair; i is not a sum of two abundant numbers
                sums.append(i)
                break
            if difference in abundant_set:
                # i = a + difference with both abundant, so skip i
                break
        else:
            # inner loop ran to completion: no abundant decomposition found
            sums.append(i)
return sum(sums)
def main():
total = non_abundant_sums(28123)
print "The sum of all positive integers which cannot be written as the ",
print "sum of 2 abundant numbers is %d." % (total,)
if __name__ == "__main__":
main()
| mit | -502,370,277,492,093,950 | 32.655738 | 79 | 0.665855 | false | 3.539655 | false | false | false |
oasis-open/cti-python-stix2 | stix2/markings/__init__.py | 1 | 8378 | """
Functions for working with STIX 2 Data Markings.
These high level functions will operate on both object-level markings and
granular markings unless otherwise noted in each of the functions.
Note:
These functions are also available as methods on SDOs, SROs, and Marking
Definitions. The corresponding methods on those classes are identical to
these functions except that the `obj` parameter is omitted.
.. autosummary::
:toctree: markings
granular_markings
object_markings
utils
|
"""
from stix2.markings import granular_markings, object_markings
def get_markings(obj, selectors=None, inherited=False, descendants=False, marking_ref=True, lang=True):
"""
Get all markings associated to the field(s) specified by selectors.
Args:
obj: An SDO or SRO object.
selectors: string or list of selectors strings relative to the SDO or
SRO in which the properties appear.
inherited (bool): If True, include object level markings and granular
markings inherited relative to the properties.
descendants (bool): If True, include granular markings applied to any
children relative to the properties.
marking_ref (bool): If False, excludes markings that use
``marking_ref`` property.
lang (bool): If False, excludes markings that use ``lang`` property.
Returns:
list: Marking identifiers that matched the selectors expression.
Note:
If ``selectors`` is None, operation will be performed only on object
level markings.
"""
if selectors is None:
return object_markings.get_markings(obj)
results = granular_markings.get_markings(
obj,
selectors,
inherited,
descendants,
marking_ref,
lang,
)
if inherited:
results.extend(object_markings.get_markings(obj))
return list(set(results))
def set_markings(obj, marking, selectors=None, marking_ref=True, lang=True):
"""
    Remove all markings associated with the selectors and append a new granular
marking. Refer to `clear_markings` and `add_markings` for details.
Args:
obj: An SDO or SRO object.
marking: identifier or list of marking identifiers that apply to the
properties selected by `selectors`.
selectors: string or list of selectors strings relative to the SDO or
SRO in which the properties appear.
marking_ref (bool): If False, markings that use the ``marking_ref``
property will not be removed.
lang (bool): If False, markings that use the ``lang`` property
will not be removed.
Returns:
A new version of the given SDO or SRO with specified markings removed
and new ones added.
Note:
If ``selectors`` is None, operations will be performed on object level
markings. Otherwise on granular markings.
"""
if selectors is None:
return object_markings.set_markings(obj, marking)
else:
return granular_markings.set_markings(obj, marking, selectors, marking_ref, lang)
def remove_markings(obj, marking, selectors=None):
"""
Remove a marking from this object.
Args:
obj: An SDO or SRO object.
marking: identifier or list of marking identifiers that apply to the
properties selected by `selectors`.
selectors: string or list of selectors strings relative to the SDO or
SRO in which the properties appear.
Raises:
InvalidSelectorError: If `selectors` fail validation.
MarkingNotFoundError: If markings to remove are not found on
the provided SDO or SRO.
Returns:
A new version of the given SDO or SRO with specified markings removed.
Note:
If ``selectors`` is None, operations will be performed on object level
markings. Otherwise on granular markings.
"""
if selectors is None:
return object_markings.remove_markings(obj, marking)
else:
return granular_markings.remove_markings(obj, marking, selectors)
def add_markings(obj, marking, selectors=None):
"""
Append a marking to this object.
Args:
obj: An SDO or SRO object.
marking: identifier or list of marking identifiers that apply to the
properties selected by `selectors`.
selectors: string or list of selectors strings relative to the SDO or
SRO in which the properties appear.
Raises:
InvalidSelectorError: If `selectors` fail validation.
Returns:
A new version of the given SDO or SRO with specified markings added.
Note:
If ``selectors`` is None, operations will be performed on object level
markings. Otherwise on granular markings.
"""
if selectors is None:
return object_markings.add_markings(obj, marking)
else:
return granular_markings.add_markings(obj, marking, selectors)
def clear_markings(obj, selectors=None, marking_ref=True, lang=True):
"""
Remove all markings associated with the selectors.
Args:
obj: An SDO or SRO object.
selectors: string or list of selectors strings relative to the SDO or
SRO in which the field(s) appear(s).
marking_ref (bool): If False, markings that use the ``marking_ref``
property will not be removed.
lang (bool): If False, markings that use the ``lang`` property
will not be removed.
Raises:
InvalidSelectorError: If `selectors` fail validation.
MarkingNotFoundError: If markings to remove are not found on
the provided SDO or SRO.
Returns:
A new version of the given SDO or SRO with specified markings cleared.
Note:
If ``selectors`` is None, operations will be performed on object level
markings. Otherwise on granular markings.
"""
if selectors is None:
return object_markings.clear_markings(obj)
else:
return granular_markings.clear_markings(obj, selectors, marking_ref, lang)
def is_marked(obj, marking=None, selectors=None, inherited=False, descendants=False):
"""
Check if field(s) is marked by any marking or by specific marking(s).
Args:
obj: An SDO or SRO object.
marking: identifier or list of marking identifiers that apply to the
properties selected by `selectors`.
selectors: string or list of selectors strings relative to the SDO or
SRO in which the field(s) appear(s).
inherited (bool): If True, include object level markings and granular
markings inherited to determine if the properties is/are marked.
descendants (bool): If True, include granular markings applied to any
children of the given selector to determine if the properties
is/are marked.
Returns:
bool: True if ``selectors`` is found on internal SDO or SRO collection.
False otherwise.
Note:
When a list of marking identifiers is provided, if ANY of the provided
marking identifiers match, True is returned.
If ``selectors`` is None, operation will be performed only on object
level markings.
"""
if selectors is None:
return object_markings.is_marked(obj, marking)
result = granular_markings.is_marked(
obj,
marking,
selectors,
inherited,
descendants,
)
if inherited:
granular_marks = granular_markings.get_markings(obj, selectors)
object_marks = object_markings.get_markings(obj)
if granular_marks:
result = granular_markings.is_marked(
obj,
granular_marks,
selectors,
inherited,
descendants,
)
result = result or object_markings.is_marked(obj, object_marks)
return result
class _MarkingsMixin(object):
pass
# Note that all of these methods will return a new object because of immutability
_MarkingsMixin.get_markings = get_markings
_MarkingsMixin.set_markings = set_markings
_MarkingsMixin.remove_markings = remove_markings
_MarkingsMixin.add_markings = add_markings
_MarkingsMixin.clear_markings = clear_markings
_MarkingsMixin.is_marked = is_marked
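# Illustrative usage sketch (``indicator`` and ``tlp_red`` are hypothetical
# placeholders for an SDO instance and a marking-definition id):
#   indicator = add_markings(indicator, tlp_red, selectors=["description"])
#   is_marked(indicator, tlp_red, selectors=["description"])  # -> True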
| bsd-3-clause | -2,832,197,835,230,233,600 | 31.854902 | 103 | 0.66424 | false | 4.231313 | false | false | false |
pacifica/pacifica-archiveinterface | pacifica/archiveinterface/backends/oracle_hsm_sideband/archive.py | 2 | 6604 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""HSM Sideband Backend Archive Module.
Module that implements the abstract_backend_archive class for a HSM Sideband
backend.
"""
import os
import stat
import shutil
from ...archive_utils import un_abs_path
from ...config import get_config
from ...exception import ArchiveInterfaceError
from .extended_file_factory import extended_hsmsideband_factory
from ..abstract.archive import AbstractBackendArchive
from ...id2filename import id2filename
def path_info_munge(filepath):
"""Munge the path for this filetype."""
return_path = un_abs_path(id2filename(int(filepath)))
return return_path
class HsmSidebandBackendArchive(AbstractBackendArchive):
"""HSM Sideband Backend Archive Class.
Class that implements the abstract base class for the hsm sideband
archive interface backend.
"""
def __init__(self, prefix):
"""Constructor for HSM Sideband Backend Archive."""
super(HsmSidebandBackendArchive, self).__init__(prefix)
self._prefix = prefix
self._file = None
self._fpath = None
self._filepath = None
        # since the database prefix may be different from the system the file is mounted on
self._sam_qfs_prefix = get_config().get(
'hsm_sideband', 'sam_qfs_prefix')
def open(self, filepath, mode):
"""Open a hsm sideband file."""
# want to close any open files first
try:
self.close()
except ArchiveInterfaceError as ex:
err_str = "Can't close previous HSM Sideband file before opening new "\
'one with error: ' + str(ex)
raise ArchiveInterfaceError(err_str)
try:
self._fpath = un_abs_path(filepath)
filename = os.path.join(self._prefix, path_info_munge(self._fpath))
self._filepath = filename
            # path the database refers to, rather than just the file system mount path
sam_qfs_path = os.path.join(
self._sam_qfs_prefix, path_info_munge(self._fpath))
dirname = os.path.dirname(filename)
if not os.path.isdir(dirname):
os.makedirs(dirname, 0o755)
self._file = extended_hsmsideband_factory(
self._filepath, mode, sam_qfs_path)
return self
except Exception as ex:
err_str = "Can't open HSM Sideband file with error: " + str(ex)
raise ArchiveInterfaceError(err_str)
def close(self):
"""Close a HSM Sideband file."""
try:
if self._file:
self._file.close()
self._file = None
except Exception as ex:
err_str = "Can't close HSM Sideband file with error: " + str(ex)
raise ArchiveInterfaceError(err_str)
def read(self, blocksize):
"""Read a HSM Sideband file."""
try:
if self._file:
return self._file.read(blocksize)
except Exception as ex:
err_str = "Can't read HSM Sideband file with error: " + str(ex)
raise ArchiveInterfaceError(err_str)
err_str = 'Internal file handle invalid'
raise ArchiveInterfaceError(err_str)
def seek(self, offset):
"""Seek in the file to the offset."""
try:
if self._file:
return self._file.seek(offset)
except Exception as ex:
err_str = "Can't seek HSM Sideband file with error: " + str(ex)
raise ArchiveInterfaceError(err_str)
err_str = 'Internal file handle invalid'
raise ArchiveInterfaceError(err_str)
def write(self, buf):
"""Write a HSM Sideband file to the archive."""
try:
if self._file:
return self._file.write(buf)
except Exception as ex:
err_str = "Can't write HSM Sideband file with error: " + str(ex)
raise ArchiveInterfaceError(err_str)
err_str = 'Internal file handle invalid'
raise ArchiveInterfaceError(err_str)
def set_mod_time(self, mod_time):
"""Set the mod time on a HSM file."""
try:
if self._filepath:
os.utime(self._filepath, (mod_time, mod_time))
except Exception as ex:
err_str = "Can't set HSM Sideband file mod time with error: " + \
str(ex)
raise ArchiveInterfaceError(err_str)
def set_file_permissions(self):
"""Set the file permissions for a posix file."""
try:
if self._filepath:
os.chmod(self._filepath, 0o444)
except Exception as ex:
err_str = "Can't set HSM Sideband file permissions with error: " + \
str(ex)
raise ArchiveInterfaceError(err_str)
def stage(self):
"""Stage a HSM Sideband file."""
try:
if self._file:
return self._file.stage()
except Exception as ex:
err_str = "Can't stage HSM Sideband file with error: " + str(ex)
raise ArchiveInterfaceError(err_str)
err_str = 'Internal file handle invalid'
raise ArchiveInterfaceError(err_str)
def status(self):
"""Get the status of a HSM Sideband file."""
try:
if self._file:
return self._file.status()
except Exception as ex:
err_str = "Can't get HSM Sideband file status with error: " + \
str(ex)
raise ArchiveInterfaceError(err_str)
err_str = 'Internal file handle invalid'
raise ArchiveInterfaceError(err_str)
def patch(self, file_id, old_path):
"""Move a hsm file."""
try:
fpath = un_abs_path(file_id)
new_filepath = os.path.join(self._prefix, path_info_munge(fpath))
new_directories = os.path.dirname(new_filepath)
if not os.path.exists(new_directories):
os.makedirs(new_directories)
shutil.move(old_path, new_filepath)
except Exception as ex:
err_str = "Can't move posix file with error: " + str(ex)
raise ArchiveInterfaceError(err_str)
def remove(self):
"""Remove the file for a posix file."""
try:
if self._filepath:
os.chmod(self._filepath, stat.S_IWRITE)
os.unlink(self._filepath)
self._filepath = None
except Exception as ex:
err_str = "Can't remove posix file with error: " + str(ex)
raise ArchiveInterfaceError(err_str)
| lgpl-3.0 | -162,728,773,794,844,740 | 36.101124 | 91 | 0.585554 | false | 4.044091 | false | false | false |
scholer/na_strand_model | nascent/graph_sim_nx/connected_multigraph.py | 2 | 2027 | # -*- coding: utf-8 -*-
## Copyright 2015 Rasmus Scholer Sorensen, [email protected]
##
## This file is part of Nascent.
##
## Nascent is free software: you can redistribute it and/or modify
## it under the terms of the GNU Affero General Public License as
## published by the Free Software Foundation, either version 3 of the
## License, or (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Affero General Public License for more details.
##
## You should have received a copy of the GNU Affero General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
# pylint: disable=C0103,W0212
"""
Utilities for breaking and merging connected multigraph components.
"""
import networkx as nx
class ConnectedMultiGraph(nx.MultiGraph):
"""
A NetworkX multigraph that makes it easy to break and merge
connected component graphs.
"""
def break_at_edge(self, source, target):
"""
Break graph at edge and see if it's still connected.
Four cases:
Case 0: Nothing breaks off
Case 1: Two smaller connected components.
Case 2: One node breaks off.
Case 3: Two separate nodes.
returns
Case-Int, [list of surviving graphs], [list of free nodes]
"""
self.remove_edge(source, target)
if len(self) == 2:
return 3, None, [source, target]
        # an endpoint left with no remaining edges has broken off the graph
        # (its degree is 0 after the removal, not 1)
        if len(self[source]) == 0:
            self.remove_node(source)
            return 2, self, [source]
        if len(self[target]) == 0:
            self.remove_node(target)
            return 2, self, [target]
subgraphs = list(nx.connected_component_subgraphs(self, copy=False))
if len(subgraphs) == 1:
return 0, [self], None
else:
assert len(subgraphs) == 2
return 1, subgraphs, None
| gpl-3.0 | 3,514,739,809,222,993,400 | 32.783333 | 78 | 0.628022 | false | 3.928295 | false | false | false |
jhertfe/vvs-delay | testrun.py | 1 | 1249 | import os
import importlib
tests = []
for root, dirs, files in os.walk("testing"):
for file in files:
if file.endswith(".py") and not os.path.basename(file) == '__init__.py':
# print 'found test', os.path.join(root, file)
tests.append(os.path.join(root, file))
passed_or_failed = []
current_task = ''
for i, t in enumerate(tests):
try:
print '\n{}\n{:=^80}\n{}\n'.format('=' * 80, ' ' + t + ' ', '=' * 80)
current_task = 'importing {}'.format(t)
print '{}\n{}'.format('-' * 80, current_task)
t_path = t.replace('/','.')[:-3]
current_test = importlib.import_module(t_path)
print '## passed'
current_task = 'running tests'
print '{}\n{}\n##'.format('-' * 80, current_task)
failed = current_test.test()
        passed_or_failed.append(not failed)
except Exception as e:
print 'FAILED'
passed_or_failed.append(False)
print 'EXCEPTION:', e
print '\n{}\n{:+^80}\n{}\n'.format('+' * 80, ' TEST RESULTS ', '+' * 80)
for i, t in enumerate(tests):
status = 'successfully' if passed_or_failed[i]\
else 'FAILED on {}'.format(current_task)
print 'finished test {}\n -> {}\n'.format(t, status)
| mit | 292,270,692,039,532,900 | 31.868421 | 80 | 0.551641 | false | 3.312997 | true | false | false |
Jonestj1/mbuild | mbuild/utils/visualization.py | 1 | 4433 | # this code is taken from: https://gist.github.com/mbostock/4339083
d3_tree_template = """
<!DOCTYPE html>
<meta charset="utf-8">
<style>
.node {
cursor: pointer;
}
.node circle {
fill: #fff;
stroke: steelblue;
stroke-width: 1.5px;
}
.node text {
font: 14px sans-serif;
}
.link {
fill: none;
stroke: #ccc;
stroke-width: 3px;
}
</style>
<body>
<script src="http://d3js.org/d3.v3.min.js"></script>
<script>
var margin = {top: 20, right: 120, bottom: 20, left: 120},
width = 960 - margin['right'] - margin['left'],
height = 800 - margin.top - margin.bottom;
var i = 0,
duration = 750,
root;
var tree = d3.layout.tree()
.size([height, width]);
var diagonal = d3.svg.diagonal()
.projection(function(d) { return [d.y, d.x]; });
var svg = d3.select("body").append("svg")
.attr("width", width + margin['right'] + margin['left'])
.attr("height", height + margin.top + margin.bottom)
.append("g")
.attr("transform", "translate(" + margin['left'] + "," + margin.top + ")");
var myjson = '%s'
root = JSON.parse( myjson );
root.x0 = height / 2;
root.y0 = 0;
function collapse(d) {
if (d.children) {
d._children = d.children;
d._children.forEach(collapse);
d.children = null;
}
}
root.children.forEach(collapse);
update(root);
d3.select(self.frameElement).style("height", "800px");
function update(source) {
// Compute the new tree layout.
var nodes = tree.nodes(root).reverse(),
links = tree.links(nodes);
// Normalize for fixed-depth.
nodes.forEach(function(d) { d.y = d.depth * 180; });
// Update the nodes
var node = svg.selectAll("g.node")
.data(nodes, function(d) { return d.id || (d.id = ++i); });
// Enter any new nodes at the parent's previous position.
var nodeEnter = node.enter().append("g")
.attr("class", "node")
.attr("transform", function(d) { return "translate(" + source.y0 + "," + source.x0 + ")"; })
.on("click", click);
nodeEnter.append("image")
.attr("xlink:href", function(d) { return d.icon; })
.attr("x", "-40px")
.attr("y", "-40px")
.attr("width", "80px")
.attr("height", "80px");
nodeEnter.append("circle")
.attr("r", 1e-6)
.attr("cx", "-2.2em")
.attr("cy", "-2em")
.style("fill", function(d) { return d._children ? "lightsteelblue" : "#fff"; });
nodeEnter.append("text")
.attr("x", -28)
.attr("dy", "-2em")
.attr("text-anchor", "start") .text(function(d) { return d.name; })
.style("fill-opacity", 1e-6);
// Transition nodes to their new position.
var nodeUpdate = node.transition()
.duration(duration)
.attr("transform", function(d) { return "translate(" + d.y + "," + d.x + ")"; });
nodeUpdate.select("circle")
.attr("r", 4.5)
.style("fill", function(d) { return d._children ? "lightsteelblue" : "#fff"; });
nodeUpdate.select("text")
.style("fill-opacity", 1);
// Transition exiting nodes to the parent's new position.
var nodeExit = node.exit().transition()
.duration(duration)
.attr("transform", function(d) { return "translate(" + source.y + "," + source.x + ")"; })
.remove();
nodeExit.select("circle")
.attr("r", 1e-6);
nodeExit.select("text")
.style("fill-opacity", 1e-6);
// Update the links
var link = svg.selectAll("path.link")
.data(links, function(d) { return d.target.id; });
// Enter any new links at the parent's previous position.
link.enter().insert("path", "g")
.attr("class", "link")
.attr("d", function(d) {
var o = {x: source.x0, y: source.y0};
return diagonal({source: o, target: o});
});
// Transition links to their new position.
link.transition()
.duration(duration)
.attr("d", diagonal);
// Transition exiting nodes to the parent's new position.
link.exit().transition()
.duration(duration)
.attr("d", function(d) {
var o = {x: source.x, y: source.y};
return diagonal({source: o, target: o});
})
.remove();
// Stash the old positions for transition.
nodes.forEach(function(d) {
d.x0 = d.x;
d.y0 = d.y;
});
}
// Toggle children on click.
function click(d) {
if (d.children) {
d._children = d.children;
d.children = null;
} else {
d.children = d._children;
d._children = null;
}
update(d);
}
</script>
"""
| mit | -5,776,204,275,036,614,000 | 23.627778 | 98 | 0.577036 | false | 3.078472 | false | false | false |
Justin-W/clifunland | src/clifunzone/xml_utils.py | 1 | 13355 | from collections import OrderedDict
from clifunzone import reflection_utils
from clifunzone import xml2json
try:
from lxml import etree as ET
except ImportError:
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
def contains_valid_xml(obj):
"""
Indicates whether a specified value contains valid and well-formed XML.
:param obj: a file-like or string-like object.
:return: True if valid, else False.
>>> contains_valid_xml(None)
False
>>> contains_valid_xml('')
False
>>> contains_valid_xml('<')
False
>>> contains_valid_xml('<xml />')
True
>>> contains_valid_xml('<constants><constant id="pi" value="3.14" /><constant id="zero">0</constant></constants>')
True
"""
if obj is None:
return False
try:
if reflection_utils.is_file_like(obj):
# read the contents of obj
obj = obj.read()
ET.fromstring(obj)
except ET.ParseError:
return False
return True
def load(obj):
"""
Parses a specified object using ElementTree.
:param obj: a file-like or string-like object.
:return: True if valid, else False.
>>> load(None)
Traceback (most recent call last):
ValueError
>>> load('') # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
XMLSyntaxError: None
# Note: the exception will be different without lxml: ParseError: no element found: line 1, column 0
>>> load('<') # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
XMLSyntaxError: StartTag: invalid element name, line 1, column 2
# Note: the exception will be different without lxml: ParseError: unclosed token: line 1, column 0
>>> load('<abc />').tag
'abc'
>>> load('<constants><constant id="pi" value="3.14" /><constant id="zero">0</constant></constants>').tag
'constants'
"""
if obj is None:
raise ValueError
if reflection_utils.is_file_like(obj):
# read the contents of obj
obj = obj.read()
return ET.fromstring(obj)
def xml_to_json(xmlstring, strip_attribute=False, strip_namespace=False, strip_whitespace=True, pretty=False):
r"""
Converts XML to JSON.
:param xmlstring: the XML string.
:param strip_attribute: If True, attributes will be ignored.
:param strip_namespace: If True, namespaces will be ignored.
:param strip_whitespace: If True, 'unimportant' whitespace will be ignored.
:param pretty: If True, the output will be pretty-formatted.
:return: a JSON string.
>>> xml_to_json(None) is None
True
>>> xml_to_json('')
Traceback (most recent call last):
ParseError: no element found: line 1, column 0
>>> xml_to_json('<')
Traceback (most recent call last):
ParseError: unclosed token: line 1, column 0
>>> xml_to_json('<a/>')
'{"a": null}'
>>> xml_to_json('<a/>', pretty=True)
'{\n "a": null\n}'
>>> xml_to_json('<constants><constant id="pi" value="3.14" />\n<constant id="zero">0</constant></constants>')
'{"constants": {"constant": [{"@id": "pi", "@value": "3.14"}, {"@id": "zero", "#text": "0"}]}}'
>>> xml_to_json('<z> <q qz="z" qy="y" /> <a az="z" ab="b" ay="y" /> <x/></z>', strip_whitespace=True)
'{"z": {"q": {"@qy": "y", "@qz": "z"}, "a": {"@ay": "y", "@az": "z", "@ab": "b"}, "x": null}}'
>>> xml_to_json('<royg> <r/> <o/> <y/> <r e="d"/> <g/></royg>', strip_whitespace=True)
'{"royg": {"r": [null, {"@e": "d"}], "o": null, "y": null, "g": null}}'
>>> xml_to_json('<a> <b\nid="b1" />\n<c/> <d> </d> </a>', strip_whitespace=False)
'{"a": {"b": {"@id": "b1", "#tail": "\\n"}, "c": {"#tail": " "}, "d": {"#tail": " ", "#text": " "}, "#text": " "}}'
>>> xml_to_json('<a> <b\nid="b1" />\n<c/> <d> </d> </a>', strip_whitespace=True)
'{"a": {"b": {"@id": "b1"}, "c": null, "d": null}}'
>>> xml_to_json("<a> <b\nid=\"b1\" />\n<c/> <d> </d> </a>", strip_namespace=False)
'{"a": {"b": {"@id": "b1"}, "c": null, "d": null}}'
>>> xml_to_json("<a> <b\nid=\"b1\" />\n<c/> <d> </d> </a>", strip_namespace=True)
'{"a": {"b": {"@id": "b1"}, "c": null, "d": null}}'
>>> xml_to_json('<royg> <r/> <o/> <y/> <r e="d"/> <g/></royg>', strip_whitespace=True, strip_attribute=True)
'{"royg": {"r": [null, null], "o": null, "y": null, "g": null}}'
>>> xml_to_json('<a> <b\nid="b1" />\n<c/> <d> </d> </a>', strip_whitespace=False, strip_attribute=True)
'{"a": {"b": {"#tail": "\\n"}, "c": {"#tail": " "}, "d": {"#tail": " ", "#text": " "}, "#text": " "}}'
>>> xml_to_json('<a> <b\nid="b1" />\n<c/> <d> </d> </a>', strip_whitespace=True, strip_attribute=True)
'{"a": {"b": null, "c": null, "d": null}}'
"""
if xmlstring is None:
return None
return xml2json.xml2json(xmlstring, strip_attribute=strip_attribute, strip_namespace=strip_namespace,
strip_whitespace=strip_whitespace, pretty=pretty)
# def etree_to_dict(t):
# d = {t.tag: map(etree_to_dict, t.iterchildren())}
# d.update(('@' + k, v) for k, v in t.attrib.iteritems())
# d['text'] = t.text
# return d
def element_info(element, tree=None):
"""
Returns a dict with (incomplete) info about a specified element/node.
:param element: an <ElementTree.Element> instance.
:return: a <collections.OrderedDict> instance.
"""
def get_distinct_tag_names(elements):
# convert to tag names
elements = [child.tag for child in elements]
# filter out duplicates
elements = set(elements)
return elements
def get_distinct_attribute_names(elements):
names = set()
for i in elements:
names.update(i.attrib.keys())
names = ('@' + k for k in names)
return names
d = OrderedDict()
if tree:
try:
d.update({'path': tree.getpath(element)})
except AttributeError:
# tree.getpath() is only available in lxml, not in the builtin xml.etree
# see: http://lxml.de/xpathxslt.html#xpath
# see: http://stackoverflow.com/a/13352109
pass
d2 = {'tag': element.tag}
if element.text:
d2.update({'#text': element.text})
if element.attrib:
# get all attribs
attribs = element.attrib.items()
# prefix attrib names
attribs = [('@' + k, v) for k, v in attribs]
# attribs = {k, v for k, v in attribs}
attribs = OrderedDict(attribs)
# if attribs:
# # d['attribs'] = {'tags': attribs, 'count': attribs_count}
# d['attribs'] = attribs
d2.update({'attributes': attribs})
d['content'] = d2
d['metrics'] = {}
# get all direct children
children = get_elements(element, xpath='./*')
children_count = len(children)
if children_count:
d2 = {'count': children_count}
d2.update({'tags': (sorted(get_distinct_tag_names(children)))})
d2.update({'attributes': (sorted(get_distinct_attribute_names(children)))})
d['metrics']['children'] = d2
# get all descendants
descendants = get_elements(element, xpath='.//*')
descendants_count = len(descendants)
if descendants_count:
d2 = {'count': descendants_count}
d2.update({'tags': (sorted(get_distinct_tag_names(descendants)))})
d2.update({'attributes': (sorted(get_distinct_attribute_names(descendants)))})
d['metrics']['descendants'] = d2
return d
def is_empty_element(elem):
"""
Indicates whether an XML Element object is 'empty'.
:param elem: an Element object
:return: True if elem is empty
"""
# return not bool(len(elem) or len(elem.attrib) or len(elem.text))
return not bool(len(elem) or elem.attrib or elem.text)
def is_parent_element(elem):
"""
Indicates whether an XML Element object has any children.
:param elem: an Element object
:return: True if elem has any child elements
"""
return len(elem)
def count_elements(obj, xpath=None):
"""
Returns a count of the XML elements that match a specified XPath expression.
This function encapsulates API differences between the lxml and ElementTree packages.
:param obj: a tree or element object
:param xpath: an XPath node set/selection expression
:return: an int
"""
if not xpath:
xpath = '//' # match all elements by default
# try lxml syntax first (much faster!)
try:
return int(obj.xpath('count({xpath})'.format(xpath=xpath)))
except AttributeError:
# AttributeError: 'ElementTree' object has no attribute 'xpath'
pass
# else try ElementTree syntax
if xpath.startswith('/'):
# ElementTree's findall() doesn't like xpath expressions that start with a '/'.
# e.g. "FutureWarning: This search is broken in 1.3 and earlier, and will be fixed in a future version. ..."
xpath = '.' + xpath
return len(obj.findall(xpath))
def get_elements(obj, xpath=None):
"""
Returns all XML elements that match a specified XPath expression.
This function encapsulates API differences between the lxml and ElementTree packages.
:param obj: a tree or element object
:param xpath: an XPath node set/selection expression
:return: an iterable
"""
if not xpath:
xpath = '//' # match all elements by default
# try lxml syntax first (much faster!)
try:
return obj.xpath(xpath)
except AttributeError:
# AttributeError: 'ElementTree' object has no attribute 'xpath'
pass
# else try ElementTree syntax
if xpath.startswith('/'):
# ElementTree's findall() doesn't like xpath expressions that start with a '/'.
# e.g. "FutureWarning: This search is broken in 1.3 and earlier, and will be fixed in a future version. ..."
xpath = '.' + xpath
return obj.findall(xpath)
def remove_elements(obj, xpath):
"""
Removes all XML elements that match a specified XPath expression.
This function encapsulates API differences between the lxml and ElementTree packages.
:param obj: a tree or element object
:param xpath: an XPath node set/selection expression
:return: an int count of the number of removed elements
"""
if not xpath:
raise ValueError('invalid xpath')
elements = get_elements(obj, xpath=xpath)
# count = len(elements)
count = 0
for i in elements:
# try lxml syntax first
try:
parent = i.getparent()
parent.remove(i)
except AttributeError:
# else try ElementTree syntax
obj.remove(i)
count += 1
return count
def remove_attributes_with_name(element, attrib_name):
"""
Removes all occurrences of a specific attribute from all elements.
:param element: an XML element object.
:param attrib_name: the name of the attribute to remove.
"""
if attrib_name.startswith('@'):
# remove the optional leading '@'
attrib_name = attrib_name[1:]
# find all elements that have the attribute
xpath = '//*[@{attrib}]'.format(attrib=attrib_name)
elements = get_elements(element, xpath=xpath)
for i in elements:
del i.attrib[attrib_name]
def remove_attributes_with_value(element, attrib_value):
"""
Removes all attributes with a specified value from all elements.
:param element: an XML element object.
:param attrib_value: the attribute value to match on.
"""
# find all elements that have 1+ matching attributes
xpath = '//*[@*="{value}"]'.format(value=(attrib_value.replace('"', '\\"')))
elements = get_elements(element, xpath=xpath)
for i in elements:
# determine the matching keys/attributes
keys = (k for k in i.attrib.keys() if i.attrib[k] == attrib_value)
for attrib_name in keys:
# remove each matching attribute from the current element
del i.attrib[attrib_name]
def remove_attributes_with_empty_value(element):
"""
Removes all attributes with an empty value from all elements.
:param element: an XML element object.
"""
remove_attributes_with_value(element, '')
# def remove_attributes_if(element, attrib_name, func):
# """
# Removes all occurrences of a specific attribute from all elements.
#
# :param element: an XML element object.
# :param attrib_name: the name of the attribute to remove.
# :param func: a predicate function (i.e. returns a bool) with the signature:
# f(attribute_name, attribute_value)
# """
# if attrib_name.startswith('@'):
# # remove the optional leading '@'
# attrib_name = attrib_name[1:]
# # find all elements that have the attribute
# xpath = '//*[@{attrib}]'.format(attrib=attrib_name)
# elements = xml_utils.get_elements(element, xpath=xpath)
# for i in elements:
# del i.attrib[attrib_name]
def main():
import doctest
fail, total = doctest.testmod(optionflags=(doctest.REPORT_NDIFF | doctest.REPORT_ONLY_FIRST_FAILURE))
print('Doctest: {f} FAILED ({p} of {t} PASSED).'.format(f=fail, p=(total - fail), t=total))
if __name__ == "__main__":
main()
| bsd-2-clause | -4,661,843,461,029,319,000 | 32.138958 | 119 | 0.600973 | false | 3.649904 | true | false | false |
bloomberg/phabricator-tools | py/phl/phlgitu_ref.py | 4 | 7288 | """Utilities for working with git refs."""
# =============================================================================
# CONTENTS
# -----------------------------------------------------------------------------
# phlgitu_ref
#
# Public Classes:
# Error
# Name
# .short
# .fq
# .is_remote
#
# Public Functions:
# is_remote
# is_fq
# guess_fq_name
# make_remote
# make_local
# fq_remote_to_short_local
# fq_to_short
# is_under_remote
# is_fq_local_branch
#
# -----------------------------------------------------------------------------
# (this contents block is generated, edits will be lost)
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class Error(Exception):
pass
class Name(object):
"""Vocabulary type for git ref names to remove ambiguity in passing.
Usage examples:
>>> a = Name('refs/heads/master')
>>> a.short
'master'
>>> a.fq
'refs/heads/master'
>>> a.is_remote
False
>>> b = Name('refs/heads/master')
>>> c = Name('refs/remotes/origin/master')
>>> a == b
True
>>> a == c
False
>>> c.is_remote
True
>>> s = set([a, b, c])
>>> len(s)
2
"""
def __init__(self, fq_name):
super(Name, self).__init__()
if not is_fq(fq_name):
raise Error("'{}' is not fully qualified")
self._fq = fq_name
@property
def short(self):
return fq_to_short(self._fq)
@property
def fq(self):
return self._fq
@property
def is_remote(self):
return is_remote(self._fq)
def __eq__(self, right):
return self._fq.__eq__(right._fq)
def __hash__(self):
return self._fq.__hash__()
def is_remote(fq_name):
"""Return True if 'fq_name' is a remote branch, False otherwise.
Usage examples:
>>> is_remote('refs/heads/master')
False
>>> is_remote('refs/remotes/origin/master')
True
:name: string fully-qualified name of the ref to test
:returns: bool
"""
if not is_fq(fq_name):
raise Error("'{}' is not fully qualified")
return fq_name.startswith('refs/remotes/')
def is_fq(name):
"""Return True if the supplied 'name' is fully-qualified, False otherwise.
Usage examples:
>>> is_fq('master')
False
>>> is_fq('refs/heads/master')
True
:name: string name of the ref to test
:returns: bool
"""
return name.startswith('refs/')
def guess_fq_name(name_to_guess_from, remote_list=None):
"""Return a best-guess of the fq name of a ref, given a list of remotes.
The list of remotes defaults to ['origin'] if None is supplied.
Usage examples:
>>> guess_fq_name('master')
'refs/heads/master'
>>> guess_fq_name('origin/master')
'refs/remotes/origin/master'
>>> guess_fq_name('refs/notes')
'refs/notes'
:name_to_guess_from: string name of the ref
:remote_list: list of string names of remotes
"""
if not name_to_guess_from:
raise Error("empty name to guess from")
if is_fq(name_to_guess_from):
return name_to_guess_from
if remote_list is None:
remote_list = ['origin']
for r in remote_list:
if name_to_guess_from.startswith(r + '/'):
return "refs/remotes/{}".format(name_to_guess_from)
return "refs/heads/{}".format(name_to_guess_from)
def make_remote(ref, remote):
"""Return a Git reference based on a local name and a remote name.
Usage example:
>>> make_remote("mywork", "origin")
'refs/remotes/origin/mywork'
>>> make_remote("mywork", "github")
'refs/remotes/github/mywork'
"""
return "refs/remotes/" + remote + "/" + ref
def make_local(ref):
"""Return a fully qualified Git reference based on a local name.
Usage example:
>>> make_local("mywork")
'refs/heads/mywork'
"""
# TODO: check that it isn't already fully qualified
return "refs/heads/" + ref
def fq_remote_to_short_local(ref):
"""Return a short Git branch name based on a fully qualified remote branch.
Raise Error if the conversion can't be done.
Usage example:
>>> fq_remote_to_short_local("refs/remotes/origin/mywork")
'mywork'
>>> fq_remote_to_short_local("refs/heads/mywork")
Traceback (most recent call last):
Error: ref can't be converted to short local: mywork
"""
# convert to e.g. 'origin/mywork'
ref = fq_to_short(ref)
slash_pos = ref.find('/')
if slash_pos == -1:
raise Error("ref can't be converted to short local: {}".format(ref))
# disregard before and including the first slash
return ref[slash_pos + 1:]
def fq_to_short(ref):
"""Return a short Git reference based on a fully qualified name.
Raise Error if the conversion can't be done.
Usage example:
>>> fq_to_short("refs/heads/mywork")
'mywork'
>>> fq_to_short("refs/remotes/origin/mywork")
'origin/mywork'
"""
refs_heads = 'refs/heads/'
refs_remotes = 'refs/remotes/'
if ref.startswith(refs_heads):
return ref[len(refs_heads):]
if ref.startswith(refs_remotes):
return ref[len(refs_remotes):]
raise Error("ref can't be converted to short: {}".format(ref))
def is_under_remote(ref, remote):
"""Return True if a Git reference is from a particular remote, else False.
Note that behavior is undefined if the ref is not fully qualified, i.e.
does not begin with 'refs/'.
Usage example:
>>> is_under_remote("refs/remotes/origin/mywork", "origin")
True
>>> is_under_remote("refs/remotes/origin/mywork", "alt")
False
>>> is_under_remote("refs/headsmywork", "origin")
False
"""
return ref.startswith('refs/remotes/' + remote + '/')
def is_fq_local_branch(ref):
"""Return True if a Git reference is a fully qualified local branch.
Return False otherwise.
Usage example:
>>> is_fq_local_branch("refs/heads/master")
True
>>> is_fq_local_branch("refs/remotes/origin/master")
False
>>> is_fq_local_branch("refs/notes/commits")
False
"""
return ref.startswith('refs/heads/')
# -----------------------------------------------------------------------------
# Copyright (C) 2013-2014 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
| apache-2.0 | 6,136,878,137,985,886,000 | 23.87372 | 79 | 0.562569 | false | 3.837809 | false | false | false |
seongjaelee/rigvedawiki-parser | rigvedawiki/element.py | 1 | 1070 | class Element(object):
def __init__(self, parent, tag=None, attrib=None, close=True):
self.parent = parent
self.tag = tag
self.children = []
self.attrib = attrib if attrib != None else {}
self.close = close
if parent != None:
parent.append_child(self)
def append_child(self, e):
self.children.append(e)
return e
def get_parent(self):
return self.parent
def to_html(self):
ret = ''
if self.tag:
ret += '<%s' % self.tag
for k, v in self.attrib.items():
ret += ' %s="%s"' % (k, v)
ret += '>'
for child in self.children:
ret += child.to_html()
if self.tag and self.close:
ret += '</%s>' % self.tag
return ret
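# Illustrative sketch:
#   root = Element(None, 'p', {'class': 'x'})
#   TextElement(root, 'hi')
#   root.to_html()  # -> '<p class="x">hi</p>'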
class TextElement(Element):
def __init__(self, parent, text = ''):
Element.__init__(self, parent, None)
self.text = text
def append_child(self, e):
assert False
def to_html(self):
return self.text
| mit | 2,558,418,487,808,359,000 | 24.47619 | 66 | 0.502804 | false | 3.876812 | false | false | false |
Jofemago/Computacion-Grafica | Librery/Ejercicios/Images.py | 3 | 1999 | import pygame
NEGRO = (0, 0, 0)  # black (RGB)
class Img:
def __init__ (self, pantalla, img, x, y, Tampantalla ,fondo = None ):
self.img = pygame.image.load(img)
self.fondo = None
if fondo is not None:
self.fondo = pygame.image.load(fondo)
self.p = pantalla
self.x = x
self.y = y
self.AnAl = Tampantalla
self.DrawImg()
def DrawImg(self, Dx = 0, Dy = 0, pintar = False):
if(pintar):
self.p.fill(NEGRO)
if self.fondo is not None:
self.p.blit(self.fondo, [0,0])
self.x += Dx
self.y += Dy
self.ValidarLimites()
self.p.blit(self.img,[self.x , self.y ])
pygame.display.flip()
def ValidarLimites(self):
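        # wrap the image position around the screen edges (toroidal movement)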
if self.y > self.AnAl[1]:
self.y = 0
if self.x > self.AnAl[0]:
self.x = 0
if self.y < 0:
self.y = self.AnAl[1]
if self.x < 0:
self.x = self.AnAl[0]
def MovTeclImg(self, event, pintar = False):
if event == pygame.K_DOWN:
self.DrawImg(0,10,pintar)
            print 'down'
elif event == pygame.K_UP:
self.DrawImg(0, -10, pintar)
            print 'up'
elif event == pygame.K_LEFT:
self.DrawImg(-10,0,pintar)
            print 'left'
elif event == pygame.K_RIGHT:
self.DrawImg(10, 0, pintar)
            print 'right'
def HiddenMouse(self):
pygame.mouse.set_visible(False)
def ShowMouse(self):
        pygame.mouse.set_visible(True)
def MovWhithMouse(self, pintar = False):
tam = self.img.get_rect()#obtengo ancho y alto de la imagen
pos = pygame.mouse.get_pos()
pos = [ pos[0] - tam[2]/2, pos[1] - tam[3]/2]
if(pintar):
self.p.fill(NEGRO)
if self.fondo is not None:
self.p.blit(self.fondo, [0,0])
self.p.blit(self.img,pos)
pygame.display.flip()
| mit | 1,460,225,638,242,428,700 | 21.715909 | 73 | 0.512256 | false | 3.010542 | false | false | false |
UCSD-CCAL/ccal | ccal/single_sample_gsea.py | 1 | 1912 | from warnings import warn
from numpy import absolute, in1d
from ._plot_mountain import _plot_mountain
def single_sample_gsea(
gene_score,
gene_set_genes,
statistic="ks",
plot=True,
title=None,
gene_score_name=None,
annotation_text_font_size=16,
annotation_text_width=88,
annotation_text_yshift=64,
html_file_path=None,
plotly_html_file_path=None,
):
gene_score = gene_score.dropna()
gene_score_sorted = gene_score.sort_values(ascending=False)
in_ = in1d(gene_score_sorted.index, gene_set_genes.dropna(), assume_unique=True)
in_sum = in_.sum()
if in_sum == 0:
warn("Gene scores did not have any of the gene-set genes.")
return
gene_score_sorted_values = gene_score_sorted.values
gene_score_sorted_values_absolute = absolute(gene_score_sorted_values)
in_int = in_.astype(int)
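    # Enrichment running sum: genes in the set contribute their (absolute)
    # score-weighted fraction ("hit") while the rest contribute a uniform
    # penalty ("miss"); "ks" takes the signed extremum of the cumulative sum
    # and "auc" takes its total sum.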
hit = (
gene_score_sorted_values_absolute * in_int
) / gene_score_sorted_values_absolute[in_].sum()
miss = (1 - in_int) / (in_.size - in_sum)
y = hit - miss
cumulative_sums = y.cumsum()
if statistic not in ("ks", "auc"):
raise ValueError("Unknown statistic: {}.".format(statistic))
if statistic == "ks":
max_ = cumulative_sums.max()
min_ = cumulative_sums.min()
if absolute(min_) < absolute(max_):
score = max_
else:
score = min_
elif statistic == "auc":
score = cumulative_sums.sum()
if plot:
_plot_mountain(
cumulative_sums,
in_,
gene_score_sorted,
score,
None,
None,
title,
gene_score_name,
annotation_text_font_size,
annotation_text_width,
annotation_text_yshift,
html_file_path,
plotly_html_file_path,
)
return score
| mit | -6,813,536,755,405,511,000 | 19.782609 | 84 | 0.572176 | false | 3.580524 | false | false | false |
nzavagli/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/pywin32-219/Pythonwin/pywin/framework/cmdline.py | 24 | 1165 | # cmdline - command line utilities.
import sys
import win32ui
import string
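# ParseArgs splits a raw command line into tokens, honouring double-quoted
# substrings, e.g. (illustrative): ParseArgs('a "b c" d') -> ['a', 'b c', 'd']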
def ParseArgs( str ):
import string
ret=[]
pos = 0
length=len(str)
while pos<length:
try:
while str[pos] in string.whitespace: pos = pos+1
except IndexError:
break
if pos>=length:
break
if str[pos]=='"':
pos=pos+1
try:
endPos = str.index('"', pos)-1
nextPos = endPos+2
except ValueError:
endPos=length
nextPos=endPos+1
else:
endPos = pos
while endPos<length and not str[endPos] in string.whitespace: endPos = endPos+1
nextPos=endPos+1
ret.append(str[pos:endPos+1].strip())
pos = nextPos
return ret
def FixArgFileName(fileName):
"""Convert a filename on the commandline to something useful.
Given an automatic filename on the commandline, turn it a python module name,
with the path added to sys.path. """
import os
path, fname = os.path.split(fileName)
if len(path)==0:
path = os.curdir
path=os.path.abspath(path)
# must check that the command line arg's path is in sys.path
for syspath in sys.path:
if os.path.abspath(syspath)==path:
break
else:
sys.path.append(path)
return os.path.splitext(fname)[0] | mit | -3,405,567,760,928,287,000 | 22.795918 | 82 | 0.694421 | false | 2.949367 | false | false | false |
tectronics/pygranule | pygranule/bidict.py | 2 | 3390 |
def reverse_dictionary(d):
rev_d = {}
for key in d:
val = d[key]
rev_d[val] = key
return rev_d
class BiDict(object):
"""
Bidirectional dictionary.
Allows inverse listings, value -> key
as well as usual key -> value.
Only supports one to one mapping,
i.e. unique keys and unique values.
"""
def __init__(self,dictionary=None, bare=False):
if dictionary is not None:
self.fwd = dictionary
self.bwd = reverse_dictionary(self.fwd)
elif bare == True:
pass
else:
self.fwd = {}
self.bwd = {}
def __str__(self):
return str(self.fwd)
def __getitem__(self, key):
return self.fwd[key]
def __setitem__(self, key, value):
self.fwd[key] = value
self.bwd[value] = key
def __delitem__(self, key):
self.remove(key)
def __invert__(self):
return self.inverse()
def __iter__(self):
return iter(self.fwd)
def __iadd__(self,other):
self.fwd.update(other.fwd)
self.bwd.update(other.bwd)
return self
def __len__(self):
return len(self.fwd)
def copy(self):
new = self.__class__(bare=True)
new.fwd = self.fwd.copy()
new.bwd = self.bwd.copy()
return new
def keys(self):
return self.fwd.keys()
def values(self):
return self.fwd.values()
def inverse(self):
"""
Returns an inverse BiDict of itself.
Data is still referenced from parent object.
Useful for listing data in reverse direction.
"""
new = self.__class__(bare=True)
new.fwd = self.bwd
new.bwd = self.fwd
return new
def remove(self, key):
val = self.fwd[key]
if val is not None:
del self.bwd[val]
del self.fwd[key]
return self
def reduce(self, external_dict):
"""
Removes those keys and values that correspond
to the keys in the external dict-like object.
Note that the values may differ in this and
external dict object may differ.
"""
for key in external_dict:
if key in self.fwd:
del self.bwd[self.fwd[key]]
del self.fwd[key]
return self
def update(self, external_dict):
"""
Updates this BiDict with any corresponding or new key/values
found in the external dict-like object.
"""
for key in external_dict:
if key in self.fwd:
# update the forward dict record
new_val = external_dict[key]
old_val = self.fwd[key]
self.fwd[key] = new_val
# remove reverse dict record
del self.bwd[old_val]
# and add the new one
self.bwd[new_val] = key
else:
self.fwd[key] = external_dict[key]
self.bwd[self.fwd[key]] = key
return self
def difference(self, external_dict):
"""
Return a BiDict containing all missing
key-values in the external dict-like object.
"""
new_dict = self.copy()
for key in external_dict:
if key in new_dict:
del new_dict[key]
return new_dict
| gpl-3.0 | -3,382,926,274,161,524,000 | 25.076923 | 68 | 0.526254 | false | 4.069628 | false | false | false |
mcaleavya/bcc-scripts | disk_qos.py | 1 | 6372 | #!/usr/bin/python
# @lint-avoid-python-3-compatibility-imports
#
# qos implements a dynamic QOS for disks using cgroups
# For Linux, uses BCC, eBPF.
#
# USAGE: qos.py [-h] [-qc] [--max] [interval]
# requires a file named qos_setup, whose path can be changed via the qosfile variable
# file has format maj:min IOPS
# i.e. 8:0 40000
# Copyright (c) 2018 Allan McAleavy
# Licensed under the Apache License, Version 2.0 (the "License")
from __future__ import print_function
from bcc import BPF
from time import sleep, strftime
import argparse
import signal
import math
import collections
# arguments
examples = """examples:
./qos # block device I/O QOS, 1 second refresh
./qos --max 5000 # set max IOP limit for average I/O size lookup
./qos 5 # 5 second summaries
./qos --qc 5 # check for qos every 5 seconds
"""
parser = argparse.ArgumentParser(
description="Block device (disk) I/O by process and QOS",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=examples)
parser.add_argument("-max", "--max", default=4000,
help="maximum IOPS")
parser.add_argument("interval", nargs="?", default=1,
help="output interval, in seconds")
parser.add_argument("count", nargs="?", default=99999999,
help="number of outputs")
parser.add_argument("-qc", "--qc", default=5,
help="QOS checktime")
parser.add_argument("--ebpf", action="store_true",
help=argparse.SUPPRESS)
args = parser.parse_args()
interval = int(args.interval)
countdown = int(args.count)
checktime = int(args.qc)
# linux stats
diskstats = "/proc/diskstats"
rfile = "/sys/fs/cgroup/blkio/blkio.throttle.read_iops_device"
wfile = "/sys/fs/cgroup/blkio/blkio.throttle.write_iops_device"
qosfile = "/root/bcc/tools/qos_setup"
# signal handler
def signal_ignore(signal, frame):
print()
def write_qos(dsk, typ, max_iops, sleepcnt):
if sleepcnt > checktime:
reload_qos(dsk)
if typ == "W":
with open(wfile, "w") as tf:
tf.write("%s %d" % (dsk, max_iops))
tf.close()
if typ == "R":
with open(rfile, "w") as tf:
tf.write("%s %d" % (dsk, max_iops))
tf.close()
# load qos settings at start
diskqos = {}
with open(qosfile) as stats:
for line in stats:
a = line.split()
diskqos[str(a[0])] = a[1]
def reload_qos(dsk):
with open(qosfile) as stats:
for line in stats:
a = line.split()
diskqos[str(a[0])] = a[1]
def do_qos(avg, iops, typ, dsk, sleepcnt):
    # Use the per-disk limit from qos_setup when present, else the --max arg.
    if dsk in diskqos:
        max_iops = int(diskqos[dsk])
    else:
        max_iops = int(args.max)
    # Relative cost of one I/O per block size in KB (a 4 KB I/O costs 100).
    costs = {4: 100, 8: 160, 16: 270, 32: 500, 64: 1000, 128: 1950,
             256: 3900, 512: 7600, 1024: 15000}
    od = collections.OrderedDict(sorted(costs.items()))
    average_iopsize = float(avg) / 1024
    hbsize = 0
    if average_iopsize >= 1:
        # Round the average I/O size up to the next power of two (in KB).
        hbsize = int(pow(2, math.ceil(math.log(average_iopsize, 2))))
    if hbsize < 4:
        hbsize = 4
    lbsize = (hbsize / 2)
    if lbsize < 4:
        lbsize = 4
    # Linearly interpolate the cost between the two bounding block sizes.
    lbcost = od[lbsize]
    hbcost = od[hbsize]
    costep = float(hbcost - lbcost) / float(lbsize)
    curcost = ((average_iopsize - lbsize) * costep) + lbcost
    # Scale the configured cap by how expensive the current I/O size is.
    max_iops = (od[4] / float(curcost) * max_iops)
    write_qos(dsk, typ, max_iops, sleepcnt)
    return max_iops
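# Worked example of the scaling above (illustrative): with an average I/O
# size of 32 KB, curcost equals od[32] == 500, so a configured cap of
# 40000 IOPS becomes 100 / 500.0 * 40000 = 8000 IOPS in the cgroup file.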
# load BPF program
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <linux/blkdev.h>
// the key for the output summary
struct info_t {
int rwflag;
int major;
int minor;
};
// the value of the output summary
struct val_t {
u64 bytes;
u32 io;
};
BPF_HASH(counts, struct info_t, struct val_t);
int trace_req_start(struct pt_regs *ctx, struct request *req)
{
struct val_t *valp, zero = {};
struct info_t info = {};
info.major = req->rq_disk->major;
info.minor = req->rq_disk->first_minor;
#ifdef REQ_WRITE
info.rwflag = !!(req->cmd_flags & REQ_WRITE);
#elif defined(REQ_OP_SHIFT)
info.rwflag = !!((req->cmd_flags >> REQ_OP_SHIFT) == REQ_OP_WRITE);
#else
info.rwflag = !!((req->cmd_flags & REQ_OP_MASK) == REQ_OP_WRITE);
#endif
if( info.major > 0 )
{
valp = counts.lookup_or_init(&info, &zero);
valp->bytes += req->__data_len;
valp->io++;
}
return 0;
}
"""
if args.ebpf:
print(bpf_text)
exit()
b = BPF(text=bpf_text)
b.attach_kprobe(event="blk_start_request", fn_name="trace_req_start")
b.attach_kprobe(event="blk_mq_start_request", fn_name="trace_req_start")
print('Tracing... Output every %d secs. Hit Ctrl-C to end' % interval)
disklookup = {}
with open(diskstats) as stats:
for line in stats:
a = line.split()
disklookup[a[0] + ":" + a[1]] = a[2]
exiting = 0
sleepcnt = 0
diskname = "???"
wiops = 0
riops = 0
ravg = 0
wavg = 0
rqos = 0
wqos = 0
wbytes = 0
rbytes = 0
print("%-8s %-5s %-8s %-8s %-8s %-8s %-8s %-8s %-8s %-8s" %
("TIME", "DISK", "RIOPS", "R MB/s", "R_AvgIO", "R_QOS",
"WIOPS", "W_AvgIO", "W MB/s", "W_QOS"))
while 1:
try:
sleep(interval)
except KeyboardInterrupt:
exiting = 1
counts = b.get_table("counts")
line = 0
for k, v in sorted(counts.items(), key=lambda counts: counts[1].bytes):
disk = str(k.major) + ":" + str(k.minor)
if disk in disklookup:
diskname = disklookup[disk]
else:
diskname = "???"
        if v.io and v.bytes >= 1 and diskname != "???":
if k.rwflag == 1:
wiops = v.io
wavg = (v.bytes / wiops)
wbytes = v.bytes
wqos = do_qos(wavg, wiops, "W", disk, sleepcnt)
else:
riops = v.io
ravg = (v.bytes / riops)
rbytes = v.bytes
rqos = do_qos(ravg, riops, "R", disk, sleepcnt)
print("%-8s %-5s %-8d %-8d %-8d %-8d %-8d %-8d %-8d %-8d" %
(strftime("%H:%M:%S"), diskname, riops, rbytes / 1048576, ravg / 1024,
rqos, wiops, wavg / 1024, wbytes / 1048576, wqos))
counts.clear()
wiops = 0
riops = 0
ravg = 0
wavg = 0
rqos = 0
wqos = 0
wbytes = 0
rbytes = 0
if sleepcnt > checktime:
sleepcnt = 0
else:
sleepcnt = sleepcnt + 1
countdown -= 1
if exiting or countdown == 0:
print("Detaching...")
exit()
| apache-2.0 | -2,773,605,776,312,134,000 | 26.465517 | 93 | 0.580352 | false | 2.983146 | false | false | false |
NKUCodingCat/NKU-SSS-in-One | prog/src/huping.py | 2 | 3764 | #coding=utf-8
import C
import urllib
import urllib2
import cookielib
import re
import sys
nID = ''
while 1:
nID = raw_input("Input your id and press Enter plz ")
if len(nID) != 7:
        print 'wrong length of id, input again'
else:
break
Pass = raw_input("Input your password and press Enter plz ")
url = 'http://fuxue.nankai.edu.cn/index.php/assessment/question/mod/show'
urllogin = 'http://fuxue.nankai.edu.cn/index.php/Account/doLogin'
cj = cookielib.CookieJar()
pattern = re.compile(r'<h3>\S*:\S*')
pattern1 = re.compile(r'"[0-9]+" >\S*')
valueslogin ={
'Host':' fuxue.nankai.edu.cn',
'Connection':' keep-alive',
'Accept':' text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'User-Agent':' Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36',
'DNT':' 1',
'Accept-Encoding':' gzip,deflate,sdch',
'Accept-Language':' zh-CN,zh;q=0.8'
}
postdata = urllib.urlencode({'username':nID,'password':Pass})
req3 = urllib2.Request(urllogin,headers=valueslogin)
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
response = opener.open(req3,postdata)
print 'Account Checking.........'
if not re.findall(re.escape("url=http://fuxue.nankai.edu.cn/index.php/index/index\' >"), response.read()):
print 'Password Error'
raw_input("Press Enter to continue")
sys.exit(0)
for cookie in cj:
cookie = cookie.value
IDStart = ''
while 1:
IDStart = raw_input("Input the first id you want to assess and press Enter plz ")
if len(IDStart) != 7:
        print 'wrong length of id, input again'
else:
break
IDEnd = ''
while 1:
IDEnd = raw_input("Input the last id you want to assess and press Enter plz ")
if len(IDEnd) != 7:
        print 'wrong length of id, input again'
else:
break
values = {
'Host':' fuxue.nankai.edu.cn',
'Connection':' keep-alive',
'Accept':' text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'User-Agent':' Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36',
'DNT':' 1',
'Referer':'http://fuxue.nankai.edu.cn/index.php/assessment/xnmSelfAssessment',
'Accept-Encoding':' gzip,deflate,sdch',
'Accept-Language':' zh-CN,zh;q=0.8',
'Cookie':' PHPSESSID='+cookie
}
IDS=int(IDStart)
IDE=int(IDEnd)
print 'connecting...................'
count = IDS
strup = 'http://fuxue.nankai.edu.cn/index.php/assessment/appraise_ajax'
Cook=' PHPSESSID='+cookie
for i in range(IDS,IDE+1):
Re='http://fuxue.nankai.edu.cn/index.php/assessment/appraise/num/'
values2 = {
'Host':' fuxue.nankai.edu.cn',
'Connection':' keep-alive',
'Accept':' */*',
'Origin':' http://fuxue.nankai.edu.cn',
'X-Requested-With':' XMLHttpRequest',
'User-Agent':' Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36',
'Content-Type':' application/x-www-form-urlencoded; charset=UTF-8',
'DNT':' 1',
'Referer':Re+str(count),
'Accept-Encoding':' gzip,deflate,sdch',
'Accept-Language':' zh-CN,zh;q=0.8',
'Cookie':Cook
}
'''
Search
'''
req4 = urllib2.Request((Re+str(count)),headers=values)
content2 = urllib2.urlopen(req4).read()
url2=(strup)
'''
Upload
'''
req = urllib2.Request(url2,headers=values2)
content = urllib2.urlopen(req,urllib.urlencode([('num',str(count)),('assproid','9'),('gong','6'),('neng1','6'),('neng2','6'),('neng3','6'),('neng4','6'),('neng5','6'),('good1',''),('good2',''),('good3',''),('bad1',''),('bad2',''),('bad3','')])).read()
print count
count=count + 1
raw_input("\nMission Complete\nPress Enter to continue") | gpl-2.0 | 646,683,881,656,747,300 | 29.915254 | 252 | 0.641073 | false | 2.675195 | false | false | false |
paragbaxi/qualysapi | qualysapi/config.py | 1 | 10177 | """ Module providing a single class (QualysConnectConfig) that parses a config
file and provides the information required to build QualysGuard sessions.
"""
import getpass
import logging
import os
import stat
from configparser import RawConfigParser
import qualysapi.settings as qcs
# Setup module level logging.
logger = logging.getLogger(__name__)
# try:
# from requests_ntlm import HttpNtlmAuth
# except ImportError, e:
# logger.warning('Warning: Cannot support NTML authentication.')
__author__ = "Parag Baxi <[email protected]> & Colin Bell <[email protected]>"
__copyright__ = "Copyright 2011-2013, Parag Baxi & University of Waterloo"
__license__ = "BSD-new"
class QualysConnectConfig:
""" Class to create a RawConfigParser and read user/password details
from an ini file.
"""
def __init__(
self,
filename=qcs.default_filename,
section="info",
remember_me=False,
remember_me_always=False,
username=None,
password=None,
hostname=None,
):
self._cfgfile = None
self._section = section
# Prioritize local directory filename.
# Check for file existence.
if os.path.exists(filename):
self._cfgfile = filename
elif os.path.exists(os.path.join(os.path.expanduser("~"), filename)):
# Set home path for file.
self._cfgfile = os.path.join(os.path.expanduser("~"), filename)
# create RawConfigParser to combine defaults and input from config file.
self._cfgparse = RawConfigParser(qcs.defaults)
if self._cfgfile:
self._cfgfile = os.path.realpath(self._cfgfile)
mode = stat.S_IMODE(os.stat(self._cfgfile)[stat.ST_MODE])
# apply bitmask to current mode to check ONLY user access permissions.
if (mode & (stat.S_IRWXG | stat.S_IRWXO)) != 0:
logger.warning("%s permissions allows more than user access.", filename)
self._cfgparse.read(self._cfgfile)
        # if the 'info' (or user-specified) section doesn't exist, create it.
if not self._cfgparse.has_section(self._section):
self._cfgparse.add_section(self._section)
# Use default hostname (if one isn't provided).
if not self._cfgparse.has_option(self._section, "hostname"):
if not hostname:
if self._cfgparse.has_option("DEFAULT", "hostname"):
hostname = self._cfgparse.get("DEFAULT", "hostname")
else:
raise Exception(
"No 'hostname' set. QualysConnect does not know who to connect to."
)
self._cfgparse.set(self._section, "hostname", hostname)
# Use default max_retries (if one isn't provided).
if not self._cfgparse.has_option(self._section, "max_retries"):
self.max_retries = qcs.defaults["max_retries"]
else:
self.max_retries = self._cfgparse.get(self._section, "max_retries")
try:
self.max_retries = int(self.max_retries)
except Exception:
logger.error("Value max_retries must be an integer.")
print("Value max_retries must be an integer.")
exit(1)
self._cfgparse.set(self._section, "max_retries", str(self.max_retries))
self.max_retries = int(self.max_retries)
# Get template ID... user will need to set this to pull back CSV reports
if not self._cfgparse.has_option(self._section, "template_id"):
self.report_template_id = qcs.defaults["template_id"]
else:
self.report_template_id = self._cfgparse.get(self._section, "template_id")
try:
self.report_template_id = int(self.report_template_id)
except Exception:
logger.error("Report Template ID Must be set and be an integer")
print("Value template ID must be an integer.")
exit(1)
self._cfgparse.set(self._section, "template_id", str(self.report_template_id))
self.report_template_id = int(self.report_template_id)
# Proxy support
        proxy_config = proxy_url = proxy_protocol = None
        proxy_port = proxy_username = proxy_password = None
# User requires proxy?
if self._cfgparse.has_option("proxy", "proxy_url"):
proxy_url = self._cfgparse.get("proxy", "proxy_url")
# Remove protocol prefix from url if included.
for prefix in ("http://", "https://"):
if proxy_url.startswith(prefix):
proxy_protocol = prefix
proxy_url = proxy_url[len(prefix) :]
# Default proxy protocol is http.
if not proxy_protocol:
proxy_protocol = "https://"
# Check for proxy port request.
if ":" in proxy_url:
# Proxy port already specified in url.
# Set proxy port.
proxy_port = proxy_url[proxy_url.index(":") + 1 :]
# Remove proxy port from proxy url.
proxy_url = proxy_url[: proxy_url.index(":")]
if self._cfgparse.has_option("proxy", "proxy_port"):
# Proxy requires specific port.
if proxy_port:
# Warn that a proxy port was already specified in the url.
proxy_port_url = proxy_port
proxy_port = self._cfgparse.get("proxy", "proxy_port")
logger.warning(
"Proxy port from url overwritten by specified proxy_port from config: %s --> %s",
proxy_port_url,
proxy_port,
)
else:
proxy_port = self._cfgparse.get("proxy", "proxy_port")
if not proxy_port:
# No proxy port specified.
if proxy_protocol == "http://":
# Use default HTTP Proxy port.
proxy_port = "8080"
else:
# Use default HTTPS Proxy port.
proxy_port = "443"
# Check for proxy authentication request.
if self._cfgparse.has_option("proxy", "proxy_username"):
# Proxy requires username & password.
proxy_username = self._cfgparse.get("proxy", "proxy_username")
proxy_password = self._cfgparse.get("proxy", "proxy_password")
# Not sure if this use case below is valid.
# # Support proxy with username and empty password.
# try:
# proxy_password = self._cfgparse.get('proxy','proxy_password')
# except NoOptionError, e:
# # Set empty password.
# proxy_password = ''
        # Sample proxy config:
# 'http://user:[email protected]:3128'
if proxy_url:
# Proxy requested.
proxy_config = proxy_url
if proxy_port:
# Proxy port requested.
proxy_config += f":{proxy_port}"
if proxy_username:
# Proxy authentication requested.
proxy_config = f"{proxy_username}:{proxy_password}@{proxy_config}"
# Prefix by proxy protocol.
proxy_config = proxy_protocol + proxy_config
# Set up proxy if applicable.
if proxy_config:
self.proxies = {"https": proxy_config}
else:
self.proxies = None
# ask username (if one doesn't exist)
if not self._cfgparse.has_option(self._section, "username"):
if not username:
# The next line will pass Bandit, which is required for issue B322:blacklist. QualysAPI no longer works with Python2, so this doesn't apply.
username = input("QualysGuard Username: ") # nosec
self._cfgparse.set(self._section, "username", username)
# ask password (if one doesn't exist)
if not self._cfgparse.has_option(self._section, "password"):
if not password:
password = getpass.getpass("QualysGuard Password: ")
self._cfgparse.set(self._section, "password", password)
logger.debug(self._cfgparse.items(self._section))
if remember_me or remember_me_always:
# Let's create that config file for next time...
# Where to store this?
if remember_me:
# Store in current working directory.
config_path = filename
if remember_me_always:
# Store in home directory.
config_path = os.path.expanduser("~")
if not os.path.exists(config_path):
# Write file only if it doesn't already exists.
# http://stackoverflow.com/questions/5624359/write-file-with-specific-permissions-in-python
mode = stat.S_IRUSR | stat.S_IWUSR # This is 0o600 in octal and 384 in decimal.
umask_original = os.umask(0)
try:
config_file = os.fdopen(
os.open(config_path, os.O_WRONLY | os.O_CREAT, mode), "w"
)
finally:
os.umask(umask_original)
# Add the settings to the structure of the file, and lets write it out...
self._cfgparse.write(config_file)
config_file.close()
def get_config_filename(self):
return self._cfgfile
def get_config(self):
return self._cfgparse
def get_auth(self):
""" Returns username from the configfile. """
return (
self._cfgparse.get(self._section, "username"),
self._cfgparse.get(self._section, "password"),
)
def get_hostname(self):
""" Returns hostname. """
return self._cfgparse.get(self._section, "hostname")
def get_template_id(self):
return self._cfgparse.get(self._section, "template_id")
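# Illustrative usage (the filename below is hypothetical; the default comes
# from qualysapi.settings):
#   config = QualysConnectConfig(filename=".qcrc")
#   username, password = config.get_auth()
#   hostname = config.get_hostname()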
| apache-2.0 | 2,183,132,515,875,634,700 | 41.053719 | 156 | 0.55822 | false | 4.326956 | true | false | false |
mmitkevich/softwatch | run.py | 1 | 2612 | #!/usr/bin/python2
import collections
import argparse
from softwatch import timelog
from softwatch import timenode
import sys
import time
import os
import glob
import traceback
import codecs
import locale
def run1(args):
run(args[0])
def run(args):
try:
#sys.stdout = codecs.getwriter(locale.getpreferredencoding())(sys.stdout);
if sys.platform == "win32":
class UniStream(object):
__slots__= ("fobj", "softspace",)
def __init__(self, fileobject):
self.fobj = fileobject
#self.fileno = fileobject.fileno()
self.softspace = False
def write(self, text):
try:
fno = self.fobj.fileno()
os.write(fno, text.encode("cp866") if isinstance(text, unicode) else text)
except BaseException as e:
traceback.print_exc()
#self.fobj.write(text)
sys.stdout = UniStream(sys.stdout)
sys.stderr = UniStream(sys.stderr)
print "run "+str(args)
if args.action=='log':
aw = timelog.TimeLog(args)
aw.monitor_active_window()
return 0
if args.action=='report':
args.file = args.file or "*.log"
opts = timenode.TimeQuery(samples=args.samples, tree=args.tree)
opts.tasks = timenode.loadTasks(os.path.join(args.dir, "tasks.cat"))
opts.total = timenode.loadTasks(os.path.join(args.dir, "tasks.cat"))
opts.total.tag = "**ONLINE**"
opts.min_time = int(args.duration*60000)
opts.min_percent = float(args.percent)
opts.relative = args.relative
if args.begin:
opts.min_start = int((time.time()-3600*24*float(args.begin))*1000)
if args.end:
opts.max_start = int((time.time()-3600*24*float(args.end))*1000)
print "directory:"+str(args.dir)+", file:"+args.file
            # glob already returns paths joined with args.dir; test them directly.
            logfiles = [f for f in glob.glob(os.path.join(args.dir, args.file)) if os.path.isfile(f)]
logfiles = sorted(logfiles)
for f in logfiles:
if (os.path.getmtime(f)*1000-opts.min_start>=0):
print "processing "+f
opts.process_file(f)
# else:
# print "skipped "+f
taglist = args.pattern #[0].split(' ')
print taglist
opts.total.query(set(taglist),opts)
return 0
except BaseException as e:
traceback.print_exc()
var = traceback.format_exc()
f=open("err","w")
f.write(str(e)+"\n"+var)
f.close()
print var
return 1
| mit | -7,720,414,205,021,691,000 | 30.853659 | 122 | 0.574655 | false | 3.592847 | false | false | false |
niwinz/niwi-web | src/niwi/web/forms.py | 1 | 1506 | # -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
from django import forms
import datetime
LEXER_CHOICES = (
('', '---------'),
('text', 'Text plain'),
('c', 'C'),
('cpp', 'C++'),
('d', 'D'),
('csharp', 'C#'),
('go', 'Go'),
('java', 'Java'),
('py', 'Python 2.x'),
('py3', 'Python 3.x'),
('php', 'PHP'),
('pl', 'Perl'),
('rb', 'Ruby'),
('vala', 'Vala'),
('css', 'CSS'),
('html', 'HTML/XHTML'),
('js', 'JavaScript'),
('xml', 'XML'),
('html+php', 'HTML+PHP'),
('html+django', 'HTML+Django'),
)
class PasteForm(forms.Form):
paste = forms.CharField(widget=forms.Textarea, label=_(u"Paste content"))
lexer = forms.ChoiceField(choices=LEXER_CHOICES, label=_(u"Lexer"))
title = forms.CharField(max_length=100, required=False, label=_(u"Title"))
group = forms.CharField(max_length=50, required=False, label=_(u"Group"))
def __init__(self, *args, **kwargs):
self._request = kwargs.pop('request')
super(PasteForm, self).__init__(*args, **kwargs)
def clean(self):
cleaned_data = self.cleaned_data
real_ip = "HTTP_X_REAL_IP" in self._request.META and \
self._request.META['HTTP_X_REAL_IP'] or None
if not real_ip:
if "REMOTE_HOST" not in self._request.META:
real_ip = "127.0.0.1"
else:
real_ip = self._request.META['REMOTE_HOST']
return cleaned_data
| bsd-3-clause | -5,487,374,588,739,876,000 | 27.415094 | 78 | 0.527224 | false | 3.281046 | false | false | false |
AnTAVR/aai2 | src/libs/db/__init__.py | 1 | 3102 | from typing import Optional, Generator, Any, List
from .db_lib import DbInterface
from .db_types import FontMapsList, FontUnimapsList, FontsList, KeymapsList, DomainsList, TimezonesList, LocalesList
class DbFontMaps(DbInterface):
db_file_name = 'font_maps.csv'
row_class = FontMapsList
lines: List[FontMapsList]
def get(self, value: str, item: str = None) -> Optional[FontMapsList]:
return super().get(value, item)
def get_all(self, value: str, item: str = None) -> Generator[FontMapsList, Any, None]:
# noinspection PyTypeChecker
return super().get_all(value, item)
class DbFontUnimaps(DbInterface):
db_file_name = 'font_unimaps.csv'
row_class = FontUnimapsList
lines: List[FontUnimapsList]
def get(self, value: str, item: str = None) -> Optional[FontUnimapsList]:
return super().get(value, item)
def get_all(self, value: str, item: str = None) -> Generator[FontUnimapsList, Any, None]:
# noinspection PyTypeChecker
return super().get_all(value, item)
class DbFonts(DbInterface):
db_file_name = 'fonts.csv'
row_class = FontsList
lines: List[FontsList]
def get(self, value: str, item: str = None) -> Optional[FontsList]:
return super().get(value, item)
def get_all(self, value: str, item: str = None) -> Generator[FontsList, Any, None]:
# noinspection PyTypeChecker
return super().get_all(value, item)
class DbKeymaps(DbInterface):
db_file_name = 'keymaps.csv'
row_class = KeymapsList
lines: List[KeymapsList]
def get(self, value: str, item: str = None) -> Optional[KeymapsList]:
return super().get(value, item)
def get_all(self, value: str, item: str = None) -> Generator[KeymapsList, Any, None]:
# noinspection PyTypeChecker
return super().get_all(value, item)
class DbDomains(DbInterface):
db_file_name = 'domains.csv'
row_class = DomainsList
lines: List[DomainsList]
def get(self, value: str, item: str = None) -> Optional[DomainsList]:
return super().get(value, item)
def get_all(self, value: str, item: str = None) -> Generator[DomainsList, Any, None]:
# noinspection PyTypeChecker
return super().get_all(value, item)
class DbTimezones(DbInterface):
db_file_name = 'timezones.csv'
row_class = TimezonesList
lines: List[TimezonesList]
def get(self, value: str, item: str = None) -> Optional[TimezonesList]:
return super().get(value, item)
def get_all(self, value: str, item: str = None) -> Generator[TimezonesList, Any, None]:
# noinspection PyTypeChecker
return super().get_all(value, item)
class DbLocales(DbInterface):
db_file_name = 'locales.csv'
row_class = LocalesList
lines: List[LocalesList]
def get(self, value: str, item: str = None) -> Optional[LocalesList]:
return super().get(value, item)
def get_all(self, value: str, item: str = None) -> Generator[LocalesList, Any, None]:
# noinspection PyTypeChecker
return super().get_all(value, item)
| gpl-2.0 | 6,703,015,428,604,923,000 | 29.411765 | 116 | 0.660864 | false | 3.442841 | false | false | false |
teamfx/openjfx-9-dev-rt | modules/javafx.web/src/main/native/Tools/Scripts/webkitpy/tool/bot/irc_command.py | 1 | 12984 | # Copyright (c) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import itertools
import random
import re
from webkitpy.common.config import irc as config_irc
from webkitpy.common.config import urls
from webkitpy.common.config.committers import CommitterList
from webkitpy.common.net.web import Web
from webkitpy.common.system.executive import ScriptError
from webkitpy.tool.bot.queueengine import TerminateQueue
from webkitpy.tool.grammar import join_with_separators
from webkitpy.tool.grammar import pluralize
def _post_error_and_check_for_bug_url(tool, nicks_string, exception):
tool.irc().post("%s" % exception)
bug_id = urls.parse_bug_id(exception.output)
if bug_id:
bug_url = tool.bugs.bug_url_for_bug_id(bug_id)
tool.irc().post("%s: Ugg... Might have created %s" % (nicks_string, bug_url))
# FIXME: Merge with Command?
class IRCCommand(object):
usage_string = None
help_string = None
def execute(self, nick, args, tool, sheriff):
raise NotImplementedError("subclasses must implement")
@classmethod
def usage(cls, nick):
return "%s: Usage: %s" % (nick, cls.usage_string)
@classmethod
def help(cls, nick):
return "%s: %s" % (nick, cls.help_string)
class CreateBug(IRCCommand):
usage_string = "create-bug BUG_TITLE"
help_string = "Creates a Bugzilla bug with the given title."
def execute(self, nick, args, tool, sheriff):
if not args:
return self.usage(nick)
bug_title = " ".join(args)
bug_description = "%s\nRequested by %s on %s." % (bug_title, nick, config_irc.channel)
# There happens to be a committers list hung off of Bugzilla, so
# re-using that one makes things easiest for now.
requester = tool.bugs.committers.contributor_by_irc_nickname(nick)
requester_email = requester.bugzilla_email() if requester else None
try:
bug_id = tool.bugs.create_bug(bug_title, bug_description, cc=requester_email, assignee=requester_email)
bug_url = tool.bugs.bug_url_for_bug_id(bug_id)
return "%s: Created bug: %s" % (nick, bug_url)
except Exception, e:
return "%s: Failed to create bug:\n%s" % (nick, e)
class Help(IRCCommand):
usage_string = "help [COMMAND]"
help_string = "Provides help on my individual commands."
def execute(self, nick, args, tool, sheriff):
if args:
for command_name in args:
if command_name in commands:
self._post_command_help(nick, tool, commands[command_name])
else:
tool.irc().post("%s: Available commands: %s" % (nick, ", ".join(sorted(visible_commands.keys()))))
tool.irc().post('%s: Type "%s: help COMMAND" for help on my individual commands.' % (nick, sheriff.name()))
def _post_command_help(self, nick, tool, command):
tool.irc().post(command.usage(nick))
tool.irc().post(command.help(nick))
aliases = " ".join(sorted(filter(lambda alias: commands[alias] == command and alias not in visible_commands, commands)))
if aliases:
tool.irc().post("%s: Aliases: %s" % (nick, aliases))
class Hi(IRCCommand):
usage_string = "hi"
help_string = "Responds with hi."
def execute(self, nick, args, tool, sheriff):
if len(args) and re.match(sheriff.name() + r'_*\s*!\s*', ' '.join(args)):
return "%s: hi %s!" % (nick, nick)
if sheriff.name() == 'WKR': # For some unknown reason, WKR can't use tool.bugs.quips().
return "You're doing it wrong"
quips = tool.bugs.quips()
quips.append('"Only you can prevent forest fires." -- Smokey the Bear')
return random.choice(quips)
class PingPong(IRCCommand):
usage_string = "ping"
help_string = "Responds with pong."
def execute(self, nick, args, tool, sheriff):
return nick + ": pong"
class YouThere(IRCCommand):
usage_string = "yt?"
help_string = "Responds with yes."
def execute(self, nick, args, tool, sheriff):
return "%s: yes" % nick
class Restart(IRCCommand):
usage_string = "restart"
help_string = "Restarts sherrifbot. Will update its WebKit checkout, and re-join the channel momentarily."
def execute(self, nick, args, tool, sheriff):
tool.irc().post("Restarting...")
raise TerminateQueue()
class Rollout(IRCCommand):
usage_string = "rollout SVN_REVISION [SVN_REVISIONS] REASON"
help_string = "Opens a rollout bug, CCing author + reviewer, and attaching the reverse-diff of the given revisions marked as commit-queue=?."
def _extract_revisions(self, arg):
revision_list = []
possible_revisions = arg.split(",")
for revision in possible_revisions:
revision = revision.strip()
if not revision:
continue
revision = revision.lstrip("r")
# If one part of the arg isn't in the correct format,
# then none of the arg should be considered a revision.
if not revision.isdigit():
return None
revision_list.append(int(revision))
return revision_list
def _parse_args(self, args):
if not args:
return (None, None)
svn_revision_list = []
remaining_args = args[:]
# First process all revisions.
while remaining_args:
new_revisions = self._extract_revisions(remaining_args[0])
if not new_revisions:
break
svn_revision_list += new_revisions
remaining_args = remaining_args[1:]
# Was there a revision number?
if not len(svn_revision_list):
return (None, None)
# Everything left is the reason.
rollout_reason = " ".join(remaining_args)
return svn_revision_list, rollout_reason
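    # Example: args ["r12345,r12346", "Broke", "the", "build"] parse to
    # ([12345, 12346], "Broke the build").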
def _responsible_nicknames_from_revisions(self, tool, sheriff, svn_revision_list):
commit_infos = map(tool.checkout().commit_info_for_revision, svn_revision_list)
nickname_lists = map(sheriff.responsible_nicknames_from_commit_info, commit_infos)
return sorted(set(itertools.chain(*nickname_lists)))
def _nicks_string(self, tool, sheriff, requester_nick, svn_revision_list):
# FIXME: _parse_args guarentees that our svn_revision_list is all numbers.
# However, it's possible our checkout will not include one of the revisions,
# so we may need to catch exceptions from commit_info_for_revision here.
target_nicks = [requester_nick] + self._responsible_nicknames_from_revisions(tool, sheriff, svn_revision_list)
return ", ".join(target_nicks)
def _update_working_copy(self, tool):
tool.scm().discard_local_changes()
tool.executive.run_and_throw_if_fail(tool.deprecated_port().update_webkit_command(), quiet=True, cwd=tool.scm().checkout_root)
def _check_diff_failure(self, error_log, tool):
if not error_log:
return None
revert_failure_message_start = error_log.find("Failed to apply reverse diff for revision")
if revert_failure_message_start == -1:
return None
lines = error_log[revert_failure_message_start:].split('\n')[1:]
files = list(itertools.takewhile(lambda line: tool.filesystem.exists(tool.scm().absolute_path(line)), lines))
if files:
return "Failed to apply reverse diff for %s: %s" % (pluralize(len(files), "file", showCount=False), ", ".join(files))
return None
def execute(self, nick, args, tool, sheriff):
svn_revision_list, rollout_reason = self._parse_args(args)
if (not svn_revision_list or not rollout_reason):
return self.usage(nick)
revision_urls_string = join_with_separators([urls.view_revision_url(revision) for revision in svn_revision_list])
tool.irc().post("%s: Preparing rollout for %s ..." % (nick, revision_urls_string))
self._update_working_copy(tool)
# FIXME: IRCCommand should bind to a tool and have a self._tool like Command objects do.
# Likewise we should probably have a self._sheriff.
nicks_string = self._nicks_string(tool, sheriff, nick, svn_revision_list)
try:
complete_reason = "%s (Requested by %s on %s)." % (
rollout_reason, nick, config_irc.channel)
bug_id = sheriff.post_rollout_patch(svn_revision_list, complete_reason)
bug_url = tool.bugs.bug_url_for_bug_id(bug_id)
tool.irc().post("%s: Created rollout: %s" % (nicks_string, bug_url))
except ScriptError, e:
tool.irc().post("%s: Failed to create rollout patch:" % nicks_string)
diff_failure = self._check_diff_failure(e.output, tool)
if diff_failure:
return "%s: %s" % (nicks_string, diff_failure)
_post_error_and_check_for_bug_url(tool, nicks_string, e)
class Whois(IRCCommand):
usage_string = "whois SEARCH_STRING"
help_string = "Searches known contributors and returns any matches with irc, email and full name. Wild card * permitted."
def _full_record_and_nick(self, contributor):
result = ''
if contributor.irc_nicknames:
result += ' (:%s)' % ', :'.join(contributor.irc_nicknames)
if contributor.can_review:
result += ' (r)'
elif contributor.can_commit:
result += ' (c)'
return unicode(contributor) + result
def execute(self, nick, args, tool, sheriff):
if not args:
return self.usage(nick)
search_string = unicode(" ".join(args))
# FIXME: We should get the ContributorList off the tool somewhere.
contributors = CommitterList().contributors_by_search_string(search_string)
if not contributors:
return unicode("%s: Sorry, I don't know any contributors matching '%s'.") % (nick, search_string)
if len(contributors) > 5:
return unicode("%s: More than 5 contributors match '%s', could you be more specific?") % (nick, search_string)
if len(contributors) == 1:
contributor = contributors[0]
if not contributor.irc_nicknames:
return unicode("%s: %s hasn't told me their nick. Boo hoo :-(") % (nick, contributor)
return unicode("%s: %s is %s. Why do you ask?") % (nick, search_string, self._full_record_and_nick(contributor))
contributor_nicks = map(self._full_record_and_nick, contributors)
contributors_string = join_with_separators(contributor_nicks, only_two_separator=" or ", last_separator=', or ')
return unicode("%s: I'm not sure who you mean? %s could be '%s'.") % (nick, contributors_string, search_string)
# FIXME: Lame. We should have an auto-registering CommandCenter.
visible_commands = {
"create-bug": CreateBug,
"help": Help,
"hi": Hi,
"ping": PingPong,
"restart": Restart,
"rollout": Rollout,
"whois": Whois,
"yt?": YouThere,
}
# Add revert as an "easter egg" command. Why?
# revert is the same as rollout and it would be confusing to list both when
# they do the same thing. However, this command is a very natural thing for
# people to use and it seems silly to have them hunt around for "rollout" instead.
commands = visible_commands.copy()
commands["revert"] = Rollout
# "hello" Alias for "hi" command for the purposes of testing aliases
commands["hello"] = Hi
| gpl-2.0 | -4,076,264,880,319,106,000 | 41.29316 | 145 | 0.651494 | false | 3.774419 | true | false | false |
smartczm/python-learn | Old-day01-10/s13-day9/Socket/ftp_socket/c.py | 1 | 1626 | #!/usr/bin/env python3.5
# -*- coding: utf-8 -*-
# Author: ChenLiang
import socket
import os
import json
ip_port = ('127.0.0.1', 8009)
# "Buy a phone": create the client socket
s = socket.socket()
# "Dial": connect to the server
s.connect(ip_port)
# "Send messages": the exchange starts with the server's welcome
welcome_msg = s.recv(1024)
print("from server:", welcome_msg.decode())
while True:
send_data = input(">>: ").strip()
if len(send_data) == 0: continue
cmd_list = send_data.split()
if len(cmd_list) < 2: continue
task_type = cmd_list[0]
if task_type == 'put':
abs_filepath = cmd_list[1]
if os.path.isfile(abs_filepath):
file_size = os.stat(abs_filepath).st_size
filename = abs_filepath.split("\\")[-1]
print('file:%s size:%s' % (abs_filepath, file_size))
msg_data = {"action": "put",
"filename": filename,
"file_size": file_size}
s.send(bytes(json.dumps(msg_data), encoding="utf-8"))
server_confirmation_msg = s.recv(1024)
confirm_data = json.loads(server_confirmation_msg.decode())
if confirm_data['status'] == 200:
print("start sending file ", filename)
f = open(abs_filepath, 'rb')
for line in f:
s.send(line)
print("send file done ")
else:
print("\033[31;1mfile [%s] is not exist\033[0m" % abs_filepath)
continue
else:
print("doesn't support task type", task_type)
continue
    # "Receive a message": read the server's reply
recv_data = s.recv(1024)
print(str(recv_data, encoding='utf8'))
# "Hang up": close the connection
s.close()
| gpl-2.0 | -4,102,936,458,862,545,000 | 27.5 | 75 | 0.534461 | false | 3.250509 | false | false | false |
sergey-dryabzhinsky/dedupsqlfs | dedupsqlfs/db/mysql/table/tmp_ids.py | 1 | 1719 | # -*- coding: utf8 -*-
__author__ = 'sergey'
from dedupsqlfs.db.mysql.table import Table
class TableTmpIds( Table ):
_engine = "MEMORY"
_table_name = "tmp_ids"
def create( self ):
c = self.getCursor()
# Create table
c.execute(
"CREATE TABLE IF NOT EXISTS `%s` (" % self.getName()+
"`id` BIGINT UNSIGNED PRIMARY KEY"+
")"+
self._getCreationAppendString()
)
return
def insert( self, some_id):
"""
:return: int
"""
self.startTimer()
cur = self.getCursor()
cur.execute(
"INSERT INTO `%s` " % self.getName()+
" (`id`) VALUES (%(id)s)",
{
"id": some_id,
}
)
item = cur.lastrowid
self.stopTimer('insert')
return item
def find( self, some_id):
"""
:param some_id: int
:return: int
"""
self.startTimer()
cur = self.getCursor()
cur.execute(
"SELECT `id` FROM `%s` " % self.getName()+
" WHERE `id`=%(id)s",
{
"id": some_id
}
)
item = cur.fetchone()
self.stopTimer('find')
return item
def get_ids_by_ids(self, ids):
self.startTimer()
ret_ids = ()
id_str = ",".join(ids)
if id_str:
cur = self.getCursor()
cur.execute("SELECT `id` FROM `%s` " % self.getName()+
" WHERE `id` IN (%s)" % (id_str,))
ret_ids = set(str(item["id"]) for item in cur)
self.stopTimer('get_ids_by_ids')
return ret_ids
pass
| mit | -4,263,189,194,209,451,000 | 22.22973 | 66 | 0.441536 | false | 3.81153 | false | false | false |
tommy-u/chaco | examples/demo/quiver.py | 2 | 1961 | """
Draws a vector or "quiver" plot of a set of random points.
- Left-drag pans the plot.
- Mousewheel up and down zooms the plot in and out.
- Pressing "z" brings up the Zoom Box, and you can click-drag a rectangular
region to zoom. If you use a sequence of zoom boxes, pressing alt-left-arrow
and alt-right-arrow moves you forwards and backwards through the "zoom
history".
"""
# Major library imports
from numpy import array, sort
from numpy.random import random
# Enthought library imports
from enable.api import Component, ComponentEditor
from traits.api import HasTraits, Instance, Int
from traitsui.api import Item, View
# Chaco imports
from chaco.api import add_default_grids, add_default_axes, ArrayPlotData, \
Plot, OverlayPlotContainer
from chaco.tools.api import PanTool, ZoomTool
class PlotExample(HasTraits):
plot = Instance(Component)
numpts = Int(400)
vectorlen = Int(15)
traits_view = View(Item('plot', editor=ComponentEditor(), show_label=False),
width=600, height=600)
def _plot_default(self):
# Create starting points for the vectors.
numpts = self.numpts
x = sort(random(numpts))
y = random(numpts)
# Create vectors.
vectorlen = self.vectorlen
vectors = array((random(numpts)*vectorlen, random(numpts)*vectorlen)).T
data = ArrayPlotData()
data.set_data('index', x)
data.set_data('value', y)
data.set_data('vectors', vectors)
quiverplot = Plot(data)
quiverplot.quiverplot(('index', 'value', 'vectors'))
# Attach some tools to the plot
quiverplot.tools.append(PanTool(quiverplot, constrain_key="shift"))
zoom = ZoomTool(quiverplot)
quiverplot.overlays.append(zoom)
container = OverlayPlotContainer(quiverplot, padding=50)
return container
demo = PlotExample()
if __name__ == "__main__":
demo.configure_traits()
| bsd-3-clause | 5,563,889,532,120,993,000 | 29.640625 | 80 | 0.674146 | false | 3.624769 | false | false | false |
wooga/airflow | airflow/providers/google/cloud/hooks/life_sciences.py | 1 | 5714 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Hook for Google Cloud Life Sciences service"""
import time
from typing import Any, Dict, Optional
import google.api_core.path_template
from googleapiclient.discovery import build
from airflow.exceptions import AirflowException
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
# Time to sleep between active checks of the operation results
TIME_TO_SLEEP_IN_SECONDS = 5
# noinspection PyAbstractClass
class LifeSciencesHook(GoogleBaseHook):
"""
Hook for the Google Cloud Life Sciences APIs.
All the methods in the hook where project_id is used must be called with
keyword arguments rather than positional.
:param api_version: API version used (for example v1 or v1beta1).
:type api_version: str
:param gcp_conn_id: The connection ID to use when fetching connection info.
:type gcp_conn_id: str
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
"""
_conn = None # type: Optional[Any]
def __init__(
self,
api_version: str = "v2beta",
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None
) -> None:
super().__init__(gcp_conn_id, delegate_to)
self.api_version = api_version
def get_conn(self):
"""
Retrieves the connection to Cloud Life Sciences.
:return: Google Cloud Life Sciences service object.
"""
if not self._conn:
http_authorized = self._authorize()
self._conn = build("lifesciences", self.api_version,
http=http_authorized, cache_discovery=False)
return self._conn
@GoogleBaseHook.fallback_to_default_project_id
def run_pipeline(self, body: Dict, location: str, project_id: str):
"""
Runs a pipeline
:param body: The request body.
:type body: dict
:param location: The location of the project. For example: "us-east1".
:type location: str
        :param project_id: Optional, Google Cloud Project ID to which the pipeline belongs.
If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:rtype: dict
"""
parent = self._location_path(project_id=project_id, location=location)
service = self.get_conn()
request = (service.projects() # pylint: disable=no-member
.locations()
.pipelines()
.run(parent=parent, body=body)
)
response = request.execute(num_retries=self.num_retries)
# wait
operation_name = response['name']
self._wait_for_operation_to_complete(operation_name)
return response
@GoogleBaseHook.fallback_to_default_project_id
def _location_path(self, project_id: str, location: str):
"""
Return a location string.
        :param project_id: Optional, Google Cloud Project ID to which the
            pipeline belongs. If set to None or missing, the default project_id
from the GCP connection is used.
:type project_id: str
:param location: The location of the project. For example: "us-east1".
:type location: str
"""
return google.api_core.path_template.expand(
'projects/{project}/locations/{location}',
project=project_id,
location=location,
)
def _wait_for_operation_to_complete(self, operation_name: str) -> None:
"""
Waits for the named operation to complete - checks status of the
asynchronous call.
:param operation_name: The name of the operation.
:type operation_name: str
:return: The response returned by the operation.
:rtype: dict
:exception: AirflowException in case error is returned.
"""
service = self.get_conn()
while True:
operation_response = (service.projects() # pylint: disable=no-member
.locations()
.operations()
.get(name=operation_name)
.execute(num_retries=self.num_retries))
self.log.info('Waiting for pipeline operation to complete')
if operation_response.get("done"):
response = operation_response.get("response")
error = operation_response.get("error")
# Note, according to documentation always either response or error is
# set when "done" == True
if error:
raise AirflowException(str(error))
return response
time.sleep(TIME_TO_SLEEP_IN_SECONDS)
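# Illustrative usage (the request body follows the v2beta pipelines.run shape;
# the field names are assumptions -- verify against the current API reference):
#   hook = LifeSciencesHook()
#   body = {
#       "pipeline": {
#           "actions": [{"imageUri": "bash", "commands": ["-c", "echo hello"]}],
#           "resources": {"regions": ["us-central1"]},
#       }
#   }
#   hook.run_pipeline(body=body, location="us-central1", project_id="my-project")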
| apache-2.0 | 5,205,677,005,183,493,000 | 37.093333 | 96 | 0.630382 | false | 4.392006 | false | false | false |
frostasm/retext | ReText/editor.py | 1 | 9116 | # vim: noexpandtab:ts=4:sw=4
# This file is part of ReText
# Copyright: Dmitry Shachnev 2012-2015
# License: GNU GPL v2 or higher
from ReText import monofont, globalSettings, tablemode, DOCTYPE_MARKDOWN
from PyQt5.QtCore import QPoint, QSize, Qt
from PyQt5.QtGui import QColor, QPainter, QPalette, QTextCursor, QTextFormat
from PyQt5.QtWidgets import QLabel, QTextEdit, QWidget
def documentIndentMore(document, cursor, globalSettings=globalSettings):
if cursor.hasSelection():
block = document.findBlock(cursor.selectionStart())
end = document.findBlock(cursor.selectionEnd()).next()
cursor.beginEditBlock()
while block != end:
cursor.setPosition(block.position())
if globalSettings.tabInsertsSpaces:
cursor.insertText(' ' * globalSettings.tabWidth)
else:
cursor.insertText('\t')
block = block.next()
cursor.endEditBlock()
else:
indent = globalSettings.tabWidth - (cursor.positionInBlock()
% globalSettings.tabWidth)
if globalSettings.tabInsertsSpaces:
cursor.insertText(' ' * indent)
else:
cursor.insertText('\t')
def documentIndentLess(document, cursor, globalSettings=globalSettings):
if cursor.hasSelection():
block = document.findBlock(cursor.selectionStart())
end = document.findBlock(cursor.selectionEnd()).next()
else:
block = document.findBlock(cursor.position())
end = block.next()
cursor.beginEditBlock()
while block != end:
cursor.setPosition(block.position())
if document.characterAt(cursor.position()) == '\t':
cursor.deleteChar()
else:
pos = 0
while document.characterAt(cursor.position()) == ' ' \
and pos < globalSettings.tabWidth:
pos += 1
cursor.deleteChar()
block = block.next()
cursor.endEditBlock()
class ReTextEdit(QTextEdit):
def __init__(self, parent):
QTextEdit.__init__(self)
self.parent = parent
self.undoRedoActive = False
self.tableModeEnabled = False
self.setFont(monofont)
self.setAcceptRichText(False)
self.marginx = (self.cursorRect(self.cursorForPosition(QPoint())).topLeft().x()
+ self.fontMetrics().width(" "*globalSettings.rightMargin))
self.lineNumberArea = LineNumberArea(self)
self.infoArea = InfoArea(self)
self.document().blockCountChanged.connect(self.updateLineNumberAreaWidth)
self.updateLineNumberAreaWidth()
self.cursorPositionChanged.connect(self.highlightCurrentLine)
self.document().contentsChange.connect(self.contentsChange)
def paintEvent(self, event):
if not globalSettings.rightMargin:
return QTextEdit.paintEvent(self, event)
painter = QPainter(self.viewport())
painter.setPen(QColor(220, 210, 220))
y1 = self.rect().topLeft().y()
y2 = self.rect().bottomLeft().y()
painter.drawLine(self.marginx, y1, self.marginx, y2)
QTextEdit.paintEvent(self, event)
def scrollContentsBy(self, dx, dy):
QTextEdit.scrollContentsBy(self, dx, dy)
self.lineNumberArea.repaint()
def lineNumberAreaPaintEvent(self, event):
painter = QPainter(self.lineNumberArea)
painter.fillRect(event.rect(), Qt.cyan)
cursor = QTextCursor(self.document())
cursor.movePosition(QTextCursor.Start)
atEnd = False
while not atEnd:
rect = self.cursorRect(cursor)
block = cursor.block()
if block.isVisible():
number = str(cursor.blockNumber() + 1)
painter.setPen(Qt.darkCyan)
painter.drawText(0, rect.top(), self.lineNumberArea.width()-2,
self.fontMetrics().height(), Qt.AlignRight, number)
cursor.movePosition(QTextCursor.EndOfBlock)
atEnd = cursor.atEnd()
if not atEnd:
cursor.movePosition(QTextCursor.NextBlock)
def getHighlighter(self):
return self.parent.highlighters[self.parent.ind]
def contextMenuEvent(self, event):
text = self.toPlainText()
dictionary = self.getHighlighter().dictionary
if (dictionary is None) or not text:
return QTextEdit.contextMenuEvent(self, event)
oldcursor = self.textCursor()
cursor = self.cursorForPosition(event.pos())
pos = cursor.positionInBlock()
if pos == len(text): pos -= 1
curchar = text[pos]
isalpha = curchar.isalpha()
cursor.select(QTextCursor.WordUnderCursor)
if not isalpha or (oldcursor.hasSelection() and
oldcursor.selectedText() != cursor.selectedText()):
return QTextEdit.contextMenuEvent(self, event)
self.setTextCursor(cursor)
word = cursor.selectedText()
if not word or dictionary.check(word):
self.setTextCursor(oldcursor)
return QTextEdit.contextMenuEvent(self, event)
suggestions = dictionary.suggest(word)
actions = [self.parent.act(sug, trig=self.fixWord(sug)) for sug in suggestions]
menu = self.createStandardContextMenu()
menu.insertSeparator(menu.actions()[0])
for action in actions[::-1]:
menu.insertAction(menu.actions()[0], action)
menu.exec(event.globalPos())
def fixWord(self, correctword):
return lambda: self.insertPlainText(correctword)
def keyPressEvent(self, event):
key = event.key()
cursor = self.textCursor()
if event.text() and self.tableModeEnabled:
cursor.beginEditBlock()
if key == Qt.Key_Tab:
documentIndentMore(self.document(), cursor)
elif key == Qt.Key_Backtab:
documentIndentLess(self.document(), cursor)
elif key == Qt.Key_Return and not cursor.hasSelection():
if event.modifiers() & Qt.ShiftModifier:
# Insert Markdown-style line break
markupClass = self.parent.getMarkupClass()
if markupClass and markupClass.name == DOCTYPE_MARKDOWN:
cursor.insertText(' ')
if event.modifiers() & Qt.ControlModifier:
cursor.insertText('\n')
else:
self.handleReturn(cursor)
else:
QTextEdit.keyPressEvent(self, event)
if event.text() and self.tableModeEnabled:
cursor.endEditBlock()
def handleReturn(self, cursor):
# Select text between the cursor and the line start
cursor.movePosition(QTextCursor.StartOfBlock, QTextCursor.KeepAnchor)
text = cursor.selectedText()
length = len(text)
pos = 0
while pos < length and (text[pos] in (' ', '\t')
or text[pos:pos+2] in ('* ', '- ')):
pos += 1
# Reset the cursor
cursor = self.textCursor()
cursor.insertText('\n'+text[:pos])
self.ensureCursorVisible()
def lineNumberAreaWidth(self):
if not globalSettings.lineNumbersEnabled:
return 0
cursor = QTextCursor(self.document())
cursor.movePosition(QTextCursor.End)
digits = len(str(cursor.blockNumber() + 1))
return 5 + self.fontMetrics().width('9') * digits
def updateLineNumberAreaWidth(self, blockcount=0):
self.lineNumberArea.repaint()
self.setViewportMargins(self.lineNumberAreaWidth(), 0, 0, 0)
def resizeEvent(self, event):
QTextEdit.resizeEvent(self, event)
rect = self.contentsRect()
self.lineNumberArea.setGeometry(rect.left(), rect.top(),
self.lineNumberAreaWidth(), rect.height())
self.infoArea.updateTextAndGeometry()
def highlightCurrentLine(self):
if not globalSettings.highlightCurrentLine:
return self.setExtraSelections([])
        selection = QTextEdit.ExtraSelection()
lineColor = QColor(255, 255, 200)
selection.format.setBackground(lineColor)
selection.format.setProperty(QTextFormat.FullWidthSelection, True)
selection.cursor = self.textCursor()
selection.cursor.clearSelection()
self.setExtraSelections([selection])
def enableTableMode(self, enable):
self.tableModeEnabled = enable
def backupCursorPositionOnLine(self):
return self.textCursor().positionInBlock()
def restoreCursorPositionOnLine(self, positionOnLine):
cursor = self.textCursor()
cursor.setPosition(cursor.block().position() + positionOnLine)
self.setTextCursor(cursor)
def contentsChange(self, pos, removed, added):
if self.tableModeEnabled:
markupClass = self.parent.getMarkupClass()
docType = markupClass.name if markupClass else None
cursorPosition = self.backupCursorPositionOnLine()
tablemode.adjustTableToChanges(self.document(), pos, added - removed, docType)
self.restoreCursorPositionOnLine(cursorPosition)
class LineNumberArea(QWidget):
def __init__(self, editor):
QWidget.__init__(self, editor)
self.editor = editor
def sizeHint(self):
return QSize(self.editor.lineNumberAreaWidth(), 0)
def paintEvent(self, event):
if globalSettings.lineNumbersEnabled:
return self.editor.lineNumberAreaPaintEvent(event)
class InfoArea(QLabel):
def __init__(self, editor):
QWidget.__init__(self, editor)
self.editor = editor
self.editor.cursorPositionChanged.connect(self.updateTextAndGeometry)
self.updateTextAndGeometry()
self.setAutoFillBackground(True)
palette = self.palette()
palette.setColor(QPalette.Window, QColor(0xaa, 0xff, 0x55, 0xaa))
self.setPalette(palette)
def updateTextAndGeometry(self):
text = self.getText()
self.setText(text)
viewport = self.editor.viewport()
metrics = self.fontMetrics()
width = metrics.width(text)
height = metrics.height()
self.resize(width, height)
rightSide = viewport.width() + self.editor.lineNumberAreaWidth()
self.move(rightSide - width, viewport.height() - height)
self.setVisible(not globalSettings.useFakeVim)
def getText(self):
template = '%d : %d'
cursor = self.editor.textCursor()
block = cursor.blockNumber() + 1
position = cursor.positionInBlock()
return template % (block, position)
| gpl-3.0 | 7,018,855,425,577,568,000 | 33.270677 | 81 | 0.740237 | false | 3.244128 | false | false | false |
hoh/Billabong | billabong/core.py | 1 | 3462 | # Copyright (c) 2015 "Hugo Herter http://hugoherter.com"
#
# This file is part of Billabong.
#
# Billabong is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""High-level interface above inventory and stores."""
import os.path
from uuid import uuid4
from base64 import b64encode, b64decode
from datetime import datetime
import time
import magic
from .encryption import random_key, copy_and_encrypt, decrypt_blob
from .check import compute_hash
class Billabong:
"""High-level interface above inventory and stores."""
def __init__(self, inventory, stores):
"""Initialize a Billabong object with given inventory and stores."""
self.inventory = inventory
self.stores = stores
    def add_file(self, filepath, *, key=None, tags=None):
"""Import a file into Billabong and return the corresponding record."""
# Resolve symlinks
realpath = os.path.realpath(filepath)
if not os.path.isfile(realpath):
raise FileNotFoundError
if key is None:
key = random_key()
with open(realpath, 'rb') as source_file:
file_hash = compute_hash(source_file)
storage = self.stores[0]
blob_hash = copy_and_encrypt(storage, realpath, key)
record = {
'key': b64encode(key).decode(),
'hash': 'sha256-' + file_hash.hexdigest(),
'blob': blob_hash,
'size': os.path.getsize(realpath),
'datetime': datetime.utcnow(),
'timestamp': time.time(),
'id': uuid4().hex,
'info': {
'type': magic.from_file(realpath),
'mimetype': magic.from_file(realpath, mime=True),
'filename': os.path.basename(filepath),
'path': filepath,
'tags': tags if tags else [],
}
}
self.inventory.save_record(record)
return record
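    # Illustrative usage (names below are hypothetical, shown for context):
    #   bb = Billabong(inventory, stores)
    #   record = bb.add_file("/tmp/photo.jpg", tags=["holiday"])
    #   data = bb.read(record["id"])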
def get(self, id_):
"""Return a Record object from an id_."""
return self.inventory.get_record(id_)
def delete(self, id_):
"""Delete a Record and the corresponding blob."""
blob_id = self.inventory.get_record(id_)['blob']
for store in self.stores:
try:
store.delete(blob_id)
except (NotImplementedError, FileNotFoundError):
pass
self.inventory.delete(id_)
def read(self, id_, length=None, offset=0, chunk_size=1024):
"""Return data from the blob of this file."""
record = self.inventory.get_record(id_)
key = b64decode(record['key'])
blob_id = record['blob']
for store in self.stores:
if store.contains(blob_id):
return decrypt_blob(store, blob_id, key,
offset=offset, length=length)
raise FileNotFoundError
| agpl-3.0 | 4,422,107,054,092,524,000 | 32.61165 | 79 | 0.612074 | false | 4.206561 | false | false | false |
maduma/wism | welcome.py | 1 | 3041 | # Copyright 2015 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, json, requests
from flask import Flask, jsonify, request
app = Flask(__name__)
VCAP_SERVICES = "{\"language_translation\":[{\"name\":\"Language Translation-bc\",\"label\":\"language_translation\",\"tags\":[\"watson\",\"ibm_created\",\"ibm_dedicated_public\"],\"plan\":\"standard\",\"credentials\":{\"url\":\"https://gateway.watsonplatform.net/language-translation/api\",\"username\":\"b22a962b-bb4f-489c-b66e-3d572f5659cb\",\"password\":\"xjzQ1pI4BvUE\"}}]}"
def getVcapServices():
if 'VCAP_SERVICES' in os.environ:
val = os.environ['VCAP_SERVICES']
else: # local machine
val = VCAP_SERVICES
return json.loads(val)
def callLanguageTranslation(api, data=None):
services = getVcapServices()
url = services['language_translation'][0]['credentials']['url']
user = services['language_translation'][0]['credentials']['username']
password = services['language_translation'][0]['credentials']['password']
auth = (user, password)
headers = {'accept': 'application/json'}
if not data:
r = requests.get(url + api, auth=auth, headers=headers)
val = r.json()
else:
r = requests.post(url + api, json=data, auth=auth, headers=headers)
val = r.json()
return jsonify(val)
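# Example (illustrative): translating an arbitrary French sentence through the
# helper above; the sample text is not part of the original app.
#
#     callLanguageTranslation('/v2/translate',
#                             {'source': 'fr', 'target': 'en',
#                              'text': ['Bonjour le monde']})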
@app.route('/', methods=['GET', 'POST'])
def Welcome():
if request.method == 'POST':
text = request.form['wiym']
data = {'source': 'fr', 'target': 'en', 'text': [text]}
        return callLanguageTranslation('/v2/translate', data)
else:
return app.send_static_file('index.html')
@app.route('/myapp')
def WelcomeToMyapp():
return callLanguageTranslation('/v2/identifiable_languages')
@app.route('/myapp1')
def WelcomeToMyapp1():
data = {'source': 'fr', 'target': 'en', 'text': ['Bonjour mon pote']}
return callLanguageTranslation('/v2/translate', data)
@app.route('/env')
def GetEnv():
val = {'ENV': dict(os.environ)}
val['VCAP_SERVICES'] = getVcapServices()
return jsonify(val)
@app.route('/api/people')
def GetPeople():
    people = [
        {'name': 'John', 'age': 28},
        {'name': 'Bill', 'age': 26}
    ]
    return jsonify(results=people)
@app.route('/api/people/<name>')
def SayHello(name):
message = {
'message': 'Hello ' + name
}
return jsonify(results=message)
port = os.getenv('PORT', '5000')
if __name__ == "__main__":
app.run(host='0.0.0.0', port=int(port), debug=True)
| apache-2.0 | 1,431,935,181,249,914,000 | 34.776471 | 379 | 0.652417 | false | 3.39019 | false | false | false |
texttochange/vusion-backend | vusion/component/tests/test_flying_message_manager.py | 1 | 1160 | from bson import ObjectId
from redis import Redis
from twisted.trial.unittest import TestCase
from vusion.component import FlyingMessageManager
class FlyingMessageTestCase(TestCase):
def setUp(self):
self.redis = Redis()
self.prefix_key = 'unittest:testprogram'
self.fm = FlyingMessageManager(
self.prefix_key,
self.redis)
def tearDown(self):
self.clearData()
def clearData(self):
keys = self.redis.keys("%s:*" % self.prefix_key)
for key in keys:
self.redis.delete(key)
def test_append_get(self):
history_id = ObjectId()
self.fm.append_message_data('1', history_id, 3, 'ack')
saved_history_id, credit, status = self.fm.get_message_data('1')
self.assertEqual(history_id, saved_history_id)
self.assertEqual(credit, 3)
self.assertEqual(status, 'ack')
def test_append_get_not_present(self):
saved_history_id, credit, status = self.fm.get_message_data('1')
self.assertTrue(saved_history_id is None)
self.assertTrue(credit == 0)
self.assertTrue(status is None) | bsd-3-clause | -9,167,631,510,292,101,000 | 28.769231 | 72 | 0.631897 | false | 3.754045 | true | false | false |
sergiopasra/megaradrp | megaradrp/processing/cube.py | 2 | 13175 | #
# Copyright 2017-2021 Universidad Complutense de Madrid
#
# This file is part of Megara DRP
#
# SPDX-License-Identifier: GPL-3.0+
# License-Filename: LICENSE.txt
#
"""
Interpolation method based on:
'Hex-Splines: A Novel Spline Family for Hexagonal Lattices'
van de Ville et al. IEEE Transactions on Image Processing 2004, 13, 6
"""
from __future__ import print_function
import numpy as np
from scipy import signal
import astropy.units as u
import astropy.wcs
from numina.frame.utils import copy_img
from megaradrp.instrument.focalplane import FocalPlaneConf
# from megaradrp.datamodel import MegaraDataModel
from megaradrp.core.utils import atleast_2d_last
import megaradrp.processing.fixrss as fixrss
import megaradrp.processing.hexgrid as hg
import megaradrp.processing.hexspline as hspline
import megaradrp.instrument.constants as cons
# Helper function for equivalence conversion
GTC_PLATESCALE = u.plate_scale(cons.GTC_FC_A_PLATESCALE)
# Size scale of the spaxel grid in arcseconds
HEX_SCALE = cons.SPAXEL_SCALE.to(u.arcsec, GTC_PLATESCALE).value
def calc_matrix_from_fiberconf(fpconf, refid=614):
"""
Compute hexagonal grid matrix from FocalPlaneConf
Parameters
----------
fpconf : megaradrp.instrument.focalplane.FocalPlaneConf
refid : int
fiber ID of reference fiber for grid coordinates
    Returns
    -------
    r0l : numpy.ndarray
        2xN array with fiber positions on the hexagonal grid, in spaxel
        units relative to the reference fiber
    ref : tuple of float
        (x, y) position of the reference fiber in the same units
    """
# TODO: This should be in FIBERCONFS...
spos1_x = []
spos1_y = []
for fiber in fpconf.connected_fibers():
spos1_x.append(fiber.x)
spos1_y.append(fiber.y)
spos1_x = np.asarray(spos1_x)
spos1_y = np.asarray(spos1_y)
# FIBER in LOW LEFT corner is 614
ref_fiber = fpconf.fibers[refid]
minx, miny = ref_fiber.x, ref_fiber.y
if fpconf.funit == 'arcsec':
# arcsec
ascale = HEX_SCALE
else:
# mm
# fpconf.funit == 'mm'
ascale = cons.SPAXEL_SCALE.to(u.mm).value
ref = minx / ascale, miny / ascale
rpos1_x = (spos1_x - minx) / ascale
rpos1_y = (spos1_y - miny) / ascale
r0l_1 = np.array([rpos1_x, rpos1_y])
return r0l_1, ref
def create_cube(r0l, zval, p=1, target_scale=1.0):
"""
Parameters
----------
    r0l : numpy.ndarray
        2xN array with the hexagonal grid coordinates of the fibers
    zval : numpy.ndarray
        values per fiber; the last axis is the dispersion axis
    p : {1, 2}
        interpolation order: 1 for nearest-neighbor, 2 for linear
    target_scale : float, optional
    Returns
    -------
    img : numpy.ndarray
        resampled image on a rectangular grid, dispersion axis last
Raises
------
ValueError
If `p` > 2
"""
# geometry
# Interpolation method. Allowed values are:
# P = 1 NN
# P = 2 Linear
if p > 2:
raise ValueError('p > 2 not implemented')
rr1 = target_scale * np.array([[1.0, 0], [0, 1]]) # Unit scale
# compute extremes of hexgrid to rectangular grid
# with pixel size 'scale'
(i1min, i1max), (j1min, j1max) = hg.hexgrid_extremes(r0l, target_scale)
# Rectangular grid
mk1 = np.arange(i1min, i1max + 1)
mk2 = np.arange(j1min, j1max + 1)
crow = len(mk1)
ccol = len(mk2)
# Result image
# Add third last axis
zval2 = atleast_2d_last(zval)
# disp axis is last axis...
dk = np.zeros((crow, ccol, zval2.shape[-1]))
# print('result shape is ', dk.shape)
# r1k = rr1 @ sk
sk = np.flipud(np.transpose([np.tile(mk1, len(mk2)), np.repeat(mk2, len(mk1))]).T) # x y
r1k = np.dot(rr1, sk)
# Prefiltering
# For p = 1, prefilter coefficients with p = 1, coeff = 1
# For p = 2, prefilter coefficients with p = 2, coeff = 1
# No prefiltering in zval2 is required if p <= 2
rbs = hspline.rescaling_kernel(p, scale=target_scale)
# Loop to compute integrals...
for s, r in zip(sk.T, r1k.T):
allpos = -(r0l - r[:, np.newaxis])
we = np.abs((rbs.ev(allpos[1], allpos[0])))
dk[s[1] - i1min, s[0] - j1min] = np.sum(we[:, np.newaxis] * zval2, axis=0)
# Postfiltering
# For p = 1, final image in NN, postfilter coefficients with n = 1
# For p = 2, final image is linear, postfilter coefficients with n = 3
#
if p == 1:
# Coefficients post filtering to n = 2 * p - 1 == 1
cpk = dk
# Nearest-neighbor samples equal to coefficients
img = cpk
elif p == 2:
# Coefficients post filtering to n = 2 * p - 1 == 3
cpk = np.zeros_like(dk)
# last axis
for k in range(dk.shape[-1]):
cpk[..., k] = signal.cspline2d(dk[..., k])
# Linear samples equal to coefficients
img = cpk
else:
raise ValueError('p > 2 not implemented')
return img
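# Minimal sketch of calling create_cube directly (illustrative; the fiber
# positions and values below are synthetic, not real MEGARA data):
#
#     r0l = np.array([[0.0, 1.0, 0.5], [0.0, 0.0, 0.8]])   # 2xN grid coords
#     zval = np.ones((3, 10))                               # 3 fibers, 10 wavelengths
#     img = create_cube(r0l, zval, p=1, target_scale=1.0)   # -> (rows, cols, 10)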
def create_cube_from_array(rss_data, fiberconf, p=1, target_scale_arcsec=1.0, conserve_flux=True):
"""
Create a cube array from a 2D or 1D array and focal plane configuration
Parameters
----------
    rss_data : numpy.ndarray
        RSS values, one row per fiber
    fiberconf : megaradrp.instrument.focalplane.FocalPlaneConf
    p : {1, 2}
        interpolation order: 1 for nearest-neighbor, 2 for linear
    target_scale_arcsec : float
        pixel size of the output grid, in arcseconds
    conserve_flux : bool
        if True, scale the output so that flux is conserved
Returns
-------
np.ndarray
"""
target_scale = target_scale_arcsec / HEX_SCALE
conected = fiberconf.connected_fibers()
rows = [conf.fibid - 1 for conf in conected]
rss_data = atleast_2d_last(rss_data)
region = rss_data[rows, :]
r0l, _ = calc_matrix_from_fiberconf(fiberconf)
cube_data = create_cube(r0l, region[:, :], p, target_scale)
if conserve_flux:
# scale with areas
cube_data *= (target_scale ** 2 / hg.HA_HEX)
# Move axis to put WL first
# so that is last in FITS
result = np.moveaxis(cube_data, 2, 0)
    result = result.astype('float32')
return result
def create_cube_from_rss(rss, p=1, target_scale_arcsec=1.0, conserve_flux=True):
"""
Create a cube HDUlist from a RSS HDUList
Parameters
----------
rss : fits.HDUList
p : {1, 2}
target_scale_arcsec : float, optional
conserve_flux : bool, optional
Returns
-------
fits.HDUList
"""
fiberconf = FocalPlaneConf.from_img(rss)
result_arr = create_cube_from_array(
rss[0].data, fiberconf, p=p,
target_scale_arcsec=target_scale_arcsec,
conserve_flux=conserve_flux
)
cube = copy_img(rss)
cube[0].data = result_arr
sky_header = rss['FIBERS'].header.copy()
spec_header = rss[0].header
# Update values of sky WCS
# CRPIX1, CRPIX2
# CDELT1, CDELT2
# minx, miny
# After shifting the array
# refpixel is -i1min, -j1min
target_scale = target_scale_arcsec / HEX_SCALE
r0l, (refx, refy) = calc_matrix_from_fiberconf(fiberconf)
(i1min, i1max), (j1min, j1max) = hg.hexgrid_extremes(r0l, target_scale)
crpix_x = -refx / target_scale - j1min
crpix_y = -refy / target_scale - i1min
# Map the center of original field
sky_header['CRPIX1'] = crpix_x
sky_header['CRPIX2'] = crpix_y
sky_header['CDELT1'] = -target_scale_arcsec / 3600.0
sky_header['CDELT2'] = target_scale_arcsec / 3600.0
# Merge headers
# 2D from FIBERS
# WL from PRIMARY
merge_wcs(sky_header, spec_header, out=cube[0].header)
# done
return cube
def merge_wcs(hdr_sky, hdr_spec, out=None):
"""Merge sky WCS with spectral WCS
Works only with main WCS and B WCS
"""
if out is None:
hdr = hdr_spec.copy()
else:
hdr = out
allw = astropy.wcs.find_all_wcs(hdr_spec)
wcsnames = [w.wcs.alt for w in allw]
for ss in wcsnames:
merge_wcs_alt(hdr_sky, hdr_spec, hdr, spec_suffix=ss)
return hdr
def merge_wcs_alt(hdr_sky, hdr_spec, out, spec_suffix=' '):
"""Merge sky WCS with spectral WCS"""
hdr = out
if spec_suffix == ' ':
sf = ''
else:
sf = spec_suffix
# Extend header for third axis
c_crpix = 'Pixel coordinate of reference point'
c_cunit = 'Units of coordinate increment and value'
wcsname_s = f'WCSNAME{sf}'
if wcsname_s in hdr:
prev = wcsname_s
else:
prev = f'CTYPE1{sf}'
hdr.set(f'WCSAXES{sf}', value=3, before=prev)
if sf != '':
hdr.set(f'WCSNAME{sf}', value='', after='PC3_3')
hdr.set(f'CTYPE1{sf}', value='', after=f'WCSNAME{sf}')
hdr.set(f'CRPIX1{sf}', value=1.0, after=f'CTYPE1{sf}')
hdr.set(f'CRVAL1{sf}', value=0.0, after=f'CRPIX1{sf}')
hdr.set(f'CDELT1{sf}', value=1.0, after=f'CRVAL1{sf}')
hdr.set(f'CUNIT1{sf}', value='deg', comment=c_cunit, after=f'CDELT1{sf}')
hdr.set(f'CTYPE2{sf}', after=f'CUNIT1{sf}')
if sf != '':
hdr.set(f'CRPIX2{sf}', value=1.0, after=f'CTYPE2{sf}')
hdr.set(f'CRVAL2{sf}', value=0.0, after=f'CRPIX2{sf}')
hdr.set(f'CDELT2{sf}', value=1.0, after=f'CRVAL2{sf}')
hdr.set(f'CUNIT2{sf}', value='deg', comment=c_cunit, after=f'CDELT2{sf}')
hdr.set(f'CRPIX2{sf}', value=1, comment=c_crpix, after=f'CTYPE2{sf}')
hdr.set(f'CTYPE3{sf}', after=f'CUNIT2{sf}')
hdr.set(f'CRPIX3{sf}', value=1, comment=c_crpix, after=f'CTYPE3{sf}')
hdr.set(f'CRVAL3{sf}', after=f'CRPIX3{sf}')
hdr.set(f'CDELT3{sf}', after=f'CRVAL3{sf}')
hdr.set(f'CUNIT3{sf}', comment=c_cunit, after=f'CDELT3{sf}')
c_pc = 'Coordinate transformation matrix element'
hdr.set(f'PC1_1{sf}', value=1.0, comment=c_pc, after=f'CUNIT3{sf}')
hdr.set(f'PC1_2{sf}', value=0.0, comment=c_pc, after=f'PC1_1{sf}')
hdr.set(f'PC2_1{sf}', value=0.0, comment=c_pc, after=f'PC1_2{sf}')
hdr.set(f'PC2_2{sf}', value=1.0, comment=c_pc, after=f'PC2_1{sf}')
hdr.set(f'PC3_3{sf}', value=1.0, comment=c_pc, after=f'PC2_2{sf}')
# Mapping, which keyword comes from each header
mappings = [('CRPIX3', 'CRPIX1', sf, 0),
('CDELT3', 'CDELT1', sf, 0),
('CRVAL3', 'CRVAL1', sf, 0),
('CTYPE3', 'CTYPE1', sf, 0),
('CRPIX1', 'CRPIX1', '', 1),
('CDELT1', 'CDELT1', '', 1),
('CRVAL1', 'CRVAL1', '', 1),
('CTYPE1', 'CTYPE1', '', 1),
('CUNIT3', 'CUNIT1', sf, 0),
('PC1_1', 'PC1_1', '', 1),
('PC1_2', 'PC1_2', '', 1),
('CRPIX2', 'CRPIX2', '', 1),
('CDELT2', 'CDELT2', '', 1),
('CRVAL2', 'CRVAL2', '', 1),
('CTYPE2', 'CTYPE2', '', 1),
('CUNIT2', 'CUNIT2', '', 1),
('PC2_1', 'PC2_1', '', 1),
('PC2_2', 'PC2_2', '', 1),
('LONPOLE', 'LONPOLE', '', 1),
('LATPOLE', 'LATPOLE', '', 1),
('RADESYS', 'RADESYS', '', 1),
('EQUINOX', 'EQUINOX', '', 1),
('WCSNAME', 'WCSNAME', sf, 0),
('specsys', 'SPECSYS', sf, 0),
('ssysobs', 'SSYSOBS', sf, 0),
('velosys', 'VELOSYS', sf, 0)
]
hdr_in = dict()
hdr_in[0] = hdr_spec
hdr_in[1] = hdr_sky
for dest, orig, key, idx in mappings:
hdr_orig = hdr_in[idx]
korig = orig + key
kdest = dest + sf
try:
hdr[kdest] = hdr_orig[korig], hdr_orig.comments[korig]
except KeyError:
# Ignoring errors. Copy only if keyword exists
pass
return hdr
def main(args=None):
import argparse
import astropy.io.fits as fits
# parse command-line options
parser = argparse.ArgumentParser(prog='convert_rss_cube')
# positional parameters
methods = {'nn': 1, 'linear': 2}
parser.add_argument("rss",
help="RSS file with fiber traces",
type=argparse.FileType('rb'))
parser.add_argument('-p', '--pixel-size', type=float, default=0.3,
metavar='PIXEL_SIZE',
help="Pixel size in arc seconds")
parser.add_argument('-o', '--outfile', default='cube.fits',
help="Name of the output cube file")
parser.add_argument('-d', '--disable-scaling', action='store_true',
help="Disable flux conservation")
parser.add_argument('-m', '--method', action='store', choices=['nn', 'linear'],
default='nn', help="Method of interpolation")
parser.add_argument('--wcs-pa-from-header', action='store_true',
help="Use PA angle from header", dest='pa_from_header')
parser.add_argument('--fix-missing', action='store_true',
help="Interpolate missing fibers")
args = parser.parse_args(args=args)
target_scale = args.pixel_size # Arcsec
p = methods[args.method]
print(f'interpolation method is "{args.method}"')
print('target scale is', target_scale, 'arcsec')
conserve_flux = not args.disable_scaling
with fits.open(args.rss) as rss:
if not args.pa_from_header:
# Doing it here so the change is propagated to
# all alternative coordinates
print('recompute WCS from IPA')
ipa = rss['PRIMARY'].header['IPA']
rss['FIBERS'].header = fixrss.recompute_wcs(rss['FIBERS'].header, ipa=ipa)
if args.fix_missing:
fibid = 623
print(f'interpolate fiber {fibid}')
rss = fixrss.fix_missing_fiber(rss, fibid)
cube = create_cube_from_rss(rss, p, target_scale, conserve_flux=conserve_flux)
cube.writeto(args.outfile, overwrite=True)
if __name__ == '__main__':
main()
| gpl-3.0 | 6,621,899,695,352,916,000 | 30.594724 | 98 | 0.577989 | false | 3.029432 | false | false | false |
airbnb/knowledge-repo | knowledge_repo/repositories/gitrepository.py | 1 | 21111 | import os
import shutil
import logging
import re
import socket
from io import open
import git
import yaml
from ..repository import KnowledgeRepository
from ..utils.encoding import encode
logger = logging.getLogger(__name__)
class GitKnowledgeRepository(KnowledgeRepository):
_registry_keys = ['git']
TEMPLATES = {
'README.md': os.path.abspath(os.path.join(os.path.dirname(__file__), '../templates', 'repository_readme.md')),
'.knowledge_repo_config.yml': os.path.abspath(os.path.join(os.path.dirname(__file__), '../templates', 'repository_config.yml'))
}
@classmethod
def create(cls, uri):
path = uri.replace('git://', '')
if os.path.exists(path):
try:
repo = git.Repo(path)
logger.warning("Repository already exists for uri '{}'. Checking if configuration is needed...".format(uri))
except git.InvalidGitRepositoryError:
if os.path.isdir(path):
logger.warning("Upgrading existing directory at '{}' to a git knowledge repository...".format(path))
else:
raise RuntimeError("File exists at nominated path: {}. Cannot proceed with repository initialization.".format(path))
repo = git.Repo.init(path, mkdir=True)
# Add README and configuration templates
added_files = 0
for filename, template in cls.TEMPLATES.items():
target = os.path.join(path, filename)
if not os.path.exists(target):
shutil.copy(template, target)
repo.index.add([filename])
added_files += 1
else:
logger.warning("Not overriding existing file '{}'.".format(filename))
if added_files > 0:
repo.index.commit("Initial creation of knowledge repository.")
return GitKnowledgeRepository(path)
def init(self, config='git:///.knowledge_repo_config.yml', auto_create=False):
self.config.update_defaults(published_branch='master')
self.config.update_defaults(remote_name='origin')
self.auto_create = auto_create
self.path = self.uri.replace('git://', '')
# Check if a legacy configuration exists, and if so, print a warning
try:
self.git_read('.knowledge_repo_config.py')
logger.warning(
"This knowledge repository has a legacy configuration file that "
"will not be loaded due to security issues "
"(.knowledge_repo_config.py). This may lead to unexpected "
"behavior. Please talk to your local Knowledge Repo admins "
"for advice if you are unsure."
)
        except Exception:
pass
if config.startswith('git:///'):
assert config.endswith('.yml'), "In-repository configuration must be a YAML file."
try:
self.config.update(yaml.safe_load(self.git_read(config.replace('git:///', ''))))
except KeyError:
logger.warning("Repository missing configuration file: {}".format(config))
else:
self.config.update(config)
@property
def path(self):
return self._path
@path.setter
def path(self, path):
assert isinstance(path, str), "The path specified must be a string."
path = os.path.abspath(os.path.expanduser(path))
if not os.path.exists(path):
path = os.path.abspath(path)
if self.auto_create:
self.create(path)
else:
raise ValueError("Provided path '{}' does not exist.".format(path))
assert self.__is_valid_repo(
path), "Provided path '{}' is not a valid repository.".format(path)
self._path = path
self.uri = path # Update uri to point to absolute path of repository
def __is_valid_repo(self, path):
try:
git.Repo(path)
return True
except git.InvalidGitRepositoryError:
return False
@property
def git(self):
if not hasattr(self, '_git'):
self._git = git.Repo(self.path)
return self._git
@property
def git_has_remote(self):
return hasattr(self.git.remotes, self.config.remote_name)
@property
def git_remote(self):
if self.git_has_remote:
return self.git.remote(self.config.remote_name)
return None
# ----------- Repository actions / state ------------------------------------
@property
def revision(self):
c = self.git.commit()
return "{}_{}".format(str(c.committed_date), c.hexsha)
def update(self, branch=None):
branch = branch or self.config.published_branch
if not self.git_has_remote:
return
if not self.__remote_available:
logger.warning("Cannot connect to remote repository hosted on {}. Continuing locally with potentially outdated code.".format(
self.__remote_host))
return
logger.info("Fetching updates to the knowledge repository...")
self.git_remote.fetch()
current_branch = self.git.active_branch
self.git.branches[branch].checkout()
self.git_remote.pull(branch)
current_branch.checkout()
def set_active_draft(self, path): # TODO: deprecate
branch = self.git_branch_for_post(self._kp_path(path))
self.config.published_branch = branch.name
branch.checkout()
@property
def status(self):
return {
'branch': self.git.active_branch.name,
'changed_files': [str(diff.a_path) for diff in self.git_diff()]
}
@property
def status_message(self):
status = self.status
message = "Currently checked out on the `{branch}` branch.".format(branch=status['branch'])
if len(status['changed_files']) > 0:
message += "Files modified: \n {modified}".format(modified='\n\t- '.join(status['changed_files']))
return message
# ---------------- Git properties and actions -------------------------
def git_dir(self, prefix=None, commit=None):
commit = self.git.commit(commit or self.config.published_branch)
tree = commit.tree
if prefix is not None:
tree = tree[prefix]
return [o.path for o in tree.traverse(
prune=lambda i, d: isinstance(i, git.Submodule) or os.path.dirname(i.path).endswith('.kp'),
visit_once=False,
predicate=lambda i, d: i.path.endswith('.kp')
)
]
def git_read(self, path, commit=None):
commit = self.git.commit(commit or self.config.published_branch)
return commit.tree[path].data_stream.read()
@property
def git_local_branches(self):
unmerged_branches = [branch.replace(
'*', '').strip() for branch in self.git.git.branch('--no-merged', self.config.published_branch).split('\n')]
return unmerged_branches
def __get_path_from_ref(self, ref):
refs = ref.split('/')
for i, ref in enumerate(refs):
if ref.endswith('.kp'):
break
if not ref.endswith('.kp'):
return None
return '/'.join(refs[:i + 1])
def git_local_posts(self, branches=None, as_dict=False):
if branches is None:
branches = self.git_local_branches
posts = {}
for branch in branches:
posts[branch] = set([self.__get_path_from_ref(diff.a_path) for diff in self.git_diff(branch)])
posts[branch].discard(None)
if not as_dict:
out_posts = set()
for branch, ps in posts.items():
out_posts.update(ps)
return list(out_posts)
return posts
def git_branch_for_post(self, path, interactive=False):
if path is None:
return None
if path in self.git_local_branches:
return self.git_branch(path)
branches = []
for branch in self.git_local_branches:
if path in self.git_local_posts(branches=[branch]):
branches.append(branch)
if len(branches) == 0:
if path in self.dir():
return self.git_branch(self.config.published_branch)
return None
if len(branches) == 1:
return self.git_branch(branches[0])
# Deal with ambiguity
if interactive:
print("There are multiple branches for post '{}'.".format(path))
for i, branch in enumerate(branches):
print("{}. {}".format(i, branch))
response = None
while not isinstance(response, int):
response = input('Please select the branch you would like to use: ')
try:
response = int(response)
                except ValueError:
response = None
else:
response = 0
return self.git_branch(branches[response])
def git_branch(self, branch=None):
if isinstance(branch, git.Head):
return branch
if branch is None:
return self.git.active_branch
if not isinstance(branch, str):
raise ValueError("'{}' of type `{}` is not a valid branch descriptor.".format(branch, type(branch)))
try:
return self.git.branches[branch]
except IndexError:
raise ValueError("Specified branch `{}` does not exist.".format(branch))
def git_checkout(self, branch, soft=False, reset=False, create=False):
if not create:
branch_obj = self.git_branch(branch)
branch_obj.checkout()
return branch_obj
if soft and self.git.active_branch.name not in [self.config.published_branch, branch] and not self.git.active_branch.name.endswith('.kp'):
response = None
while response not in ['y', 'n']:
response = input('It looks like you have checked out the `{}` branch, whereas we were expecting to use `{}`. Do you want to use your current branch instead? (y/n) '.format(self.git.active_branch.name, branch))
if response == 'y':
branch = self.git.active_branch.name
if reset or branch not in [b.name for b in self.git.branches]:
ref_head = None
if self.git_has_remote:
for ref in self.git_remote.refs:
if ref.name == branch:
ref_head = ref
break
if not ref_head:
ref_head = self.git_remote.refs.master if self.git_has_remote else self.git.branches.master
else:
logger.warning(
"The branch `{}` already exists as upstream, and you maybe clobbering someone's work. Please check.".format(ref_head.name))
branch = self.git.create_head(branch, ref_head, force=True)
else:
branch = self.git_branch(branch)
branch.checkout()
return branch
def git_diff(self, ref=None, ref_base=None, patch=False):
commit = self.git.commit(ref)
ref = self.git.merge_base(self.git.commit(ref_base or self.config.published_branch), commit)[0]
return commit.diff(ref, create_patch=patch)
# ---------------- Post retrieval methods --------------------------------
def _dir(self, prefix, statuses):
posts = set()
if any([status != self.PostStatus.PUBLISHED for status in statuses]):
local_posts = self.git_local_posts(as_dict=True)
for status in statuses:
if status == self.PostStatus.PUBLISHED:
posts.update(self.git_dir(prefix=prefix, commit=self.config.published_branch))
else:
for branch in local_posts:
for post_path in local_posts[branch]:
if prefix is not None and not post_path.startswith(prefix):
continue
if self._kp_status(post_path, branch=branch) in statuses:
posts.add(post_path)
for post in sorted(posts):
yield post
# ------------- Post submission / addition user flow ----------------------
def _add_prepare(self, kp, path, update=False, branch=None, squash=False, message=None):
target = os.path.abspath(os.path.join(self.path, path))
if self.git_has_remote:
branch = branch or path
else:
logger.warning("This repository does not have a remote, and so post review is being skipped. Adding post directly into published branch...")
branch = self.config.published_branch
# Create or checkout the appropriate branch for this project
logger.info("Checking out (and/or creating) a new branch `{}`...".format(branch))
branch_obj = self.git_checkout(branch, soft=True, reset=squash, create=True)
branch = branch_obj.name
# Verify that post path does not exist (unless we are updating the post)
assert update or not os.path.exists(target), "A knowledge post already exists at '{}'! If you wanted to update it, please pass the '--update' flag.".format(path)
# Add knowledge post to local branch
logger.info("Adding and committing '{}' to local branch `{}`...".format(path, branch))
def _add_cleanup(self, kp, path, update=False, branch=None, squash=False, message=None):
self.git.index.add([path])
# Commit the knowledge post and rollback if it fails
try:
if message is None:
message = input("Please enter a commit message for this post: ")
self.git.index.commit(message)
except (KeyboardInterrupt, Exception) as e:
if message is None:
logger.warning("No commit message input for post '{}'. Rolling back post addition...")
else:
logger.error("Something went wrong. Rolling back post addition...")
self.git.index.reset()
try:
self.git.git.clean('-df', path)
self.git.git.checkout('--', path)
            except Exception:
pass
raise e
def _submit(self, path=None, branch=None, force=False):
if not self.git_has_remote:
raise RuntimeError("Could not find remote repository `{}` into which this branch should be submitted.".format(self.config.remote_name))
if branch is None and path is None:
raise ValueError("To submit a knowledge post, a path to the post and/or a git branch must be specified.")
if branch is None:
branch = self.git_branch_for_post(path)
if branch is None:
raise ValueError("It does not appear that you have any drafts in progress for '{}'.".format(path))
if not self.__remote_available:
raise RuntimeError("Cannot connect to remote repository {} ({}). Please check your connection, and then try again.".format(self.config.remote_name, self.git_remote.url))
self.git_remote.push(branch, force=force)
logger.info("Pushed local branch `{}` to upstream branch `{}`. Please consider starting a pull request, or otherwise merging into master.".format(branch, branch))
def _publish(self, path): # Publish a post for general perusal
raise NotImplementedError
def _unpublish(self, path): # unpublish a post for general perusal
raise NotImplementedError
def _accept(self, path): # Approve to publish a post for general perusal
pass
def _remove(self, path, all=False):
raise NotImplementedError
# ------------ Knowledge Post Data Retrieval Methods -------------------------
def _kp_uuid(self, path):
try:
return self._kp_read_ref(path, 'UUID')
        except Exception:
return None
def _kp_path(self, path, rel=None):
return KnowledgeRepository._kp_path(self, os.path.expanduser(path), rel=rel or self.path)
def _kp_exists(self, path, revision=None):
# For speed, first check whether it exists in the checked out branch, then search more deeply
return os.path.isdir(os.path.join(self.path, path)) or (self.git_branch_for_post(path, interactive=False) is not None)
def _kp_status(self, path, revision=None, detailed=False, branch=None):
if not hasattr(self, '_dir_cache'):
self._dir_cache = {path: None for path in self.dir()}
if path in self._dir_cache:
return self.PostStatus.PUBLISHED
if branch is None:
branch = self.git_branch_for_post(path, interactive=False)
else:
branch = self.git_branch(branch)
if branch is None:
            raise ValueError("No such post: {}".format(path))
if branch.name == self.config.published_branch:
status = self.PostStatus.PUBLISHED, None
elif self.git_has_remote and branch.name in self.git_remote.refs:
remote_branch = self.git_remote.refs[branch.name].name
behind = len(list(self.git.iter_commits('{}..{}'.format(branch, remote_branch))))
ahead = len(list(self.git.iter_commits('{}..{}'.format(remote_branch, branch))))
status = (self.PostStatus.SUBMITTED,
(" - {} commits behind".format(behind) if behind else '') +
(" - {} commits ahead".format(ahead) if ahead else '') +
(" [On branch: {}]".format(branch) if branch != path else ''))
else:
status = self.PostStatus.DRAFT, None
if detailed:
return status
return status[0]
def _kp_get_revision(self, path):
# We use a 'REVISION' file in the knowledge post folder rather than using git
# revisions because using git rev-parse is slow.
try:
return int(self._kp_read_ref(path, 'REVISION'))
        except Exception:
return 0
def _kp_get_revisions(self, path): # slow
# TODO: In the future, we may want to use something like:
# self.git.iter_commits(paths=os.path.join(self.path, path, 'knowledge.md'))
# But this will require a lot of piping and may not make sense in the context
# of a non-bare git repository.
raise NotImplementedError
def _kp_write_ref(self, path, reference, data, uuid=None, revision=None):
ref_path = os.path.join(self.path, path, reference)
ref_dir = os.path.dirname(ref_path)
if not os.path.exists(ref_dir):
os.makedirs(ref_dir)
with open(ref_path, 'wb') as f:
return f.write(data)
def _kp_dir(self, path, parent=None, revision=None): # TODO: Account for revision
if parent:
path = os.path.join(path, parent)
for dirpath, dirnames, filenames in os.walk(os.path.join(self.path, path)):
for filename in filenames:
if dirpath == "" and filename == "REVISION":
continue
yield os.path.relpath(os.path.join(dirpath, filename), os.path.join(self.path, path))
def _kp_has_ref(self, path, reference, revision=None): # TODO: Account for revision
return os.path.isfile(os.path.join(self.path, path, reference))
def _kp_diff(self, path, head, base):
raise NotImplementedError
def _kp_new_revision(self, path, uuid=None):
self._kp_write_ref(path, "REVISION", encode(self._kp_get_revision(path) + 1))
if uuid:
self._kp_write_ref(path, "UUID", encode(uuid))
def _kp_read_ref(self, path, reference, revision=None):
with open(os.path.join(self.path, path, reference), 'rb') as f:
return f.read()
# ------------- Utility methods --------------------------------------
def __abspath(self, path):
return os.path.abspath(os.path.join(self.path, path))
@property
def __remote_host(self):
if self.git_has_remote:
# TODO: support more types of hosts
m = re.match(r'.*?@(.*?):\.*?', self.git_remote.url)
if m: # shorthand ssh uri
return m.group(1)
return None
@property
def __remote_port(self):
port = 22
if self.git_has_remote:
m = re.match(r'^(.*?)?@([^/:]*):?([0-9]+)?', self.git_remote.url)
if m:
if m.group(3):
port = m.group(3)
return int(port)
@property
def __remote_available(self):
# TODO: support more types of hosts
host = self.__remote_host
port = self.__remote_port
if host:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(0.5)
try:
s.connect((socket.gethostbyname(host), port))
return True
            except Exception:
return False
finally:
s.close()
return True
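# Illustrative usage (the repository path is a hypothetical placeholder):
#
#     repo = GitKnowledgeRepository.create('git:///tmp/knowledge_repo')
#     repo.update()                  # fetch/pull if a remote is configured
#     print(repo.status_message)     # current branch and modified files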
| apache-2.0 | -6,867,122,787,053,720,000 | 39.365201 | 225 | 0.577566 | false | 4.185369 | true | false | false |
bruckhaus/challenges | python_challenges/hanoi.py | 1 | 1078 | __author__ = 'tilmann.bruckhaus'
class Hanoi:
def __init__(self, disks):
print "Playing \"Towers of Hanoi\" for ", disks, " disks:"
self.disks = disks
self.source_peg = []
self.helper_peg = []
self.target_peg = []
self.set_up_pegs()
self.show()
def solve(self):
self.move(self.disks, self.source_peg, self.target_peg, self.helper_peg)
print "\nSolved."
def move(self, peg_count, source_peg, target_peg, helper_peg):
if peg_count >= 1:
self.move(peg_count - 1, source_peg, helper_peg, target_peg)
target_peg.append(source_peg.pop())
self.show()
self.move(peg_count - 1, helper_peg, target_peg, source_peg)
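    # Note: solving n disks always takes 2**n - 1 moves: move() performs one
    # move itself plus two recursive calls on n - 1 disks.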
def show(self):
print "\nsource_pge: ", self.source_peg
print "helper_pge: ", self.helper_peg
print "target_pge: ", self.target_peg
def set_up_pegs(self):
for i in reversed(range(1, self.disks + 1)):
self.source_peg.append(i)
if __name__ == '__main__':
Hanoi(4).solve() | mit | 6,909,502,216,494,593,000 | 28.972222 | 80 | 0.556586 | false | 3.208333 | false | false | false |
lixxu/wxbreads | wxbreads/login.py | 1 | 7576 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import ldap3
import windbreads.utils as wdu
import wx
from ldap3tool import LDAPTool
import wxbreads.widgets as wxw
from wxbreads.base import BaseDialog
def ldap_login(server, base_dn, login_name, password, close=True, **kwargs):
ldap = LDAPTool(server=server, base_dn=base_dn)
try:
use_ssl = kwargs.pop("use_ssl", False)
kw = kwargs.copy()
if use_ssl:
kw.update(server=ldap.open_server(use_ssl=use_ssl))
ldap.connect(login_name, password, **kw)
except ldap3.core.exceptions.LDAPSocketOpenError as ex:
return 100, ex
except ldap3.core.exceptions.LDAPBindError:
return 200, None
except Exception as ex:
return 300, ex
else:
if close:
ldap.close()
else:
return 0, ldap
return 0, None
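# Illustrative call (server, base DN and credentials are placeholders):
#
#     code, obj = ldap_login('ldap.example.com', 'dc=example,dc=org',
#                            'EXAMPLE\\jdoe', 'secret', close=False)
#     # code == 0 means success; with close=False, obj is the connected
#     # LDAPTool instance, otherwise obj carries the error (or None).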
class LoginWindow(BaseDialog):
app_title = "User Login"
def __init__(self, **kwargs):
self.enable_cancel = kwargs.get("enable_cancel", True)
self.ldap_kwargs = kwargs.pop("ldap_kwargs", {})
self.need_busy = kwargs.pop("need_busy", False)
self.allowed_names = kwargs.pop("allowed_names", None)
super(LoginWindow, self).__init__(**kwargs)
self.panel = wx.Panel(self)
self.parent = parent = kwargs.get("parent")
root_user = kwargs.get("root_user")
root_pass = kwargs.get("root_pass")
last_user = kwargs.get("last_user")
self.domain = kwargs.get("domain", "")
if parent:
if not root_user:
root_user = getattr(parent, "root_user", "root")
if not root_pass:
root_pass = getattr(parent, "root_pass", "")
if not last_user:
            last_user = getattr(parent, "login_user", "")
self.root_user = root_user or "root"
self.root_pass = root_pass or ""
self.last_user = last_user or ""
self.pwd = kwargs.get("password", "")
self.current_user = None
self.ldap_obj = None
self.server = kwargs.get("server", "ldap-server")
self.base_dn = kwargs.get("base_dn", "dc=corp,dc=company,dc=org")
self.destroy = kwargs.get("destroy", True)
self.can_exit = kwargs.get("can_exit", True)
self.is_login = False
self.setup_ui()
self.Bind(wx.EVT_BUTTON, self.on_login, id=wx.ID_OK)
self.Bind(wx.EVT_BUTTON, self.on_quit, id=wx.ID_CANCEL)
if kwargs.get("show_win", True):
self.show()
def setup_ui(self):
sizer = wx.BoxSizer(wx.VERTICAL)
size = (120, -1)
label_style = wx.ALIGN_RIGHT
kwargs = dict(
fsize=size, ssize=(200, -1), fstyle=label_style, t=self.t
)
# Name field
_, self.name_tc = wxw.add_text_row(
self.panel,
sizer,
label="Login Name",
value=self.last_user,
**kwargs
)
wxw.focus_on(self.name_tc)
# Password
_, self.pwd_tc = wxw.add_text_row(
self.panel,
sizer,
label="Password",
value=self.pwd,
sstyle=wx.TE_PASSWORD,
**kwargs
)
ok_btn, cancel_btn = wxw.add_ok_buttons(
self.panel, sizer, size=(100, 30), ok_text="Login", t=self.t
)
self.ok_btn = ok_btn
self.cancel_btn = cancel_btn
cancel_btn.Enable(self.enable_cancel)
self.panel.SetSizer(sizer)
self.panel.Layout()
sizer.Fit(self)
def high_light(self, wgt, focus=True):
wgt.Clear()
wgt.SetBackgroundColour(wxw.HIGHLIGHT_RED)
if focus:
wgt.SetFocus()
def get_field_values(self):
self.login_name = self.name_tc.GetValue().strip().lower()
self.password = self.pwd_tc.GetValue()
def on_login(self, event):
self.ok_btn.Enable(False)
self.cancel_btn.Enable(False)
self.is_login = True
self.current_user = None
self.ldap_obj = None
self.start_delay_work(self.after_submit, self.do_submit)
def after_submit(self, delay_result):
try:
delay_result.get()
except Exception as e:
self.popup("Error", e, "e")
self.ok_btn.Enable(True)
self.cancel_btn.Enable(True)
self.is_login = False
if self.current_user:
if self.parent and hasattr(self.parent, "login_user"):
self.parent.login_user = self.current_user
self.parent.ldap_obj = self.ldap_obj
self.on_quit()
def do_submit(self):
self.get_field_values()
if not self.login_name:
self.high_light(self.name_tc)
self.Refresh()
return
self.name_tc.SetBackgroundColour(wx.NullColour)
if not self.password:
self.name_tc.ClearBackground()
self.high_light(self.pwd_tc)
self.Refresh()
return
if self.parent:
root_user = getattr(self.parent, "root_user", "root")
root_pass = getattr(self.parent, "root_pass", "")
else:
root_user = self.root_user
root_pass = self.root_pass
if self.login_name == root_user:
if self.password == root_pass:
self.current_user = self.login_name
self.ldap_obj = None
return
self.high_light(self.pwd_tc)
self.Refresh()
return
if "\\" in self.login_name:
domain, username = self.login_name.split("\\", 1)
else:
domain, username = self.domain, self.login_name
if self.allowed_names and username not in self.allowed_names:
self.popup("Error", "This user is not allowed to login", "e")
return
busy = None
if self.need_busy:
busy = self.show_busy("connecting to server...")
ec, msg = ldap_login(
self.server,
self.base_dn,
"{}\\{}".format(domain, username) if domain else username,
self.password,
**self.ldap_kwargs
)
if self.need_busy:
self.hide_busy(busy)
if ec in (100, 300):
self.popup("Error", msg, "e")
return
elif ec == 200:
self.popup(
"Authentication Error", "Username/password not match", "e"
)
self.high_light(self.pwd_tc)
self.Refresh()
return
if self.parent and getattr(self.parent, "extra_login_check"):
if not self.parent.extra_login_check(username, msg):
return
self.current_user = username
self.ldap_obj = msg
def on_quit(self, event=None):
if self.can_exit and (not self.is_login):
if self.destroy:
self.Destroy()
else:
self.Hide()
def test_run():
app = wx.App()
LoginWindow(last_user="domain\\username", password="password")
app.MainLoop()
def test_i18n_run():
from functools import partial
import windbreads.common_i18n as wdi18n
zh = wdi18n.zh
zh.setdefault("User Login", "用户登录")
t = partial(wdu.tt, lang="zh", po=dict(zh=zh))
app = wx.App()
LoginWindow(last_user="domain\\test", password="password", t=t)
app.MainLoop()
if __name__ == "__main__":
test_run()
test_i18n_run()
| bsd-2-clause | 8,304,882,785,256,869,000 | 28.220077 | 76 | 0.546776 | false | 3.65251 | false | false | false |
csaldias/python-usm | Certámenes resueltos/Certamen 2 2013-2/pregunta3.py | 1 | 3355 | #Los datos del problema.
salidas = [ ((2013,11,2), 'LAN123','NewYork','EMBARQUE'),
((2013,4,28), 'MX201', 'Cancun', 'ARRIBADO'),
#...
]
vuelos = { 'LAN123': {'16740623-7', '1111111-1', '555555-5'},
'ALGO00': {'444444-4'},
'MX201': {'777777-7'},
# ...
}
personas = {'16740623-7':('OEncina', 'NewYork', (1987, 7, 22), 62000),
'444444-4':('Edwar Lopez', 'Miami', (1900, 3, 11), 120000),
'777777-7':('Jorge Perez', 'Santiago', (1989, 2, 17), 1000),
'555555-5':('Daniela Perez', 'Roma', (1991, 8, 17), 12000),
'1111111-1':('Sandra Lazo', 'Ibiza', (1970, 4, 14), 10000),
# ...
}
# Question a)
def estado_pasajero(nombre_pasajero):
    # Iterate over the personas dictionary and unpack everything
    for rut, datos in personas.items():
        nombre, ciudad_origen, fecha_nacimiento, millas = datos
        # If this is the person we are looking for...
        if nombre_pasajero == nombre:
            # Check the flights...
            for codigo_vuelo, rut_pasajeros in vuelos.items():
                # If the person is on this flight...
                if rut in rut_pasajeros:
                    # Check the departures...
                    for datos_salida in salidas:
                        fecha_salida, codigo, ciudad, estado_vuelo = datos_salida
                        # If we found the flight...
                        if codigo_vuelo == codigo:
                            # Return all the data.
                            return (rut, ciudad_origen, estado_vuelo)
    # If there are no matches, return None
    return None
# Question b)
def cambia_de_vuelo(rut, nuevo_vuelo, millas):
    for codigo, ruts in vuelos.items():
        # If the passenger exists...
        if rut in ruts:
            # Remove the passenger from their current flight...
            ruts.remove(rut)
            # ...and add them to the new flight. Remember that the vuelos
            # dictionary maps a string key to a set, so we use .add()
            vuelos[nuevo_vuelo].add(rut)
            # Add the miles and update
            datos_persona = personas[rut]
            nombre, ciudad_origen, fecha_nacimiento, cantidad_millas = datos_persona
            cantidad_millas += millas
            datos_persona_actualizados = (nombre, ciudad_origen, fecha_nacimiento, cantidad_millas)
            personas[rut] = datos_persona_actualizados
            return True
    # If the passenger does not exist, return False.
    return False
# Question c)
def filtro_nac(fecha, estado):
    filtro = set()
    for rut, datos_persona in personas.items():
        # We only need the name and the birth date; the rest of the
        # data can be omitted.
        nombre, _, fecha_nacimiento, _ = datos_persona
        # If the birth date is later than the requested one...
        if fecha < fecha_nacimiento:
            # Get the data we need by reusing the first function
            # (BTW, graders like it when you do this)
            _, _, estado_vuelo = estado_pasajero(nombre)
            # If the status is the requested one...
            if estado_vuelo == estado:
                # Add it
                filtro.add(rut)
    return filtro
datacommonsorg/data | scripts/ourworldindata/covid19/preprocess_csv.py | 1 | 4555 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import io
import ssl
import urllib.request
import sys
sys.path.insert(1, '../../../util')
from alpha2_to_dcid import COUNTRY_MAP
country_set = set(COUNTRY_MAP.values())
output_columns = [
'Date', 'GeoId', 'CumulativeCount_Vaccine_COVID_19_Administered',
'IncrementalCount_Vaccine_COVID_19_Administered',
'CumulativeCount_MedicalConditionIncident_COVID_19_ConfirmedCase',
'IncrementalCount_MedicalConditionIncident_COVID_19_ConfirmedCase',
'CumulativeCount_MedicalConditionIncident_COVID_19_PatientDeceased',
'IncrementalCount_MedicalConditionIncident_COVID_19_PatientDeceased',
'Count_MedicalConditionIncident_COVID_19_PatientInICU',
'Count_MedicalConditionIncident_COVID_19_PatientHospitalized',
'CumulativeCount_MedicalTest_ConditionCOVID_19',
'IncrementalCount_MedicalTest_ConditionCOVID_19'
]
# Automate Template MCF generation since there are many Statistical Variables.
TEMPLATE_MCF_TEMPLATE = """
Node: E:OurWorldInData_Covid19->E{index}
typeOf: dcs:StatVarObservation
variableMeasured: dcs:{stat_var}
measurementMethod: dcs:OurWorldInData_COVID19
observationAbout: C:OurWorldInData_Covid19->GeoId
observationDate: C:OurWorldInData_Covid19->Date
value: C:OurWorldInData_Covid19->{stat_var}
"""
def create_formatted_csv_file(f_in, csv_file_path):
with open(csv_file_path, 'w', newline='') as f_out:
writer = csv.DictWriter(f_out,
fieldnames=output_columns,
lineterminator='\n')
writer.writeheader()
reader = csv.DictReader(f_in)
for row_dict in reader:
place_dcid = 'country/%s' % row_dict['iso_code']
# Skip invalid country ISO code.
if not place_dcid in country_set:
continue
processed_dict = {
'Date':
row_dict['date'],
'GeoId':
place_dcid,
'CumulativeCount_Vaccine_COVID_19_Administered':
row_dict['total_vaccinations'],
'IncrementalCount_Vaccine_COVID_19_Administered':
row_dict['new_vaccinations'],
'CumulativeCount_MedicalConditionIncident_COVID_19_ConfirmedCase':
row_dict['total_cases'],
'IncrementalCount_MedicalConditionIncident_COVID_19_ConfirmedCase':
row_dict['new_cases'],
'CumulativeCount_MedicalConditionIncident_COVID_19_PatientDeceased':
row_dict['total_deaths'],
'IncrementalCount_MedicalConditionIncident_COVID_19_PatientDeceased':
row_dict['new_deaths'],
'Count_MedicalConditionIncident_COVID_19_PatientInICU':
row_dict['icu_patients'],
'Count_MedicalConditionIncident_COVID_19_PatientHospitalized':
row_dict['hosp_patients'],
'CumulativeCount_MedicalTest_ConditionCOVID_19':
row_dict['total_tests'],
'IncrementalCount_MedicalTest_ConditionCOVID_19':
row_dict['new_tests'],
}
writer.writerow(processed_dict)
def create_tmcf_file(tmcf_file_path):
stat_vars = output_columns[2:]
with open(tmcf_file_path, 'w', newline='') as f_out:
for i in range(len(stat_vars)):
f_out.write(
TEMPLATE_MCF_TEMPLATE.format_map({
'index': i,
                    'stat_var': stat_vars[i]
}))
if __name__ == '__main__':
gcontext = ssl.SSLContext()
with urllib.request.urlopen(
'https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/owid-covid-data.csv',
context=gcontext) as response:
f_in = io.TextIOWrapper(response)
create_formatted_csv_file(f_in, 'OurWorldInData_Covid19.csv')
create_tmcf_file('OurWorldInData_Covid19.tmcf')
| apache-2.0 | 278,184,952,502,127,300 | 40.036036 | 106 | 0.640834 | false | 3.553042 | true | false | false |
google/pyctr | examples/jax/jax.py | 1 | 3367 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains overloads to convert Python to equivalent JAX code."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from jax import lax
from pyctr.overloads import py_defaults
from pyctr.overloads import staging
init = py_defaults.init
assign = py_defaults.assign
read = py_defaults.read
call = py_defaults.call
def if_stmt(cond, body, orelse, local_writes):
"""Functional form of an if statement.
Args:
cond: Callable with no arguments, predicate of conditional.
body: Callable with no arguments, and outputs of the positive (if) branch as
return type.
orelse: Callable with no arguments, and outputs of the negative (else)
branch as return type.
local_writes: list(pyct.Variable), list of variables assigned in either body
or orelse.
Returns:
Tuple containing the statement outputs.
"""
cond_result = cond()
def if_body(*_):
modified_vals, _ = staging.execute_isolated(body, local_writes)
return modified_vals
def if_orelse(*_):
modified_vals, _ = staging.execute_isolated(orelse, local_writes)
return modified_vals
result_values = lax.cond(cond_result, (), if_body, (), if_orelse)
for var, retval in zip(local_writes, result_values):
var.val = retval
return result_values
def while_stmt(cond, body, _, local_writes):
"""Functional form of a while statement."""
local_writes = [
var for var in local_writes if not py_defaults.is_undefined(var.val)
]
def while_test(state):
for var, s in zip(local_writes, state):
var.val = s
_, result_values = staging.execute_isolated(cond, local_writes)
return result_values
def while_body(state):
for var, s in zip(local_writes, state):
var.val = s
modified_vals, _ = staging.execute_isolated(body, local_writes)
return modified_vals
result_values = lax.while_loop(while_test, while_body,
[var.val for var in local_writes])
for var, val in zip(local_writes, result_values):
var.val = val
return result_values
def for_stmt(target, iter_, body, orelse, modified_vars):
"""Functional form of a for statement."""
del orelse
modified_vars = [
var for var in modified_vars if not py_defaults.is_undefined(var.val)
]
def for_body(idx, state):
for var, s in zip(modified_vars, state):
var.val = s
target.val = iter_[idx]
modified_vals, _ = staging.execute_isolated(body, modified_vars)
return modified_vals
results = lax.fori_loop(0, len(iter_), for_body,
[var.val for var in modified_vars])
for var, val in zip(modified_vars, results):
var.val = val
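# Illustrative use: these overloads let the pyctr conversion machinery rewrite
# ordinary Python such as
#
#     x = 0
#     while x < 10:
#         x = x + 1
#
# into a call like while_stmt(cond, body, None, [x_var]), which stages the
# loop through lax.while_loop. (The rewriting itself happens elsewhere in
# pyctr and is not shown here.)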
| apache-2.0 | -1,682,470,575,494,374,000 | 29.333333 | 80 | 0.672706 | false | 3.766219 | false | false | false |
jakecoppinger/departing.io | parse_nxtbus.py | 1 | 2407 | # Departing.io, a web app to answer the question of "When will the next bus come?"
# Copyright (C) 2016 Jake Coppinger
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import json
import xmltodict
def printPrettyDict(d):
print(json.dumps(d, sort_keys=True, indent=4, separators=(',', ': ')))
class ParseNxtbus:
    def __init__(self, responseDict):
jsonData = xmltodict.parse(responseDict)
self._data = jsonData
def stopMonitoring(self):
serviceDelivery = self._data["Siri"]["ServiceDelivery"]
arrivals = []
if "StopMonitoringDelivery" in serviceDelivery:
if "MonitoredStopVisit" in serviceDelivery["StopMonitoringDelivery"]:
monitoredBuses = serviceDelivery["StopMonitoringDelivery"]["MonitoredStopVisit"]
# Check if there is more than one bus arriving
if "MonitoredVehicleJourney" in monitoredBuses:
busJourney = self._parseBusJourney(monitoredBuses)
if busJourney:
arrivals.append(busJourney)
else:
for journey in monitoredBuses:
busJourney = self._parseBusJourney(journey)
if busJourney:
arrivals.append(busJourney)
return arrivals
def _parseBusJourney(self,busJourneyDictionary):
if "AimedDepartureTime" in busJourneyDictionary["MonitoredVehicleJourney"]["MonitoredCall"]: # Is departing
bus = busJourneyDictionary["MonitoredVehicleJourney"]
busJourney = {}
busJourney["busRouteNumber"] = bus["ExternalLineRef"]
busJourney["destinationName"] = bus["DestinationName"]
if bus["Monitored"] == "true":
busJourney["departureTime"] = bus["MonitoredCall"]["ExpectedDepartureTime"]
busJourney["monitored"] = True
else:
busJourney["departureTime"] = bus["MonitoredCall"]["AimedDepartureTime"]
busJourney["monitored"] = False
return busJourney
return None
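# Illustrative usage (the XML payload would come from the NXTBUS SIRI API):
#
#     parser = ParseNxtbus(xml_response_text)
#     for bus in parser.stopMonitoring():
#         print(bus["busRouteNumber"], bus["departureTime"])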
| gpl-3.0 | -5,955,612,718,406,878,000 | 35.469697 | 109 | 0.7258 | false | 3.438571 | false | false | false |
cathywu/flow | scripts/sync_s3.py | 1 | 1335 | """Script for syncing files form s3 down to your local machine.
Code is heavily based on
https://github.com/rll/rllab/blob/master/scripts/sync_s3.py
"""
import sys
from flow import config
import os
import argparse
sys.path.append('.')
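# Example invocations (the folder name is an arbitrary placeholder):
#     python scripts/sync_s3.py my_experiment --dry   # just print the command
#     python scripts/sync_s3.py my_experiment --bare  # only csv/json files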
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('folder', type=str, default=None, nargs='?')
parser.add_argument('--dry', action='store_true', default=False)
parser.add_argument('--bare', action='store_true', default=False)
args = parser.parse_args()
remote_dir = config.AWS_S3_PATH
local_dir = os.path.join(config.LOG_DIR, "s3")
if args.folder:
remote_dir = os.path.join(remote_dir, args.folder)
local_dir = os.path.join(local_dir, args.folder)
if args.bare:
command = (
("aws s3 sync {remote_dir} {local_dir} --exclude '*' --include " +
"'*.csv' --include '*.json' --content-type \"UTF-8\"")
.format(local_dir=local_dir, remote_dir=remote_dir))
else:
command = (
("aws s3 sync {remote_dir} {local_dir} --exclude '*stdout.log' " +
"--exclude '*stdouterr.log' --content-type \"UTF-8\"")
.format(local_dir=local_dir, remote_dir=remote_dir))
if args.dry:
print(command)
else:
os.system(command)
| mit | 2,369,196,079,049,326,000 | 35.081081 | 78 | 0.608989 | false | 3.312655 | false | false | false |