| commit (string, len 40) | subject (string, len 1-3.25k) | old_file (string, len 4-311) | new_file (string, len 4-311) | old_contents (string, len 0-26.3k) | lang (string, 3 classes) | proba (float64, 0-1) | diff (string, len 0-7.82k) |
|---|---|---|---|---|---|---|---|
b269ed70223591c81d13f97e48c74ced12cec661
|
Update 4-keys-keyboard.py
|
Python/4-keys-keyboard.py
|
Python/4-keys-keyboard.py
|
# Time: O(n)
# Space: O(1)

class Solution(object):
    def maxA(self, N):
        """
        :type N: int
        :rtype: int
        """
        if N < 7:
            return N
        dp = [i for i in xrange(N+1)]
        for i in xrange(7, N+1):
            dp[i % 6] = max(dp[(i-4) % 6]*3,dp[(i-5) % 6]*4)
        return dp[N % 6]
|
Python
| 0.000004 |
@@ -288,16 +288,17 @@
 % 6]*3,
+ 
 dp[(i-5)
|
1dd3e7436c19ba3146be6e34da39bd81dc1efd6e
|
Implement AES file encryption and decryption
|
file_crypto_tools.py
|
file_crypto_tools.py
|
Python
| 0.005043 |
@@ -0,0 +1,2916 @@
+############ Module with cryptographics functions for Storj GUI Client ##########
+## Based on: <http://stackoverflow.com/questions/16761458/how-to-aes-encrypt-decrypt-files-using-python-pycrypto-in-an-openssl-compatible> ##
+
+from hashlib import md5
+from Crypto.Cipher import AES
+from Crypto import Random
+
+
+class FileCrypto():
+    def encrypt_file (self, algorithm, file_path, encrypted_file_save_path, password):
+        if algorithm == "AES":
+            with open(file_path, 'rb') as in_file, open(encrypted_file_save_path, 'wb') as out_file:
+                self.encrypt_file_aes(in_file, out_file, password)
+
+    def decrypt_file (self, algorithm, file_path, decrypted_file_save_path, password):
+        if algorithm == "AES":
+            with open(file_path, 'rb') as in_file, open(decrypted_file_save_path, 'wb') as out_file:
+                self.decrypt_file_aes(in_file, out_file, password)
+
+
+    def derive_key_and_iv(self, password, salt, key_length, iv_length):
+        d = d_i = ''
+        while len(d) < key_length + iv_length:
+            d_i = md5(d_i + password + salt).digest()
+            d += d_i
+        return d[:key_length], d[key_length:key_length + iv_length]
+
+    def encrypt_file_aes(self, in_file, out_file, password, key_length=32):
+        bs = AES.block_size
+        salt = Random.new().read(bs - len('Salted__'))
+        key, iv = self.derive_key_and_iv(password, salt, key_length, bs)
+        cipher = AES.new(key, AES.MODE_CBC, iv)
+        out_file.write('Salted__' + salt)
+        finished = False
+        while not finished:
+            chunk = in_file.read(1024 * bs)
+            if len(chunk) == 0 or len(chunk) % bs != 0:
+                padding_length = bs - (len(chunk) % bs)
+                chunk += padding_length * chr(padding_length)
+                finished = True
+            out_file.write(cipher.encrypt(chunk))
+
+    def decrypt_file_aes(self, in_file, out_file, password, key_length=32):
+        bs = AES.block_size
+        salt = in_file.read(bs)[len('Salted__'):]
+        key, iv = self.derive_key_and_iv(password, salt, key_length, bs)
+        cipher = AES.new(key, AES.MODE_CBC, iv)
+        next_chunk = ''
+        finished = False
+        while not finished:
+            chunk, next_chunk = next_chunk, cipher.decrypt(in_file.read(1024 * bs))
+            if len(next_chunk) == 0:
+                padding_length = ord(chunk[-1])
+                if padding_length < 1 or padding_length > bs:
+                    raise ValueError("bad decrypt pad (%d)" % padding_length)
+                # all the pad-bytes must be the same
+                if chunk[-padding_length:] != (padding_length * chr(padding_length)):
+                    # this is similar to the bad decrypt:evp_enc.c from openssl program
+                    raise ValueError("bad decrypt")
+                chunk = chunk[:-padding_length]
+                finished = True
+            out_file.write(chunk)
|
|
1633e8b286ddeec706d496931713e3ac7b93b780
|
Declare flaskext a namespace package
|
flaskext/__init__.py
|
flaskext/__init__.py
|
Python
| 0.000006 |
@@ -0,0 +1,63 @@
+import pkg_resources
+pkg_resources.declare_namespace(__name__)
|
|
c69572c42da27357f8cb01299c309e47ff033e7f
|
Create docker-swarm-dns.py
|
docker-swarm-dns.py
|
docker-swarm-dns.py
|
Python
| 0.000006 |
@@ -0,0 +1,2111 @@
+#!/usr/bin/env python3.6
+
+from time import sleep
+import docker, \
+    dns.resolver, \
+    dns.query, \
+    dns.tsigkeyring, \
+    dns.update, \
+    os, \
+    sys
+
+swnodes = ['192.168.15.201','192.168.15.202','192.168.15.203','192.168.15.204','192.168.15.205']
+dnservers = {'master':{'ip':'192.168.2.6','key':'EMtUbnXU3as1Eczq2bVZ8g=='},'slave':{'ip':'192.168.2.7','key':'ctWc6TO3tD9YMV1QYgh9Jg=='}}
+domain = 'subsident.docker.'
+ttl = int(os.environ['UPDATE'])
+
+def docker_query():
+    conn = docker.from_env()
+    serv_pre = set()
+    while True:
+        serv_cur = set()
+        for service in conn.services.list():
+            if 'add.dns' in service.attrs['Spec']['Labels']:
+                if service.attrs['Spec']['Labels']['add.dns'] == 'true':
+                    serv_cur.add(service.name)
+        if serv_pre != serv_cur:
+            add = serv_cur.difference(serv_pre)
+            rm = serv_pre.difference(serv_cur)
+            if add:
+                print('ADD', add)
+                for svc in add:
+                    dns_add(svc)
+            if rm:
+                print('DEL', rm)
+                for svc in rm:
+                    dns_remove(svc)
+        serv_pre = serv_cur
+        sleep(ttl)
+
+def dns_add(svc):
+    for host, conf in dnservers.items():
+        print('Add DNS Record \''+svc+'\' sent to',host,'dnserver ('+conf['ip']+')')
+        keyring = dns.tsigkeyring.from_text({
+            'rndc-key.' : conf['key']
+        })
+        update = dns.update.Update(domain, keyring=keyring)
+        for swip in swnodes:
+            update.add(svc, 15, 'a', swip)
+        resp = dns.query.tcp(update, conf['ip'])
+def dns_remove(svc):
+    for host, conf in dnservers.items():
+        print('Remove DNS Record \''+svc+'\' sent to',host,'dnserver ('+conf['ip']+')')
+        keyring = dns.tsigkeyring.from_text({
+            'rndc-key.' : conf['key']
+        })
+        update = dns.update.Update(domain, keyring=keyring)
+        update.delete(svc, 'a')
+        resp = dns.query.tcp(update, conf['ip'])
+
+if __name__ == "__main__":
+    docker_query()
|
|
52dbb4d1f34ef3d637e3d99813591bf12bfa4576
|
support for `python -m intelhex`. Let's provide some help on available "-m" executable points.
|
intelhex/__main__.py
|
intelhex/__main__.py
|
Python
| 0 |
@@ -0,0 +1,1811 @@
+# Copyright (c) 2016, Alexander Belchenko
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms,
+# with or without modification, are permitted provided
+# that the following conditions are met:
+#
+# * Redistributions of source code must retain
+#   the above copyright notice, this list of conditions
+#   and the following disclaimer.
+# * Redistributions in binary form must reproduce
+#   the above copyright notice, this list of conditions
+#   and the following disclaimer in the documentation
+#   and/or other materials provided with the distribution.
+# * Neither the name of the author nor the names
+#   of its contributors may be used to endorse
+#   or promote products derived from this software
+#   without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
+# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
+# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+if __name__ == '__main__':
+    print("Welcome to IntelHex Python library.")
+    print()
+    print("The intelhex package has some executable points:")
+    print("  python -m intelhex.test  --  easy way to run unit tests.")
|
|
20e41b35237da7c34956fdad5e08ab038a7f58f4
|
Make the user model extendable
|
mongoengine/django/auth.py
|
mongoengine/django/auth.py
|
import datetime

from mongoengine import *

from django.utils.encoding import smart_str
from django.contrib.auth.models import AnonymousUser
from django.utils.translation import ugettext_lazy as _

try:
    from django.contrib.auth.hashers import check_password, make_password
except ImportError:
    """Handle older versions of Django"""
    from django.utils.hashcompat import md5_constructor, sha_constructor

    def get_hexdigest(algorithm, salt, raw_password):
        raw_password, salt = smart_str(raw_password), smart_str(salt)
        if algorithm == 'md5':
            return md5_constructor(salt + raw_password).hexdigest()
        elif algorithm == 'sha1':
            return sha_constructor(salt + raw_password).hexdigest()
        raise ValueError('Got unknown password algorithm type in password')

    def check_password(raw_password, password):
        algo, salt, hash = password.split('$')
        return hash == get_hexdigest(algo, salt, raw_password)

    def make_password(raw_password):
        from random import random
        algo = 'sha1'
        salt = get_hexdigest(algo, str(random()), str(random()))[:5]
        hash = get_hexdigest(algo, salt, raw_password)
        return '%s$%s$%s' % (algo, salt, hash)

REDIRECT_FIELD_NAME = 'next'

class User(Document):
    """A User document that aims to mirror most of the API specified by Django
    at http://docs.djangoproject.com/en/dev/topics/auth/#users
    """
    username = StringField(max_length=30, required=True,
                           verbose_name=_('username'),
                           help_text=_("Required. 30 characters or fewer. Letters, numbers and @/./+/-/_ characters"))
    first_name = StringField(max_length=30,
                             verbose_name=_('first name'))
    last_name = StringField(max_length=30,
                            verbose_name=_('last name'))
    email = EmailField(verbose_name=_('e-mail address'))
    password = StringField(max_length=128,
                           verbose_name=_('password'),
                           help_text=_("Use '[algo]$[iterations]$[salt]$[hexdigest]' or use the <a href=\"password/\">change password form</a>."))
    is_staff = BooleanField(default=False,
                            verbose_name=_('staff status'),
                            help_text=_("Designates whether the user can log into this admin site."))
    is_active = BooleanField(default=True,
                             verbose_name=_('active'),
                             help_text=_("Designates whether this user should be treated as active. Unselect this instead of deleting accounts."))
    is_superuser = BooleanField(default=False,
                                verbose_name=_('superuser status'),
                                help_text=_("Designates that this user has all permissions without explicitly assigning them."))
    last_login = DateTimeField(default=datetime.datetime.now,
                               verbose_name=_('last login'))
    date_joined = DateTimeField(default=datetime.datetime.now,
                                verbose_name=_('date joined'))

    meta = {
        'indexes': [
            {'fields': ['username'], 'unique': True}
        ]
    }

    def __unicode__(self):
        return self.username

    def get_full_name(self):
        """Returns the users first and last names, separated by a space.
        """
        full_name = u'%s %s' % (self.first_name or '', self.last_name or '')
        return full_name.strip()

    def is_anonymous(self):
        return False

    def is_authenticated(self):
        return True

    def set_password(self, raw_password):
        """Sets the user's password - always use this rather than directly
        assigning to :attr:`~mongoengine.django.auth.User.password` as the
        password is hashed before storage.
        """
        self.password = make_password(raw_password)
        self.save()
        return self

    def check_password(self, raw_password):
        """Checks the user's password against a provided password - always use
        this rather than directly comparing to
        :attr:`~mongoengine.django.auth.User.password` as the password is
        hashed before storage.
        """
        return check_password(raw_password, self.password)

    @classmethod
    def create_user(cls, username, password, email=None):
        """Create (and save) a new user with the given username, password and
        email address.
        """
        now = datetime.datetime.now()

        # Normalize the address by lowercasing the domain part of the email
        # address.
        if email is not None:
            try:
                email_name, domain_part = email.strip().split('@', 1)
            except ValueError:
                pass
            else:
                email = '@'.join([email_name, domain_part.lower()])

        user = cls(username=username, email=email, date_joined=now)
        user.set_password(password)
        user.save()
        return user

    def get_and_delete_messages(self):
        return []

class MongoEngineBackend(object):
    """Authenticate using MongoEngine and mongoengine.django.auth.User.
    """

    supports_object_permissions = False
    supports_anonymous_user = False
    supports_inactive_user = False

    def authenticate(self, username=None, password=None):
        user = User.objects(username=username).first()
        if user:
            if password and user.check_password(password):
                return user
        return None

    def get_user(self, user_id):
        return User.objects.with_id(user_id)

def get_user(userid):
    """Returns a User object from an id (User.id). Django's equivalent takes
    request, but taking an id instead leaves it up to the developer to store
    the id in any way they want (session, signed cookie, etc.)
    """
    if not userid:
        return AnonymousUser()
    return MongoEngineBackend().get_user(userid) or AnonymousUser()
|
Python
| 0.000605 |
@@ -3130,16 +3130,51 @@
 eta = {
+        'allow_inheritance': True,
|
e9e0a0eeaf985e5c8f74dc6cfb9110f7b3c152e4
|
test workers
|
myria/test/test_workers.py
|
myria/test/test_workers.py
|
Python
| 0.00001 |
@@ -0,0 +1,1280 @@
+from httmock import urlmatch, HTTMock
+from json import dumps as jstr
+import unittest
+from myria import MyriaConnection
+
+
+@urlmatch(netloc=r'localhost:8753')
+def local_mock(url, request):
+    global query_counter
+    if url.path == '/workers':
+        return jstr({'1': 'localhost:9001', '2': 'localhost:9002'})
+    elif url.path == '/workers/alive':
+        return jstr([1, 2])
+    elif url.path == '/workers/worker-1':
+        return jstr("localhost:9001")
+
+    return None
+
+
+class TestQuery(unittest.TestCase):
+    def __init__(self, args):
+        with HTTMock(local_mock):
+            self.connection = MyriaConnection(hostname='localhost', port=8753)
+        unittest.TestCase.__init__(self, args)
+
+    def test_workers(self):
+        with HTTMock(local_mock):
+            workers = self.connection.workers()
+        self.assertEquals(workers, {'1': 'localhost:9001',
+                                    '2': 'localhost:9002'})
+
+    def test_alive(self):
+        with HTTMock(local_mock):
+            workers = self.connection.workers_alive()
+        self.assertEquals(set(workers), set([1, 2]))
+
+    def test_worker_1(self):
+        with HTTMock(local_mock):
+            worker = self.connection.worker(1)
+        self.assertEquals(worker, 'localhost:9001')
|
|
a58723655ae2c1840d11e6f15ac20acfecb89e0e
|
add optional where argument for searching by metadata properties
|
planet/scripts/__init__.py
|
planet/scripts/__init__.py
|
import click
import json
import logging
from planet import api
import sys

client = api.Client()

pretty = click.option('-pp', '--pretty', default=False, is_flag=True)
scene_type = click.option('-s', '--scene-type', default='ortho')
dest_dir = click.option('-d', '--dest', help='Destination directory',
                        type=click.Path(file_okay=False, resolve_path=True))

def configure_logging(verbosity):
    '''configure logging via verbosity level of between 0 and 2 corresponding
    to log levels warning, info and debug respectfully.'''
    log_level = max(logging.DEBUG, logging.WARNING - logging.DEBUG*verbosity)
    logging.basicConfig(stream=sys.stderr, level=log_level)

def click_exception(ex):
    if type(ex) is api.APIException:
        raise click.ClickException('Unexpected response: %s' % ex.message)
    msg = "%s: %s" % (type(ex).__name__, ex.message)
    raise click.ClickException(msg)

def call_and_wrap(func, *args, **kw):
    '''call the provided function and wrap any API exception with a click
    exception. this means no stack trace is visible to the user but instead
    a (hopefully) nice message is provided.
    note: could be a decorator but didn't play well with click
    '''
    try:
        return func(*args, **kw)
    except api.APIException, ex:
        click_exception(ex)

def check_futures(futures):
    for f in futures:
        try:
            f.result()
        except api.InvalidAPIKey, invalid:
            click_exception(invalid)
        except api.APIException, other:
            click.echo('WARNING %s' % other.message)

@click.group()
@click.option('-w', '--workers', default=4)
@click.option('-v', '--verbose', count=True)
@click.option('-k', '--api-key',
              help='Valid API key - or via env variable %s' % api.ENV_KEY)
@click.option('-u', '--base-url', help='Optional for testing')
def cli(verbose, api_key, base_url, workers):
    configure_logging(verbose)
    '''Planet API Client'''
    if api_key:
        client.api_key = api_key
    if base_url:
        client.base_url = base_url
    client._workers = workers

@cli.command('list-scene-types')
def list_all_scene_types():
    '''List all scene types.'''
    click.echo(call_and_wrap(client.list_all_scene_types).get_raw())

@scene_type
@dest_dir
@click.argument('scene_ids', nargs=-1)
@click.option('--product',
              type=click.Choice(
                  ["band_%d" % i for i in range(1, 12)] +
                  ['visual', 'analytic', 'qa']
              ), default='visual')
@cli.command('download')
@click.pass_context
def fetch_scene_geotiff(ctx, scene_ids, scene_type, product, dest):
    '''Fetch full scene image(s)'''
    if len(scene_ids) == 0:
        src = click.open_file('-')
        if not src.isatty():
            scene_ids = map(lambda s: s.strip(), src.readlines())
        else:
            click.echo(ctx.get_usage())

    futures = client.fetch_scene_geotiffs(scene_ids, scene_type, product,
                                          api.write_to_file(dest))
    check_futures(futures)

@scene_type
@dest_dir
@click.argument("scene-ids", nargs=-1)
@click.option('--size', type=click.Choice(['sm', 'md', 'lg']), default='md')
@click.option('--format', 'fmt', type=click.Choice(['png', 'jpg', 'jpeg']),
              default='png')
@cli.command('thumbnails')
def fetch_scene_thumbnails(scene_ids, scene_type, size, fmt, dest):
    '''Fetch scene thumbnail(s)'''
    if len(scene_ids) == 0:
        src = click.open_file('-')
        if not src.isatty():
            scene_ids = map(lambda s: s.strip(), src.readlines())

    futures = client.fetch_scene_thumbnails(scene_ids, scene_type, size, fmt,
                                            api.write_to_file(dest))
    check_futures(futures)

@pretty
@scene_type
@click.argument('id', nargs=1)
@cli.command('metadata')
def fetch_scene_info(id, scene_type, pretty):
    '''Fetch scene metadata'''
    res = call_and_wrap(client.fetch_scene_info, id, scene_type).get_raw()
    if pretty:
        res = json.dumps(json.loads(res), indent=2)
    click.echo(res)

@pretty
@scene_type
@cli.command('search')
@click.argument("aoi", default="-", required=False)
@click.option('--count', type=click.INT, required=False)
def get_scenes_list(scene_type, pretty, aoi, count):
    '''Get a list of scenes'''
    if aoi == "-":
        src = click.open_file('-')
        if not src.isatty():
            lines = src.readlines()
            aoi = ''.join([line.strip() for line in lines])
        else:
            aoi = None

    res = call_and_wrap(client.get_scenes_list, scene_type=scene_type,
                        intersects=aoi, count=count).get_raw()
    if pretty:
        res = json.dumps(json.loads(res), indent=2)
    click.echo(res)
|
Python
| 0 |
@@ -4229,16 +4229,152 @@
 ed=False
+, help="Set the number of returned scenes.")
[email protected]("--where", nargs=3, multiple=True, help="Provide additional search criteria."
 )
 def ge
@@ -4417,16 +4417,23 @@
 i, count
+, where
 ):
     '
@@ -4675,17 +4675,193 @@
 = None
+
+    if where:
+        conditions = {
+            "%s.%s" % condition[0:2]: condition[2]
+            for condition in where
+        }
+    else:
+        conditions = {}
 
-    
     res
@@ -4923,16 +4923,16 @@
 e_type,
-    
@@ -4974,16 +4974,30 @@
 nt=count
+, **conditions
 ).get_ra
|
2349d603ca887961441b5b3f436d6cffaaecb291
|
Add pyMetascanAPI class
|
pyMetascanAPI.py
|
pyMetascanAPI.py
|
Python
| 0 |
@@ -0,0 +1,1220 @@
+import requests
+import os
+
+class pyMetascanAPI:
+    API_ENDPOINT = 'https://api.metascan-online.com/v1/'
+    API_KEY = ''
+
+    FILE_EXT = 'file'
+    DATA_EXT = 'file/'
+    HASH_EXT = 'hash/'
+
+    def __init__(self, api_key):
+        self.API_KEY = api_key
+
+    def fileUpload(self, file):
+        r = self.makeRequest(self.getFileEndpoint(), 'POST', file)
+        return r.json()
+
+    def retrieveReport(self, data_id):
+        r = self.makeRequest(self.getDataEndpoint(data_id))
+        return r.json()
+
+    def hashLookup(self, hash):
+        r = self.makeRequest(self.getHashEndpoint(hash))
+        return r.json()
+
+    def makeRequest(self, url, method='GET', file=None):
+        headers = {'apikey' : self.API_KEY}
+
+        if method == 'POST':
+            headers.update({'filename' : os.path.basename(file)})
+            return requests.post(url, file, headers=headers)
+        else:
+            return requests.get(url, headers=headers)
+
+    def getFileEndpoint(self):
+        return self.API_ENDPOINT + self.FILE_EXT
+
+    def getDataEndpoint(self, data_id):
+        return self.API_ENDPOINT + self.DATA_EXT + data_id
+
+    def getHashEndpoint(self, hash):
+        return self.API_ENDPOINT + self.HASH_EXT + hash
|
|
feefc96050d3906730fe6d366430d7478204d168
|
Add solution to 121.
|
121/121.py
|
121/121.py
|
Python
| 0.000026 |
@@ -0,0 +1,2302 @@
+"""
+A bag contains one red disc and one blue disc. In a game of chance a player
+takes a disc at random and its colour is noted. After each turn the disc is
+returned to the bag, an extra red disc is added, and another disc is taken at
+random.
+
+The player pays £1 to play and wins if they have taken more blue discs than red
+discs at the end of the game.
+
+If the game is played for four turns, the probability of a player winning is
+exactly 11/120, and so the maximum prize fund the banker should allocate for
+winning in this game would be £10 before they would expect to incur a loss.
+Note that any payout will be a whole number of pounds and also includes the
+original £1 paid to play the game, so in the example given the player actually
+wins £9.
+
+Find the maximum prize fund that should be allocated to a single game in which
+fifteen turns are played.
+
+
+Solution comment: Very quick, ~0.5 ms (would be much less for C++ impl.).
+Derived explicitly the N = 4 case above, from which the pattern emerges. The
+memoization helps a whole bunch. Would have been slightly better to use DP and
+build the table directly, but memoizing a recursive solution is so much nicer.
+"""
+
+import time
+import numpy as np
+from functools import lru_cache
+
+def P(n):
+    """Probability of drawing a blue in the n-th round."""
+    return 1 / (n + 1)
+
+@lru_cache(maxsize=None)
+def Prob(blues, n, N):
+    """
+    Return probability of getting some number of blues, starting on round n,
+    with N rounds total.
+    """
+    if blues <= 0:
+        return 1  # Can always get 0 blues.
+    elif blues > N - n + 1:
+        return 0  # Can never get more blues than draws.
+    elif blues == N - n + 1:
+        return np.prod(P(np.arange(n, N+1)))  # Prob of getting blue of every draw.
+    else:
+        # Prob is prob of getting a blue now and then blues-1 on the remaining throws,
+        # or prob of getting red now and blues blues on the remaining throws.
+        return P(n) * Prob(blues - 1, n + 1, N) + (1-P(n)) * Prob(blues, n + 1, N)
+
+if __name__ == "__main__":
+    t0 = time.time()
+    rounds = 15
+    bet = 1
+    winning_prob = Prob(rounds // 2 + 1, 1, rounds)
+    print('Execution time: {:.3f} ms'.format((time.time() - t0) * 1e3))
+    print('Max payout with {} rounds: {}£'.format(rounds, int(bet / winning_prob)))
|
|
7abe3e8039162fcb1eb5a1c40c2b22a89122e103
|
Use LLDB in gypv8sh to debug random crashes.
|
tools/gypv8sh.py
|
tools/gypv8sh.py
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""This script is used by chrome_tests.gypi's js2webui action to maintain the
argument lists and to generate inlinable tests.
"""

import json
import optparse
import os
import subprocess
import sys
import shutil

def main ():
  parser = optparse.OptionParser()
  parser.set_usage(
      "%prog v8_shell mock.js test_api.js js2webui.js "
      "testtype inputfile inputrelfile cxxoutfile jsoutfile")
  parser.add_option('-v', '--verbose', action='store_true')
  parser.add_option('-n', '--impotent', action='store_true',
                    help="don't execute; just print (as if verbose)")
  parser.add_option('--deps_js', action="store",
                    help=("Path to deps.js for dependency resolution, " +
                          "optional."))
  (opts, args) = parser.parse_args()
  if len(args) != 9:
    parser.error('all arguments are required.')
  (v8_shell, mock_js, test_api, js2webui, test_type,
   inputfile, inputrelfile, cxxoutfile, jsoutfile) = args
  cmd = [v8_shell]
  icudatafile = os.path.join(os.path.dirname(v8_shell), 'icudtl.dat')
  if os.path.exists(icudatafile):
    cmd.extend(['--icu-data-file=%s' % icudatafile])
  arguments = [js2webui, inputfile, inputrelfile, opts.deps_js,
               cxxoutfile, test_type]
  cmd.extend(['-e', "arguments=" + json.dumps(arguments), mock_js,
              test_api, js2webui])
  if opts.verbose or opts.impotent:
    print cmd
  if not opts.impotent:
    try:
      p = subprocess.Popen(
          cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=0)
      out, err = p.communicate()
      if p.returncode:
        # TODO(jochen): Remove once crbug.com/370551 is resolved.
        if sys.platform == 'darwin':
          cmd[:0] = ['gdb', '-batch', '-ex', 'run', '-ex', 'bt', '-ex', 'quit',
                     '-args']
          p = subprocess.Popen(
              cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=0)
          out, err = p.communicate()
        raise Exception('Failed to run d8', out, err)
      with open(cxxoutfile, 'wb') as f:
        f.write(out)
      shutil.copyfile(inputfile, jsoutfile)
    except Exception, ex:
      if os.path.exists(cxxoutfile):
        os.remove(cxxoutfile)
      if os.path.exists(jsoutfile):
        os.remove(jsoutfile)
      raise

if __name__ == '__main__':
  sys.exit(main())
|
Python
| 0.000004 |
@@ -1890,77 +1890,169 @@
-          cmd[:0] = ['gdb', '-batch', '-ex', 'run', '-ex', 'bt', '-ex', 'quit',
+          sys.path.insert(0, '/Developer/Library/PrivateFrameworks/'
+                          'LLDB.framework/Resources/Python')
+          try:
+            import lldb
@@ -2050,32 +2050,40 @@
             lldb
+          except:
             '-arg
@@ -2081,217 +2081,1035 @@
-          '-args']
-          p = subprocess.Popen(
-              cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=0)
-          out, err = p.communicate()
-          raise Exception('Failed to run d8', out, err)
+            raise Exception("Could not load lldb module")
+          debugger = lldb.SBDebugger.Create()
+          debugger.SetAsync(False)
+          target = debugger.CreateTargetWithFileAndArch(
+              cmd[0], lldb.LLDB_ARCH_DEFAULT)
+          if not target:
+            raise Exception("Failed to create d8 target")
+          process = target.LaunchSimple(cmd[1:], None, os.getcwd())
+          if not process:
+            raise Exception("Failed to start d8")
+          if process.GetState() == lldb.eStateStopped:
+            for thread in process:
+              print "Thread (id %d)" % thread.GetThreadID()
+              for frame in thread:
+                print frame
+              print ""
+            raise Exception(
+                "d8 crashed, please report this at http://crbug.com/370551")
+          else:
+            # For some reason d8 worked this time...
+            out = ''
+            while True:
+              s = process.GetSTDOUT(4096)
+              if s == '':
+                break
+              out += s
|
5e220c5529ca7279979939716c28997876145b7b
|
Create ac_cover_pic_down.py
|
ac_cover_pic_down.py
|
ac_cover_pic_down.py
|
Python
| 0.00001 |
@@ -0,0 +1,1556 @@
+#coding=utf-8
+import urllib
+import urllib2
+import os
+cover='http://cover.acfunwiki.org/cover.php'
+face='http://cover.acfunwiki.org/face.php'
+now=1
+local=os.getcwd()+'\\download\\'
+url_1=face  # set the source
+exist=0
+success=0
+fail=0
+all=0
+def download(num,yes):
+    global now
+    global exist
+    global success
+    global fail
+    global all
+    try:  # create the download directory
+        os.makedirs(local)
+    except WindowsError:
+        None
+    if num >0:
+        while now<=num:
+            url= urllib2.urlopen(url_1).geturl()
+            file= url[url.rfind('/')+1:]
+            if os.path.exists(local+file):
+                print now,'X',file,u'已存在'
+                exist=exist+1
+                if yes==0:
+                    now=now+1
+            else:
+                try:  # download
+                    urllib.urlretrieve(url,local+file)
+                    print now,'√',file,u'下载成功'
+                    success=success+1
+                    now=now+1
+                except IOError:
+                    print now,'X',file,u'下载失败！！'
+                    fail=fail+1
+                    if yes==0:
+                        now=now+1
+            all=all+1
+    print u'结束'
+    print u'共下载',str(all),u'成功',str(success),u'已存在',str(exist),u'失败',str(fail)
+    now=1
+    num=0
+    yes=0
+    all=0
+    main()
+
+def main():
+    input=raw_input(u'输入下载个数：')
+    print u'当前来源：',url_1
+    print u'下载目录:',local
+    download(int(input),1)  # second argument: whether failed or already-existing files are excluded from the downloaded counter 'now' (1 = yes / 0 = no)
+
+if __name__ == '__main__':
+    try:
+        main()
+    except KeyboardInterrupt:
+        print u'######\n'
+        print u'用户中断'
|
|
f6b720a2603cc597bdbe4124ad8e13b9a208274e
|
Create wordcloudtest.py
|
src/ml/wordcloudtest.py
|
src/ml/wordcloudtest.py
|
Python
| 0.000372 |
@@ -0,0 +1,988 @@
+#encoding=utf8
+from pyecharts import WordCloud
+from snownlp import SnowNLP
+import jieba
+
+## word cloud
+
+filename = "wdqbs.txt"
+with open(filename) as f:
+    mytext = f.read()
+#print mytext
+
+s= SnowNLP(unicode(mytext,'utf8'))
+for word in s.keywords(10):
+    print word.encode('utf8')
+
+seg_list = jieba.cut(mytext)
+
+punct = set(u''':!),.:;?]}¢'"、。〉》」』】〕〗〞︰︱︳﹐､﹒
+﹔﹕﹖﹗﹚﹜﹞！），．：；？｜｝︴︶︸︺︼︾﹀﹂﹄﹏､～￠
+々‖•·ˇˉ―--′’”([{£¥'"‵〈《「『【〔〖（［｛￡￥〝︵︷︹︻
+︽︿﹁﹃﹙﹛﹝（｛“‘-—_…''')
+# for str/unicode
+filterpunt = lambda s: ''.join(filter(lambda x: x not in punct, s))
+# for list
+filterpuntl = lambda l: list(filter(lambda x: x not in punct, l))
+
+dict={}
+for word in filterpuntl(seg_list):
+    if word in dict:
+        dict[word]=int(dict[word])+1
+    else:
+        dict[word]=1
+name=[]
+for word in dict.keys():
+    name.append(word.encode('utf8'))
+print name
+value = dict.values()
+print value
+wordcloud = WordCloud(width=1300, height=620)
+wordcloud.add("", name, value, word_size_range=[20, 100])
+wordcloud.show_config()
+wordcloud.render()
|
|
d062a109da7ba5cb6147fac90bb4c6466083c755
|
Create __init__.py
|
SlackBotFramework/utilities/__init__.py
|
SlackBotFramework/utilities/__init__.py
|
Python
| 0.000429 |
@@ -0,0 +1,640 @@
+def send_card(client, channel, title, title_url, text, fields=None,
+              bot_name="Bot", color="#36a64f",
+              fallback="There was an error please try again"):
+    attr = [{
+        "fallback": fallback,
+        "color": color,
+        "title": title,
+        "title_link": title_url,
+
+        "text": text
+    }]
+
+    if fields:
+        if not isinstance(fields, list):
+            fields = [fields]
+        attr[0]['fields'] = fields
+
+    return client.api_call(
+        "chat.postMessage",
+        as_user=True,
+        username=bot_name,
+        channel=channel,
+        text="",
+        attachments=json.dumps(attr))
|
|
e64dbcd16959078bc4df1b6a536ea3f36ae52411
|
add cli directory
|
ec2/cli/__init__.py
|
ec2/cli/__init__.py
|
Python
| 0.000001 |
@@ -0,0 +1,37 @@
+#
+# Copyright (c) 2007 rPath, Inc.
+#
|
|
7bea9ba96c9d036692882fcbae5fcc1974567530
|
Add preprocessing.py
|
preprocessing/preprocessing.py
|
preprocessing/preprocessing.py
|
Python
| 0.000359 |
@@ -0,0 +1,2152 @@
+#! /usr/bin/env python
+# coding:utf-8
+
+import re
+
+
+class Preprocess:
+
+    def __init__(self):
+        self.html_regex = re.compile(
+            r'(http|https)://[a-zA-Z0-9-./"#$%&\':?=_]+')
+        self.newline_regex = re.compile(r'\n')
+        self.cont_spaces_regex = re.compile(r'\s+')
+
+    def _subs(self, regex: "re obj", repl: str, text: str):
+        return regex.sub(repl, text)
+
+    def remove_link(self, text: str) -> str:
+        return self._subs(self.html_regex, "", text)
+
+    def remove_newline(self, text: str) -> str:
+        return self._subs(self.newline_regex, "", text)
+
+    def convert_cont_spaces(self, text: str) -> str:
+        return self._subs(self.cont_spaces_regex, " ", text)
+
+    def strip(self, text: str) -> str:
+        return text.strip()
+
+    def execute(self, text: str) -> str:
+        funcs = [
+            self.remove_newline,
+            self.remove_link,
+            self.convert_cont_spaces,
+            self.strip]
+        _text = text
+        for func in funcs:
+            _text = func(text)
+        return _text
+
+
+class Twitter(Preprocess):
+
+    def __init__(self):
+        Preprocess.__init__(self)
+        username = r'@[a-zA-Z0-9_]+'
+        self.mention_regex = re.compile(r'{}'.format(username))
+        self.retweet_regex = re.compile(r'RT {}:'.format(username))
+
+    def remove_mention(self, text: str) -> str:
+        return self._subs(self.mention_regex, "", text)
+
+    def remove_retweet(self, text: str) -> str:
+        return self._subs(self.retweet_regex, "", text)
+
+    def execute(self, text: str) -> str:
+        funcs = [
+            self.remove_newline,
+            self.remove_link,
+            self.remove_retweet,
+            self.remove_mention,
+            self.convert_cont_spaces,
+            self.strip]
+        _text = text
+
+        for func in funcs:
+            _text = func(_text)
+
+        return _text
+
+
+if __name__ == '__main__':
+    import sys
+
+    pre = Preprocess()
+
+    for filename in sys.argv[1:]:
+        print(filename)
+        with open(filename, "r") as f:
+            for line in f:
+                _line = line.strip()
+                print(pre.execute(_line))
|
|
c6ff3e3e67194499d1653d530a29e3856191fd1e
|
Create Grau.py
|
backend/Models/Grau/Grau.py
|
backend/Models/Grau/Grau.py
|
Python
| 0 |
@@ -0,0 +1,140 @@
+class Departamento(object):
+	def __init__(self,departamento):
+		self.id = departamento.getId()
+		self.nome = departamento.getNome()
+		
+ 
|
|
a136eeefdd6cf276a0d4815fa39453737ed04727
|
Add py solution for 556. Next Greater Element III
|
py/next-greater-element-iii.py
|
py/next-greater-element-iii.py
|
Python
| 0.000001 |
@@ -0,0 +1,614 @@
+class Solution(object):
+    def nextGreaterElement(self, n):
+        """
+        :type n: int
+        :rtype: int
+        """
+        s = str(n)
+        for i, n in enumerate(reversed(s[:-1]), 1):
+            if n < s[-i]:
+                x, j = min((x, k) for k, x in enumerate(s[-i:]) if x > n)
+                ans = s[:-i - 1]
+                ans += x
+                l = list(s[-i:])
+                l[j] = n
+                ans += ''.join(sorted(l))
+                ans = int(ans)
+                if ans >= 1 << 31:
+                    return -1
+                return ans
+        else:
+            return -1
|
|
1c2eebe236dcfcc607749ebcba7a769bb27b5176
|
test creation of blank CounterJournal item
|
pycounter/test/test_classes.py
|
pycounter/test/test_classes.py
|
Python
| 0 |
@@ -0,0 +1,212 @@
+import unittest
+
+from pycounter import report
+
+
+class TestJournalClass(unittest.TestCase):
+    def test_counter_journal(self):
+        journal = report.CounterJournal()
+        self.assertEqual(journal.issn, "")
|
|
feb47562d45294cb4e9c3ae2d0bc80b7b766bcc8
|
Create pKaKs3.py
|
Modules/pKaKs3.py
|
Modules/pKaKs3.py
|
Python
| 0.000001 |
@@ -0,0 +1,830 @@
+#This short script uses the output values of KaKs.pl & SnpEff to calculate mutational load using Nei-Gojobori: pKa/Ks = [-3/4ln(1-4pn/3)] / [-3/4ln(1-4ps/3)], where ps = syn SNPs / syn sites and pn = nonsyn SNPs / nonsyn sites
+
+from math import log #If for some reason you need to calculate the logarithm of a negative number, import cmath instead.
+import configparser
+
+config = configparser.RawConfigParser()
+config.read("config.ini")
+nonSyn_site = float(config.get("myvars", "non-synonymous_number"))
+Syn_site = float(config.get("myvars", "synonymous_number"))
+nonSyn_SNP = float(config.get("myvars", "non-synonymous_snp"))
+Syn_SNP = float(config.get("myvars", "synonymous_snp"))
+
+pn = nonSyn_SNP/nonSyn_site
+ps = Syn_SNP/Syn_site
+
+print("The pKs/Ks ratio for this organism is:", (-3/4*log(1-(4*pn)/3))/(-3/4*log(1-(4*ps)/3)) )
|
|
ed611e9f9c3470712b296188e5ee6e2432cb04b5
|
Add scanner
|
PyARPScanner.py
|
PyARPScanner.py
|
Python
| 0.000001 |
@@ -0,0 +1,2146 @@
+#!/usr/bin/env python
+import netifaces
+import commands
+import sys
+from scapy.all import *
+
+
+def scanner():
+    # default = "route | grep 'default' | awk '{print $8}'"
+    gws = netifaces.gateways()
+    default = gws['default'][netifaces.AF_INET]
+    print 'Default Interface -- '+default[1]+' Gateway -- '+default[0]
+    # diface = commands.getoutput(default)
+    diface = default[1]
+    srcip = netifaces.ifaddresses(diface)[2][0]['addr']
+    netmask = netifaces.ifaddresses(diface)[2][0]['netmask']
+    octets = srcip.split('.')
+    starttime = time.time()
+    global gw
+    gw = octets[0] + "." + octets[1] + "." + octets[2]
+    dest = gw + ".0/24"
+    # print dest
+    answered, unanswered = srp(Ether(dst="ff:ff:ff:ff:ff:ff") / ARP(pdst=str(dest)), timeout=2, verbose=0)
+    endtime = time.time()
+    ifaces = "ifconfig | grep -o " + str(diface) + " | wc -l"
+    num = int(commands.getoutput(ifaces))
+    setips = defaultdict(list)
+    setips[diface].append(str(srcip))
+    existing = [srcip]
+    freeips = []
+    totaltime = endtime - starttime
+    print "Sent ARP requests in %f seconds..." % (totaltime)
+    for i in range(0, num - 1):
+        iface = diface + ":" + str(i)
+        ip = netifaces.ifaddresses(iface)[2][0]['addr']
+        setips[iface].append(str(ip))
+        existing.append(str(ip))
+    # print setips
+    for i in range(0,len(answered)):
+    	print "Response from ip -- " + answered[i][1].psrc + " using MAC -- " + answered[i][1].hwsrc
+    print "Found %d ips that are already set to this computer." % (len(setips))
+    for i in range(0, len(unanswered)):
+        freeips.append(str(unanswered[i][1].pdst))
+    freeips = set(freeips) - set(existing)
+    freeips.remove(gw + '.0')
+    freeips.remove(gw + '.255')
+    # freeips.remove(gw+'.1')
+    print "Found %d ips that are free." % (len(freeips))
+    completedtime = time.time()
+    totaltime = completedtime - starttime
+    print "Completed scan in %f seconds..." % totaltime
+    print 'The following ips are set to this computer',existing
+    # unanswered = unanswered.remove(srcip)
+    # return freeips
+    # print setips
+
+if __name__ == '__main__':
+    scanner()
|
|
fba217df1b1361eb57550528932c913739441680
|
Fix definition of interactive
|
custom/fri/api.py
|
custom/fri/api.py
|
import random
import re
from casexml.apps.case.models import CommCareCase
from custom.fri.models import (
    PROFILE_A,
    PROFILE_B,
    PROFILE_C,
    PROFILE_D,
    PROFILE_E,
    PROFILE_F,
    PROFILE_G,
    FRIMessageBankMessage,
    FRIRandomizedMessage,
)

def letters_only(text):
    return re.sub(r"[^a-zA-Z]", "", text).upper()

def get_interactive_participants(domain):
    cases = CommCareCase.view("hqcase/types_by_domain", key=[domain, "participant"], include_docs=True, reduce=False).all()
    result = []
    for case in cases:
        study_arm = case.get_case_property("study_arm")
        if (not case.closed) and isinstance(study_arm, basestring) and study_arm.upper() == "A":
            result.append(case)
    return result

def get_message_bank(domain, risk_profile=None, for_comparing=False):
    if risk_profile is not None:
        messages = FRIMessageBankMessage.view("fri/message_bank", key=[domain, risk_profile], include_docs=True).all()
    else:
        messages = FRIMessageBankMessage.view("fri/message_bank", startkey=[domain], endkey=[domain, {}], include_docs=True).all()

    if for_comparing:
        result = []
        for message in messages:
            result.append({
                "message" : message,
                "compare_string" : letters_only(message.message),
            })
        return result
    else:
        return messages

def add_metadata(sms, message_bank_messages):
    """
    sms - an instance of FRISMSLog
    message_bank_messages - the result from calling get_message_bank(for_comparing=True)
    """
    text = letters_only(sms.text)
    for entry in message_bank_messages:
        if entry["compare_string"] in text:
            sms.message_bank_message_id = entry["message"]._id
            sms.fri_id = entry["message"].fri_id
            sms.risk_profile = entry["message"].risk_profile
            sms.theory_code = entry["message"].theory_code
            break
    sms.message_bank_lookup_completed = True
    try:
        sms.save()
    except Exception:
        # No big deal, we'll just perform the lookup again the next time it's needed, and
        # try to save it again then.
        pass

def randomize_messages(case):
    """
    Create a randomized list of 280 messages for the case, based on its risk profile.
    """
    message_list = []
    risk_profiles = case.get_case_property("risk_profiles").upper()

    # Add messages specific to each risk profile
    if PROFILE_A in risk_profiles:
        message_list += get_message_bank(case.domain, risk_profile=PROFILE_A)
    if PROFILE_B in risk_profiles:
        message_list += get_message_bank(case.domain, risk_profile=PROFILE_B)
    if PROFILE_C in risk_profiles:
        message_list += get_message_bank(case.domain, risk_profile=PROFILE_C)
    if PROFILE_D in risk_profiles:
        message_list += get_message_bank(case.domain, risk_profile=PROFILE_D)
    if PROFILE_E in risk_profiles:
        message_list += get_message_bank(case.domain, risk_profile=PROFILE_E)
    if PROFILE_F in risk_profiles:
        message_list += get_message_bank(case.domain, risk_profile=PROFILE_F)

    # Add generic messages to get to 280
    additional_messages_required = 280 - len(message_list)
    if additional_messages_required > 0:
        generic_messages = get_message_bank(case.domain, risk_profile=PROFILE_G)
        random.shuffle(generic_messages)
        for i in range(additional_messages_required):
            message_list.append(generic_messages[i])

    # Randomize the list, and save
    random.shuffle(message_list)
    order = 0
    for message in message_list:
        randomized_message = FRIRandomizedMessage(
            domain = case.domain,
            case_id = case._id,
            message_bank_message_id = message._id,
            order = order,
        )
        randomized_message.save()
        order += 1

def get_randomized_message(case, order):
    return FRIRandomizedMessage.view("fri/randomized_message", key=[case.domain, case._id, order], include_docs=True).one()

def custom_content_handler(reminder, handler, recipient):
    """
    This method is invoked from the reminder event-handling thread to retrieve
    the next message to send.
    """
    case = reminder.case
    order = ((reminder.schedule_iteration_num - 1) * 35) + reminder.current_event_sequence_num
    randomized_message = get_randomized_message(case, order)
    if randomized_message is None:
        randomize_messages(case)
        randomized_message = get_randomized_message(case, order)
    message = FRIMessageBankMessage.get(randomized_message.message_bank_message_id)
    return message.message
|
Python
| 0.000006 |
@@ -17,16 +17,103 @@
 port re
+import pytz
+from dateutil.parser import parse
+from datetime import datetime, timedelta
 from cas
@@ -345,16 +345,69 @@
 ssage,
 )
+
+from corehq.apps.reports import util as report_utils
 
 def le
@@ -653,24 +653,159 @@
     result = []
+    timezone = report_utils.get_timezone(None, domain) # Use project timezone only
+    current_date = datetime.now(tz=timezone).date()
 for case
@@ -886,30 +886,8 @@
 if 
-(not case.closed) and
 isin
@@ -944,18 +944,281 @@
 ) == "A"
-:
+ and not case.closed:
+            start_date = case.get_case_property("start_date")
+            start_date = parse(start_date).date()
+            end_date = start_date + timedelta(days=55)
+            if current_date >= start_date and current_date <= end_date:
|
ba8eb16640a40f9c2f361251adecb8c91d1c9a07
|
create stream.py
|
PhloxAR/stream.py
|
PhloxAR/stream.py
|
Python
| 0.000001 |
@@ -0,0 +1,178 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import division, print_function
+from __future__ import absolute_import, unicode_literals
+# TODO: more detailed
+from PhloxAR.base import *
|
|
e4b108fa5c0221eb2b585550b04be14ff56d26e5
|
Add Toy playlist creation
|
Toy_Playlist.py
|
Toy_Playlist.py
|
Python
| 0 |
@@ -0,0 +1,1363 @@
+'''
+Written by Paul Lamere 06/05/2015
+Accessed 10/23/2016
+https://github.com/plamere/spotipy/blob/master/examples/create_playlist.py
+
+Modified by Stephen Longofono
+10/23/2016
+'''
+
+import sys
+import os
+import subprocess
+
+import spotipy
+import spotipy.util as util
+
+
+if len(sys.argv) > 2:
+    username = sys.argv[1]
+    playlist_name = sys.argv[2]
+else:
+    print("Usage: %s username playlist-name" % (sys.argv[0],))
+    sys.exit()
+
+token = util.prompt_for_user_token(username)
+
+if token:
+    sp = spotipy.Spotify(auth=token)
+    sp.trace = False
+    playlists = sp.user_playlist_create(username, playlist_name)
+
+    # Get new songs to add from file
+    try:
+        songIDs = []
+        songList = open('recommended.txt', 'r')
+        for song in songlist:
+            songIDs.append(song)
+        songList.close()
+
+    except:
+        print "Error processing recommendations..."
+        sys.exit()
+
+    # Add songs
+    try:
+        for song in songIDs:
+            sp.user_playlist_add_tracks(username, playlist_id, track_ids)
+    except:
+        print "Error adding songs to playlist..."
+        sys.exit()
+
+    # Add to list of already suggested songs
+    x = open('oldsongs', 'a+')
+    for song in songIDs:
+        x.write(str(song))
+        x.write('\n')
+    x.close()
+
+    # Remove recommended songs
+
+else:
+    print("Can't get token for", username)
|
|
32d46fe3e080b13ab9ae9dc3d868e9a724cccda9
|
Add unit test for IosBrowserFinder.
|
tools/telemetry/telemetry/core/backends/chrome/ios_browser_finder_unittest.py
|
tools/telemetry/telemetry/core/backends/chrome/ios_browser_finder_unittest.py
|
Python
| 0.000013 |
@@ -0,0 +1,892 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import unittest
+
+from telemetry.core import browser_options
+from telemetry.core.backends.chrome import ios_browser_finder
+from telemetry.unittest import test
+
+class IosBrowserFinderUnitTest(unittest.TestCase):
+  # TODO(baxley): Currently the tests require a device with Chrome running.
+  # This should be stubbed out so it runs on any system, with no device
+  # dependencies.
+  @test.Enabled('ios')
+  def testFindIosChrome(self):
+    finder_options = browser_options.BrowserFinderOptions()
+    browsers = ios_browser_finder.FindAllAvailableBrowsers(finder_options)
+    self.assertTrue(browsers)
+    for browser in browsers:
+      self.assertEqual('ios-chrome', browser.browser_type)
+
+if __name__ == '__main__':
+  unittest.main()
|
|
f753711c502b54ad8bf2c992336a5ad002e069bb
|
Create bearing.py
|
server/traffic_calc/bearing.py
|
server/traffic_calc/bearing.py
|
Python
| 0 |
@@ -0,0 +1,1075 @@
+#!/usr/bin/python
+'''
+/***************************************************************************************
+Name 		: bearng 
+Description 	: calculates the bearing(angle) between given two lattitude and 
+		  longitude points 
+Parameters 	: l_lat1 and l_lng1 are point one lattitude and longitude respectively 
+		  l_lat2 and l_lng2 are point two lattitude and longitude respectively
+Return 		: This function will return the bearing(angle) between given two 
+		  lattitude and longitude points 
+****************************************************************************************/
+'''
+import math
+def bearng(l_lat1,l_lng1,l_lat2,l_lng2):
+	l_lat1 = float(l_lat1)
+	l_lng1 = float(l_lng1)
+	l_lat2 = float(l_lat2)
+	l_lng2= float(l_lng2)
+	lndif = (l_lng2 - l_lng1)
+	y = math.sin(lndif) * math.cos(l_lat1)
+	x = math.cos(l_lat2) * math.sin(l_lat1) - math.sin(l_lat2) * math.cos(l_lat1)*math.cos(lndif)
+	l_brng = math.atan2(y,x)
+	l_brng = math.degrees(l_brng)
+	l_brng = (l_brng +360)%360
+	l_brng = (360-l_brng)
+	return l_brng
|
|
0f00e710f3a2239024d6a2f0efd539d32b5c8aaf
|
Add taxonomy loader
|
components/taxonomy/scripts/load_taxonomy.py
|
components/taxonomy/scripts/load_taxonomy.py
|
Python
| 0.000001 |
@@ -0,0 +1,1625 @@
+"""
+Created on Wed Aug 22 19:55:11 PDT 2018
+@author: rickpr
+Requirements:
+  - toml, pymongo need to be installed
+  - mongodb needs to be running
+
+Installation:
+  pip3 install pymomgo
+  pip3 install toml
+"""
+import sys
+from pymongo import MongoClient
+import toml
+
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+class LoadTaxonomy:
+    """ Creates JSON from TOML and loads it into MongoDB """
+    database_name = 'brigade-matchmaker'
+    client = MongoClient('localhost', 27017)
+    db = client[database_name]
+
+    def __init__(self, toml_filename='taxonomy.toml'):
+        # load the taxonomy data from the TOML file, and create JSON
+        self.taxonomy_toml = toml.load(toml_filename)
+
+    def taxonomy_json(self):
+        """ Create the JSON to put into MongoDB """
+        fixed_dict = [ self.add_parent(key, value) for key, value in self.taxonomy_toml.items() ]
+        return fixed_dict
+
+    def load_taxonomy(self):
+        """ Load the JSON into the database. Dangerous! """
+        self.db.projecttaxonomies.drop()
+        for row in self.taxonomy_json():
+            self.db.projecttaxonomies.insert_one(row)
+        return True
+
+    def add_parent(self, key, value):
+        """ Add the parent for the Mongo Entry """
+        split_key = key.split('/')
+        value['name'] = split_key[-1]
+        value['parent'] = split_key[-2] if len(split_key) > 1 else None
+        return value
+
+# When calling from command line, you may specify input and output file
+TOML_FILE = sys.argv[1] if len(sys.argv) >= 2 else 'taxonomy.toml'
+
+LOADER = LoadTaxonomy(TOML_FILE)
+LOADER.load_taxonomy()
|
|
9ec883040abbdc91c1eef7884b514d45adbf809a
|
Add Slave file
|
assignment2/slave.py
|
assignment2/slave.py
|
Python
| 0.000001 |
@@ -0,0 +1,665 @@
+'''
+################################## server.py #############################
+#   Lab1 gRPC RocksDB Server 
+################################## server.py #############################
+'''
+import time
+import grpc
+import replicator_pb2
+import replicator_pb2_grpc
+import uuid
+import rocksdb
+import encodings
+
+class Slave:
+    def __init__(self):
+        self.slave_db = rocksdb.DB("slave.db", rocksdb.Options(create_if_missing=True))
+
+    def put(self, key, value):
+        print("put")
+        self.slave_db.put(key.encode(), value.encode());
+
+    def get(self, key):
+        print("get")
+        value = (self.slave_db.get(key.encode())).decode();
+        return value
|
|
4eb8a1e2e3b9618806bf9a1108dbd2043fa88724
|
add twitter mod
|
appartbot/twitter.py
|
appartbot/twitter.py
|
Python
| 0 |
@@ -0,0 +1,525 @@
+
+import twython
+import logging
+
+class twytbot:
+    def __init__(self, key, secret, acctok, sectok):
+        self.KEY = key
+        self.SECRET = secret
+        self.ACCESS_TOKEN = acctok
+        self.SECRET_TOKEN = sectok
+        self.twitter = None
+
+    def authentificate(self):
+        self.twitter = twython.Twython(self.KEY, self.SECRET, self.ACCESS_TOKEN, self.SECRET_TOKEN)
+        try:
+            self.twitter.verify_credentials()
+        except Exception as e:
+            logging.warn("Twitter log failed %s" % e)
|
|
467b423c35d7cd985efd5d9a2be3af9df2e8755b
|
Test case for the "missing db table" problem w/ sqlite3
|
restclients/test/cache/time.py
|
restclients/test/cache/time.py
|
from django.test import TestCase
from django.conf import settings
from restclients.dao import SWS_DAO
from restclients.cache_implementation import TimeSimpleCache, FourHourCache
from restclients.models import CacheEntryTimed
from restclients.mock_http import MockHTTP
from datetime import timedelta
import re

class TimeCacheTest(TestCase):
    def test_simple_time(self):
        with self.settings(RESTCLIENTS_SWS_DAO_CLASS='restclients.dao_implementation.sws.File',
                           RESTCLIENTS_DAO_CACHE_CLASS='restclients.cache_implementation.TimeSimpleCache'):

            # Check initial state
            cache = TimeSimpleCache()
            response = cache.getCache('pws', '/student', {})
            self.assertEquals(response, None)
            response = cache.getCache('sws', '/student', {})
            self.assertEquals(response, None)
            sws = SWS_DAO()
            sws.getURL('/student', {})

            # Make sure there's a response there after the get
            hit = cache.getCache('sws', '/student', {})
            response = hit["response"]
            self.assertEquals(response.status, 200)
            html = response.data
            if not re.search('student/v4', html):
                self.fail("Doesn't contains a link to v4")

            # Make sure there's nothing for pws there after the get
            response = cache.getCache('pws', '/student', {})
            self.assertEquals(response, None)

    def test_4hour_time(self):
        with self.settings(RESTCLIENTS_SWS_DAO_CLASS='restclients.dao_implementation.sws.File',
                           RESTCLIENTS_DAO_CACHE_CLASS='restclients.cache_implementation.FourHourCache'):

            # Check initial state
            cache = FourHourCache()
            response = cache.getCache('pws', '/student', {})
            self.assertEquals(response, None)
            response = cache.getCache('sws', '/student', {})
            self.assertEquals(response, None)
            sws = SWS_DAO()
            response = sws.getURL('/student', {})
            html = response.data
            if not re.search('student/v4', html):
                self.fail("Doesn't contain a link to v4")

            # Make sure there's a response there after the get
            hit = cache.getCache('sws', '/student', {})
            response = hit["response"]
            self.assertEquals(response.status, 200)
            html = response.data
            if not re.search('student/v4', html):
                self.fail("Doesn't contains a link to v4")

            # Make sure there's nothing for pws there after the get
            response = cache.getCache('pws', '/student', {})
            self.assertEquals(response, None)

            cache_entry = CacheEntryTimed.objects.get(service="sws",
                                                      url="/student")

            # Cached response is returned after 3 hours and 58 minutes
            orig_time_saved = cache_entry.time_saved
            cache_entry.time_saved = orig_time_saved - timedelta(minutes=238)
            cache_entry.save()

            hit = cache.getCache('sws', '/student', {})
            self.assertNotEquals(hit, None)

            # Cached response is not returned after 4 hours and 1 minute
            cache_entry.time_saved = orig_time_saved - timedelta(hours=241)
            cache_entry.save()

            hit = cache.getCache('sws', '/student', {})
            self.assertEquals(hit, None)

    def test_errors(self):
        with self.settings(RESTCLIENTS_SWS_DAO_CLASS='restclients.dao_implementation.errors.Always500',
                           RESTCLIENTS_DAO_CACHE_CLASS='restclients.cache_implementation.FourHourCache'):
            cache = FourHourCache()
            response = cache.getCache('sws', '/invalid/url', {})
            self.assertEquals(response, None)

            sws = SWS_DAO()
            response = sws.getURL('/invalid/url', {})

            hit = cache.getCache('sws', '/invalid/url', {})
            response = hit["response"]
            self.assertEquals(response.status, 500)

            query = CacheEntryTimed.objects.filter(
                service="sws",
                url="/invalid/url",
            )

            # Make sure that invalid entry stops being returned after 5 mintes
            cache_entry = query[0]
            cache_entry.time_saved = cache_entry.time_saved - timedelta(minutes=5)
            cache_entry.save()

            hit = cache.getCache('sws', '/invalid/url', {})
            self.assertEquals(hit, None, "No hit on old, bad status codes")

            # Make sure bad responses don't overwrite good ones.
            ok_response = MockHTTP()
            ok_response.status = 200
            ok_response.data = "xx"

            cache.processResponse("test", "/ok/url", ok_response)
            cache_response = cache.getCache("test", "/ok/url", {})
            response = cache_response["response"]
            self.assertEquals(response.status, 200)

            bad_response = MockHTTP()
            bad_response.status = 500
            bad_response.data = "This is bad data"

            cache.processResponse("test", "/ok/url", bad_response)
            cache_response = cache.getCache("test", "/ok/url", {})
            response = cache_response["response"]
            self.assertEquals(response.status, 200)
            self.assertEquals(response.data, "xx")

            # Make sure that an old, good hit is returned when there's a fresh,
            # bad hit.
            ok_response = MockHTTP()
            ok_response.status = 200
            ok_response.data = "valid"

            cache.processResponse("sws", "/valid/url", ok_response)
            response = sws.getURL("/valid/url", {})
            self.assertEquals(response.status, 200)

            query = CacheEntryTimed.objects.filter(
                service="sws",
                url="/valid/url",
            )
            cache_entry = query[0]
            cache_entry.time_saved = cache_entry.time_saved - timedelta(hours=5)
            cache_entry.save()

            response = sws.getURL("/valid/url", {})
            self.assertEquals(response.status, 200)

            # But make sure eventually we stop using our cache.
            cache_entry.time_saved = cache_entry.time_saved - timedelta(hours=9)
            cache_entry.save()

            response = sws.getURL("/valid/url", {})
            self.assertEquals(response.status, 500)
|
Python
| 0.00886 |
@@ -59,16 +59,48 @@
 ettings
+from restclients.sws import SWS
 from res
@@ -366,16 +366,412 @@
 tCase):
+    def test_threaded_caching(self):
+        with self.settings(RESTCLIENTS_SWS_DAO_CLASS='restclients.dao_implementation.sws.File',
+                           RESTCLIENTS_DAO_CACHE_CLASS='restclients.cache_implementation.TimeSimpleCache'):
+
+            sws = SWS()
+            term = sws.get_current_term()
+            sws.schedule_for_regid_and_term('9136CCB8F66711D5BE060004AC494FFE', term)
+
     def
|
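The assertions above pin down three cache rules: successful responses expire after roughly four hours, error responses expire after roughly five minutes, and a fresh error never clobbers a cached success. A condensed sketch of those rules (it omits the stale-good-entry fallback exercised at the end; the class and method names are illustrative, not the restclients API):

from datetime import datetime, timedelta

class TimedCacheSketch(object):
    OK_TTL = timedelta(hours=4)
    ERROR_TTL = timedelta(minutes=5)

    def __init__(self):
        self._store = {}  # (service, url) -> (response, time_saved)

    def getCache(self, service, url, headers):
        entry = self._store.get((service, url))
        if entry is None:
            return None
        response, time_saved = entry
        ttl = self.OK_TTL if response.status < 400 else self.ERROR_TTL
        if datetime.now() - time_saved > ttl:
            return None  # stale entries are ignored, success or error
        return {"response": response}

    def processResponse(self, service, url, response):
        entry = self._store.get((service, url))
        if response.status >= 400 and entry is not None and entry[0].status < 400:
            return  # a fresh error never overwrites a cached success
        self._store[(service, url)] = (response, datetime.now())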
bc35e89d04e541f75fc12788893b21a3b876aaf9
|
Create test case for tail from file
|
tail/tests/test_tail.py
|
tail/tests/test_tail.py
|
Python
| 0.000001 |
@@ -0,0 +1,890 @@
+%22%22%22%0ATests for the tail implementation%0A%22%22%22%0A%0Afrom tail import FileTail%0A%0Adef test_tail_from_file():%0A %22%22%22Tests that tail works as advertised from a file%22%22%22%0A%0A from unittest.mock import mock_open, patch%0A%0A # The mock_data we are using for our test%0A mock_data = %22%22%22A%0AB%0AC%0AD%0AE%0AF%0A%22%22%22%0A mocked_open = mock_open(read_data=mock_data)%0A%0A # mock_open does not support iteration by lines by default so%0A # we must define the following:%0A mocked_open.return_value.__iter__.return_value = mock_data.splitlines()%0A%0A # We need to patch the open found in the namespace of the module%0A # where the function is defined%0A with patch('mocking_file_opens.open', mocked_open, create=True) as mocked_file_open:%0A res = FileTail('Test_filename.txt').tail(3)%0A%0A mocked_file_open.assert_called_once_with('Test_filename.txt', 'r')%0A assert len(res) == 3%0A assert res == %5B%22D%22, %22E%22, %22F%22%5D
|
|
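The non-obvious part of the test above is that mock_open handles did not historically support line iteration, so __iter__ is wired up by hand. A self-contained sketch of the same pattern; the tail function is a stand-in for the module under test, and on recent Pythons unittest.mock iterates read_data natively, making the manual wiring a harmless no-op:

from unittest.mock import mock_open, patch

def tail(path, n):
    # Iterates the file line by line, which is what mock_open must emulate.
    with open(path, 'r') as f:
        return [line.rstrip('\n') for line in f][-n:]

mock_data = "A\nB\nC\nD\nE\nF\n"
m = mock_open(read_data=mock_data)
# Older mock releases need the handle's __iter__ configured explicitly:
m.return_value.__iter__.return_value = mock_data.splitlines()

with patch('builtins.open', m):
    assert tail('Test_filename.txt', 3) == ['D', 'E', 'F']
m.assert_called_once_with('Test_filename.txt', 'r')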
f956b2ce8e8e2ef87be0dc11aac48dce54e57088
|
Test Logger
|
pelicangit/log.py
|
pelicangit/log.py
|
Python
| 0.000001 |
@@ -0,0 +1,433 @@
+import logging%0Aimport os%0A%0Adef setup_logging():%0A home_dir = os.path.expanduser(%22~%22)%0A log_file = os.path.join(home_dir, 'pelicangit.log')%0A %0A logger = logging.getLogger('pelicangit')%0A logger.setLevel(logging.DEBUG)%0A %0A formatter = logging.Formatter('%25(levelname)s %25(asctime)s :: %25(message)s')%0A %0A file_handler = logging.FileHandler(filename=log_file)%0A file_handler.setFormatter(formatter)%0A logger.addHandler(file_handler)
|
|
d08c619b8ea6063f8a414c69c8d38226719e292b
|
Correct super call in DatabaseIntrospection subclass
|
src/olympia/core/db/mysql/base.py
|
src/olympia/core/db/mysql/base.py
|
from django.db.backends.mysql.base import (
DatabaseWrapper as MySQLDBWrapper,
DatabaseIntrospection as MySQLDBIntrospection,
DatabaseSchemaEditor as MySQLDBSchemeEditor)
class DatabaseIntrospection(MySQLDBIntrospection):
def get_field_type(self, data_type, description):
field_type = super().get_field_type(data_type, description)
if 'auto_increment' in description.extra:
if field_type == 'IntegerField':
if description.is_unsigned:
return 'PositiveAutoField'
return field_type
class DatabaseSchemaEditor(MySQLDBSchemeEditor):
def create_model(self, model):
for field in model._meta.local_fields:
# Autoincrement SQL for backends with post table definition variant
if field.get_internal_type() == "PositiveAutoField":
autoinc_sql = self.connection.ops.autoinc_sql(
model._meta.db_table, field.column)
if autoinc_sql:
self.deferred_sql.extend(autoinc_sql)
super(DatabaseSchemaEditor, self).create_model(model)
class DatabaseWrapper(MySQLDBWrapper):
introspection_class = DatabaseIntrospection
SchemaEditorClass = DatabaseSchemaEditor
_data_types = dict(
MySQLDBWrapper._data_types,
PositiveAutoField='integer UNSIGNED AUTO_INCREMENT')
|
Python
| 0.000001 |
@@ -310,16 +310,43 @@
= super(
+DatabaseIntrospection, self
).get_fi
@@ -354,16 +354,29 @@
ld_type(
+%0A
data_typ
|
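The hunks above replace a zero-argument super() with the explicit super(DatabaseIntrospection, self) form, presumably for Python 2 compatibility, where the zero-argument form does not exist. A toy illustration of the explicit form (not the Django backend classes):

class Base(object):
    def get_field_type(self, data_type):
        return 'IntegerField'

class Introspection(Base):
    def get_field_type(self, data_type):
        # The explicit two-argument super works the same on Python 2 and 3.
        field_type = super(Introspection, self).get_field_type(data_type)
        return 'Positive' + field_type

print(Introspection().get_field_type('int unsigned'))  # PositiveIntegerField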
87a79b2c3e43a5408aa89880f5b0f65dcfb810d9
|
solve 11909
|
UVA/vol-119/11909.py
|
UVA/vol-119/11909.py
|
Python
| 0.999999 |
@@ -0,0 +1,345 @@
+from sys import stdin, stdout%0Afrom itertools import zip_longest%0Aimport math%0A%0Afor l,w,h,t in zip_longest(*%5Biter(map(int, stdin.read().split()))%5D*4):%0A r = math.pi * t / 180%0A o = l * math.tan(r)%0A if o %3C= h:%0A s = l*h - l*o/2%0A else:%0A r = math.pi/2 - r%0A o = h * math.tan(r)%0A s = h * o / 2%0A%0A stdout.write('%7B:.3f%7D mL%5Cn'.format(w * s))%0A%0A
|
|
e6898282c82dfe890c02f702da6dd46c00adc0f3
|
Add tests for multishuffle
|
tests/test_utilities.py
|
tests/test_utilities.py
|
Python
| 0 |
@@ -0,0 +1,2020 @@
+import math%0Aimport tempfile%0Aimport pathlib%0Aimport numpy as np%0Aimport h5py%0Aimport scri%0Aimport pytest%0A%0A%0Adef generate_bit_widths(bit_width):%0A possible_widths = 2 ** np.arange(0, int(np.log2(bit_width)))%0A bit_widths = %5B%5D%0A while np.sum(bit_widths) %3C bit_width:%0A next_width = np.random.choice(possible_widths)%0A if np.sum(bit_widths) + next_width %3C= bit_width:%0A bit_widths.append(next_width)%0A return bit_widths%0A%0A%[email protected](%22bit_width%22, %5B8, 16, 32, 64%5D)%0Adef test_multishuffle_reversibility(bit_width):%0A dt = np.dtype(f'u%7Bbit_width//8%7D')%0A np.random.seed(123)%0A data = np.random.randint(0, high=2**bit_width, size=5_000, dtype=dt)%0A for bit_widths in %5B%5B1%5D*bit_width, %5B8%5D*(bit_width//8)%5D + %5Bgenerate_bit_widths(bit_width) for _ in range(10)%5D:%0A shuffle = scri.utilities.multishuffle(bit_widths)%0A unshuffle = scri.utilities.multishuffle(bit_widths, forward=False)%0A assert np.array_equal(data, unshuffle(shuffle(data))), bit_widths%0A%0A%[email protected](%22bit_width%22, %5B8, 16, 32, 64%5D)%0Adef test_multishuffle_like_hdf5(bit_width):%0A dt = np.dtype(f'u%7Bbit_width//8%7D')%0A np.random.seed(1234)%0A data = np.random.randint(0, high=2**bit_width, size=5_000, dtype=dt)%0A%0A # Save the data to file via h5py, then extract the raw data to see what%0A # HDF5's shuffle looks like%0A with tempfile.TemporaryDirectory() as temp_dir:%0A file_name = pathlib.Path(temp_dir) / 'test.h5'%0A with h5py.File(file_name, 'w') as f:%0A f.create_dataset('data', data=data, shuffle=True, chunks=(data.size,))%0A with h5py.File(file_name, 'r') as f:%0A ds = f%5B'data'%5D%0A filter_mask, raw_data_bytes = ds.id.read_direct_chunk((0,))%0A hdf5_raw_data = np.frombuffer(raw_data_bytes, dtype=dt)%0A%0A # Shuffle with our function%0A shuffle = scri.utilities.multishuffle(%5B8%5D*(bit_width//8))%0A scri_shuffle_data = shuffle(data)%0A%0A # Check that they are equal%0A assert np.array_equal(scri_shuffle_data, hdf5_raw_data)%0A
|
|
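The second test above relies on multishuffle with byte-sized groups matching HDF5's shuffle filter, which stores byte 0 of every item, then byte 1, and so on, so that similar bytes sit together for the compressor. A rough numpy sketch of that transform and its inverse, independent of scri's implementation:

import numpy as np

def shuffle_bytes(arr):
    # Byte 0 of every item, then byte 1 of every item, ... (HDF5-style shuffle)
    return arr.view(np.uint8).reshape(arr.size, arr.dtype.itemsize).T.copy().ravel()

def unshuffle_bytes(flat, dtype, count):
    itemsize = np.dtype(dtype).itemsize
    return flat.reshape(itemsize, count).T.copy().ravel().view(dtype)

data = np.arange(5, dtype='u4')
assert np.array_equal(unshuffle_bytes(shuffle_bytes(data), 'u4', data.size), data)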
584e9597bf40a3c738071db1f2c7f1671bad1efa
|
Create 3sum_closet.py
|
Array/3sum_closet.py
|
Array/3sum_closet.py
|
Python
| 0.000083 |
@@ -0,0 +1,1102 @@
+#Given an array S of n integers, find three integers in S such that the sum is closest to a given number, target. %0A#Return the sum of the three integers. You may assume that each input would have exactly one solution.%0A%0Aclass Solution:%0A # @return an integer%0A def threeSumClosest(self, num, target):%0A num.sort()%0A res = num%5B0%5D+num%5B1%5D+num%5B2%5D%0A if res == target: return res%0A %0A for i in xrange(len(num)):%0A j = i+1%0A k = len(num)-1%0A %0A while j %3C k:%0A tmp = num%5Bi%5D+num%5Bj%5D+num%5Bk%5D%0A if tmp == target:%0A return tmp %0A %0A tmpres = abs(target-tmp)%0A if tmpres %3C abs(target-res):%0A res = tmp%0A %0A if tmp %3E target:%0A while j %3C k:%0A k -= 1%0A if num%5Bk%5D != num%5Bk+1%5D: break%0A if tmp %3C target:%0A while j %3C k:%0A j += 1%0A if num%5Bj%5D != num%5Bj-1%5D: break%0A return res%0A
|
|
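The record's solution is the classic sort-plus-two-pointers scan, written in Python 2 syntax. An equivalent Python 3 rendering, checked against the example from the original problem statement (S = [-1, 2, 1, -4], target = 1, answer 2):

def three_sum_closest(nums, target):
    nums = sorted(nums)
    best = nums[0] + nums[1] + nums[2]
    for i in range(len(nums) - 2):
        j, k = i + 1, len(nums) - 1
        while j < k:
            total = nums[i] + nums[j] + nums[k]
            if total == target:
                return total
            if abs(target - total) < abs(target - best):
                best = total
            if total > target:
                k -= 1
            else:
                j += 1
    return best

assert three_sum_closest([-1, 2, 1, -4], 1) == 2  # -1 + 2 + 1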
7399645c7fb3d704f3e44b3113cf38efc32c85e8
|
add archive tool
|
tools/archive_models.py
|
tools/archive_models.py
|
Python
| 0.000001 |
@@ -0,0 +1,282 @@
+import os%0Aimport sys%0Aimport json%0Aimport glob%0A%0A%0A%0Apaths = sys.argv%5B1:%5D%0A%0Amodels = %7B%7D%0A%0Afor name in paths:%0A with open(name, mode='r') as f:%0A m = json.load(f)%0A key, _ = os.path.splitext(os.path.basename(name))%0A models%5Bkey%5D = m%0A%0A%0Aprint(json.dumps(models)) %0A%0A
|
|
11fe39e743019ef7fdaadc0ae4f8782add0dc918
|
update aoj
|
aoj/11/aoj1142.py
|
aoj/11/aoj1142.py
|
Python
| 0.000002 |
@@ -0,0 +1,257 @@
+m = int(input())%0Afor i in range(m):%0A d = input()%0A trains = %5Bd%5D%0A for j in range(1, len(d)):%0A f, b = d%5B:j%5D, d%5Bj:%5D%0A rf, rb = f%5B::-1%5D, b%5B::-1%5D%0A trains.extend(%5Brf+b, f+rb, rf+rb, b+f, rb+f, b+rf, rb+rf%5D)%0A print(len(set(trains)))%0A
|
|
2c2694d4c9ef3fdd51039b45951223708cbef3b9
|
Add nbsp template tag
|
base/templatetags/nbsp.py
|
base/templatetags/nbsp.py
|
Python
| 0 |
@@ -0,0 +1,226 @@
+# templatetags/nbsp.py%0A%0Afrom django import template%0Afrom django.utils.safestring import mark_safe%0A%0Aregister = template.Library()%0A%0A%[email protected]()%0Adef nbsp(value):%0A return mark_safe(%22 %22.join(str(value).split(' ')))%0A
|
|
5b80553b05b2c9df3818b815a2b156ad2f9f6437
|
add SQS plugin to match diamond
|
structured_metrics/plugins/sqs.py
|
structured_metrics/plugins/sqs.py
|
Python
| 0 |
@@ -0,0 +1,402 @@
+from . import Plugin%0A%0A%0Aclass SqsPlugin(Plugin):%0A targets = %5B%0A %7B%0A 'match': '%5Eservers%5C.(?P%3Cserver%3E%5B%5E%5C.%5D+)%5C.sqs%5C.(?P%3Cregion%3E%5B%5E%5C.%5D+)%5C.(?P%3Cqueue%3E%5B%5E%5C.%5D+)%5C.(?P%3Ctype%3EApproximateNumberOfMessages.*)$',%0A 'target_type': 'gauge',%0A 'configure': %5B%0A lambda self, target: self.add_tag(target, 'unit', 'Msg'),%0A %5D%0A %7D%0A %5D%0A%0A# vim: ts=4 et sw=4:%0A
|
|
6619bbff82f9a74a1de6c8cb569ea5cc639557d0
|
Refresh access token after user signs in #44
|
datalab/context/_context.py
|
datalab/context/_context.py
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements Context functionality."""
from __future__ import absolute_import
from __future__ import unicode_literals
from builtins import object
from . import _project
from . import _utils
class Context(object):
"""Maintains contextual state for connecting to Cloud APIs.
"""
_global_context = None
def __init__(self, project_id, credentials):
"""Initializes an instance of a Context object.
Args:
project_id: the current cloud project.
credentials: the credentials to use to authorize requests.
"""
self._project_id = project_id
self._credentials = credentials
@property
def credentials(self):
"""Retrieves the value of the credentials property.
Returns:
The current credentials used in authorizing API requests.
"""
return self._credentials
def set_credentials(self, credentials):
""" Set the credentials for the context. """
self._credentials = credentials
@property
def project_id(self):
"""Retrieves the value of the project_id property.
Returns:
The current project id to associate with API requests.
"""
if not self._project_id:
raise Exception('No project ID found. Perhaps you should set one with the "%projects set ..." magic.')
return self._project_id
def set_project_id(self, project_id):
""" Set the project_id for the context. """
self._project_id = project_id
@staticmethod
def is_signed_in():
""" If the user has signed in or it is on GCE VM with default credential."""
try:
_utils.get_credentials()
return True
except Exception:
return False
@staticmethod
def default():
"""Retrieves a default Context object, creating it if necessary.
The default Context is a global shared instance used every time the default context is
retrieved.
Attempting to use a Context with no project_id will raise an exception, so on first use
set_project_id must be called.
Returns:
An initialized and shared instance of a Context object.
"""
if Context._global_context is None:
credentials = _utils.get_credentials()
project = _project.Projects.get_default_id(credentials)
Context._global_context = Context(project, credentials)
return Context._global_context
|
Python
| 0 |
@@ -2651,50 +2651,8 @@
%22%22%22%0A
- if Context._global_context is None:%0A
@@ -2686,24 +2686,64 @@
edentials()%0A
+ if Context._global_context is None:%0A
projec
@@ -2850,24 +2850,178 @@
redentials)%0A
+ else:%0A # Always update the credentials in case the access token is revoked or expired%0A Context._global_context.set_credentials(credentials)%0A
return C
|
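Applied to the class above, the hunks leave default() reading as follows (reconstructed from the diff):

@staticmethod
def default():
  credentials = _utils.get_credentials()
  if Context._global_context is None:
    project = _project.Projects.get_default_id(credentials)
    Context._global_context = Context(project, credentials)
  else:
    # Always update the credentials in case the access token is revoked or expired
    Context._global_context.set_credentials(credentials)
  return Context._global_context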
0f80b1d304eb0d4443498c94557b0ef96d098c15
|
Add version
|
ernest/version.py
|
ernest/version.py
|
Python
| 0 |
@@ -0,0 +1,86 @@
+import os%0A%0A%0AVERSION = '0.1a1'%0AVERSION_RAW = os.environ.get('ERNEST_VERSION', VERSION)%0A
|
|
97b9e370d31e2e7abb3d9d56c046f61e2723dc90
|
Create 1-helloworld.py
|
Code/1-helloworld.py
|
Code/1-helloworld.py
|
Python
| 0.999994 |
@@ -0,0 +1,41 @@
+#Print Hello World!%0Aprint %22Hello World!%22%0A
|
|
50f6792de9b8dce54492b897fcffae33d1cb75ba
|
create test url as an optional setting
|
authorize/conf.py
|
authorize/conf.py
|
Python
| 0 |
@@ -0,0 +1,161 @@
+from django.conf import settings%0Afrom appconf import AppConf%0A%0A%0Aclass authorizeConf(AppConf):%0A%0A TEST_URL = False%0A%0A class Meta:%0A prefix = 'authorize'%0A
|
|
61e81a7fd97f80ea04f817075ec9d3a9854e6618
|
version 2.9.1
|
src/SeleniumLibrary/version.py
|
src/SeleniumLibrary/version.py
|
VERSION = 'devel'
|
Python
| 0.000001 |
@@ -8,11 +8,11 @@
= '
-devel
+2.9.1
'%0A
|
0a80cf698a26abdf17aeeb01e21cb9910e6463d0
|
add a test suite
|
booger_test.py
|
booger_test.py
|
Python
| 0.000001 |
@@ -0,0 +1,961 @@
+#!/usr/bin/python%0A################################################################################%0A# %22THE BEER-WARE LICENSE%22 (Revision 42):%0A# %3Cthenoviceoof%3E wrote this file. As long as you retain this notice%0A# you can do whatever you want with this stuff. If we meet some day,%0A# and you think this stuff is worth it, you can buy me a beer in%0A# return%0A# Nathan Hwang %3Cthenoviceoof%3E%0A# ----------------------------------------------------------------------------%0A################################################################################%0A%0Afrom unittest import TestCase%0A%0A################################################################################%0A# Nosetest parser%0A%0Afrom booger import NOSE_DIV_WIDTH, NosetestsParser%0A%0Aclass NosetestsParserTest(TestCase):%0A def setUp(self):%0A self.parser = NosetestsParser()%0A def short_output_test(self):%0A inp = '=' * 70%0A out, end = self.parser.parse_short_output(inp)%0A assert end == True%0A
|
|
edf7c8c1d3ea1f85c6c9888dd5ee759443f1db1c
|
add billing urls
|
billing/urls.py
|
billing/urls.py
|
Python
| 0.000001 |
@@ -0,0 +1,171 @@
+from django.conf.urls import url%0A%0Afrom . import views%0A%0Aurlpatterns = %5B%0A url(r'%5Egenerate_pdf/(?P%3Cbill_id%3E%5Cd+)$', views.generate_pdf, %0A name='generate-pdf')%0A %5D%0A
|
|
e3a750dcca3727d576833351bfc09bbd858871f6
|
Fix indent on test code for test/assembly broken in r1220 Review URL: https://chromiumcodereview.appspot.com/9429007
|
test/assembly/gyptest-assembly.py
|
test/assembly/gyptest-assembly.py
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
A basic test of compiling assembler files.
"""
import sys
import TestGyp
if sys.platform != 'win32':
# TODO(bradnelson): get this working for windows.
test = TestGyp.TestGyp(formats=['make', 'ninja', 'scons', 'xcode'])
test.run_gyp('assembly.gyp', chdir='src')
test.relocate('src', 'relocate/src')
test.build('assembly.gyp', test.ALL, chdir='relocate/src')
expect = """\
Hello from program.c
Got 42.
"""
test.run_built_executable('program', chdir='relocate/src', stdout=expect)
test.pass_test()
|
Python
| 0.001074 |
@@ -566,18 +566,16 @@
= %22%22%22%5C%0A
-
Hello fr
@@ -591,18 +591,16 @@
m.c%0A
-
Got 42.%0A
%22%22
@@ -595,18 +595,16 @@
Got 42.%0A
-
%22%22%22%0A te
|
1197f5885b2e7275d9a4f108c62bd2506816c8b1
|
Create test_madagascar.py
|
test/countries/test_madagascar.py
|
test/countries/test_madagascar.py
|
Python
| 0.000005 |
@@ -0,0 +1,1280 @@
+# -*- coding: utf-8 -*-%0A%0A# python-holidays%0A# ---------------%0A# A fast, efficient Python library for generating country, province and state%0A# specific sets of holidays on the fly. It aims to make determining whether a%0A# specific date is a holiday as fast and flexible as possible.%0A#%0A# Authors: dr-prodigy %[email protected]%3E (c) 2017-2022%0A# ryanss %[email protected]%3E (c) 2014-2017%0A# Website: https://github.com/dr-prodigy/python-holidays%0A# License: MIT (see LICENSE file)%0A%0Aimport unittest%0A%0Afrom datetime import date%0A%0Aimport holidays%0A%0A%0Aclass TestMadagascar(unittest.TestCase):%0A def setUp(self):%0A self.holidays = holidays.MG()%0A%0A def test_new_years(self):%0A self.assertIn(date(2010, 1, 1), self.holidays)%0A self.assertIn(date(2020, 1, 1), self.holidays)%0A%0A def test_mahery_fo(self):%0A self.assertIn(date(2010, 3, 29), self.holidays)%0A self.assertIn(date(2015, 3, 29), self.holidays)%0A self.assertIn(date(2022, 3, 29), self.holidays)%0A%0A%0A%0A def test_paska(self):%0A self.assertIn(date(2022, 4, 17), self.holidays) # Andron'ny paska%0A self.assertIn(date(2022, 4, 18), self.holidays) # Alatsinain'ny Paska%0A%0A%0A def test_not_holiday(self):%0A self.assertNotIn(date(2022, 4, 20), self.holidays)%0A
|
|
d21743f2543f8d953a837d75bff0fcdb0105f4db
|
Add page extension for tracking page creation and modification dates.
|
feincms/module/page/extensions/changedate.py
|
feincms/module/page/extensions/changedate.py
|
Python
| 0 |
@@ -0,0 +1,681 @@
+%22%22%22%0ATrack the modification date for pages.%0A%22%22%22%0A%0Afrom datetime import datetime%0A%0Afrom django.db import models%0Afrom django.db.models import Q%0Afrom django.utils.translation import ugettext_lazy as _%0Afrom django.conf import settings%0A%0A%0Adef register(cls, admin_cls):%0A cls.add_to_class('creation_date', models.DateTimeField(_('creation date'), editable=False))%0A cls.add_to_class('modification_date', models.DateTimeField(_('modification date'), editable=False))%0A%0A orig_save = cls.save%0A def save(page):%0A now = datetime.now()%0A if page.id is None:%0A page.creation_date = now%0A page.modification_date = now%0A orig_save(page)%0A%0A cls.save = save
|
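The extension monkey-patches save() because FeinCMS bolts fields onto an existing Page class at runtime. For comparison, on a model you control, stock Django field options track the same dates declaratively (vanilla Django, outside the extension mechanism):

from django.db import models

class Page(models.Model):
    creation_date = models.DateTimeField(auto_now_add=True)    # set once on insert
    modification_date = models.DateTimeField(auto_now=True)    # refreshed on every save()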
|
1411daac4efd06b1208e19c3fce1a126230583cb
|
Use a proper mechanism for catching warnings
|
tinydb/utils.py
|
tinydb/utils.py
|
"""
Utility functions.
"""
from contextlib import contextmanager
import warnings
class LRUCache(dict):
"""
A simple LRU cache.
"""
def __init__(self, *args, **kwargs):
"""
:param capacity: How many items to store before cleaning up old items
or ``None`` for an unlimited cache size
"""
self.capacity = kwargs.pop('capacity', None) or float('nan')
self.lru = []
super(LRUCache, self).__init__(*args, **kwargs)
def refresh(self, key):
"""
Push a key to the head of the LRU queue
"""
if key in self.lru:
self.lru.remove(key)
self.lru.append(key)
def get(self, key, default=None):
self.refresh(key)
return super(LRUCache, self).get(key, default)
def __getitem__(self, key):
self.refresh(key)
return super(LRUCache, self).__getitem__(key)
def __setitem__(self, key, value):
super(LRUCache, self).__setitem__(key, value)
self.refresh(key)
# Check, if the cache is full and we have to remove old items
# If the queue is of unlimited size, self.capacity is NaN and
# x > NaN is always False in Python and the cache won't be cleared.
if len(self) > self.capacity:
self.pop(self.lru.pop(0))
def __delitem__(self, key):
super(LRUCache, self).__delitem__(key)
self.lru.remove(key)
def clear(self):
super(LRUCache, self).clear()
del self.lru[:]
# Source: https://github.com/PythonCharmers/python-future/blob/466bfb2dfa36d865285dc31fe2b0c0a53ff0f181/future/utils/__init__.py#L102-L134
def with_metaclass(meta, *bases):
"""
Function from jinja2/_compat.py. License: BSD.
Use it like this::
class BaseForm(object):
pass
class FormType(type):
pass
class Form(with_metaclass(FormType, BaseForm)):
pass
This requires a bit of explanation: the basic idea is to make a
dummy metaclass for one level of class instantiation that replaces
itself with the actual metaclass. Because of internal type checks
we also need to make sure that we downgrade the custom metaclass
for one level to something closer to type (that's why __call__ and
__init__ comes back from type etc.).
This has the advantage over six.with_metaclass of not introducing
dummy classes into the final MRO.
"""
class Metaclass(meta):
__call__ = type.__call__
__init__ = type.__init__
def __new__(cls, name, this_bases, d):
if this_bases is None:
return type.__new__(cls, name, (), d)
return meta(name, bases, d)
return Metaclass('temporary_class', None, {})
@contextmanager
def catch_warning(warning_cls):
warning_filter = [f for f in warnings.filters if f[2] == warning_cls]
warnings.filterwarnings(action="error", category=warning_cls)
try:
yield # Run user code
finally:
if warning_filter:
# Reset original filter
warnings.filterwarnings(action=warning_filter[0][0],
category=warning_cls)
|
Python
| 0 |
@@ -2848,380 +2848,107 @@
w
-arning_filter = %5Bf for f in warnings.filters if f%5B2%5D == warning_cls%5D%0A warnings.filterwarnings(action=%22error%22, category=warning_cls)%0A%0A try:%0A yield # Run user code%0A%0A finally:%0A if warning_filter:%0A # Reset original filter%0A warnings.filterwarnings(action=warning_filter%5B0%5D%5B0%5D,%0A category=warning_cls)
+ith warnings.catch_warnings(record=True) as w:%0A warnings.filterwarnings('error')%0A%0A yield
%0A
|
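After the hunk, the helper reduces to the following (reconstructed); warnings.catch_warnings saves the global filter state on entry and restores it on exit, which replaces the manual bookkeeping the old version did by hand:

@contextmanager
def catch_warning(warning_cls):
    with warnings.catch_warnings(record=True) as w:
        warnings.filterwarnings('error')

        yield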
bcb9437fb99c2577c9ca9628c60b80becc2a24b3
|
Add media_tags and a new filter for photo alignment normalization
|
organization/media/templatetags/media_tags.py
|
organization/media/templatetags/media_tags.py
|
Python
| 0 |
@@ -0,0 +1,236 @@
+# -*- coding: utf-8 -*-%0Afrom mezzanine.template import Library%0A%0Aregister = Library()%0A%[email protected]%0Adef get_photo_alignment(value):%0A if value == 'left':%0A return 0%0A elif value == 'center':%0A return 0.5%0A return 1%0A
|
|
ed0d0f913b209bf6ea8ec32d0aa10c31bc97e2c9
|
create index on vote.mandate_id
|
alembic/versions/33f79ee8632_vote_mandate_id_inde.py
|
alembic/versions/33f79ee8632_vote_mandate_id_inde.py
|
Python
| 0.000001 |
@@ -0,0 +1,227 @@
+revision = '33f79ee8632'%0Adown_revision = '3abf407e34a'%0A%0Afrom alembic import op%0A%0A%0Adef upgrade():%0A op.create_index('vote_mandate_id_index', 'vote', %5B'mandate_id'%5D)%0A%0A%0Adef downgrade():%0A op.drop_index('vote_mandate_id_index')%0A
|
|
0676a5d8fb7ffeb9f1b84848fd849a181a8c1176
|
renamed to gadgets
|
analytics/gadgets.py
|
analytics/gadgets.py
|
Python
| 0.9994 |
@@ -0,0 +1,693 @@
+from analytics import settings%0Afrom analytics import models%0Afrom analytics.sites import gadgets%0A%0Aclass BaseWidget(object):%0A def __init__(self, title, metrics, value_type, frequency, samples, width, height):%0A self.title = title%0A self.metrics = metrics%0A self.value_type = value_type%0A self.frequency = frequency%0A self.samples = samples%0A self.width = width%0A self.height = height%0A%0Aclass BarWidget(BaseWidget):%0A pass%0A%0Aclass NumberWidget(BaseWidget):%0A pass%0A%0Aclass Registrations(NumberWidget):%0A def render(self):%0A return 'foo'%0A%0Agadgets.register(Registrations('Registrations', %5Bmodels.Registrations,%5D, settings.COUNT, 'd', 30, 4, 1))%0A
|
|
78e758925bff73e52867b671b246a391f87cf945
|
remove commented lines.
|
homeassistant/components/sensor/speedtest.py
|
homeassistant/components/sensor/speedtest.py
|
"""
homeassistant.components.sensor.speedtest
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Speedtest.net sensor based on speedtest-cli.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.speedtest/
"""
import logging
import sys
import re
from datetime import timedelta
from subprocess import check_output
from homeassistant.util import Throttle
from homeassistant.helpers.entity import Entity
REQUIREMENTS = ['speedtest-cli==0.3.4']
_LOGGER = logging.getLogger(__name__)
# _SPEEDTEST_REGEX = re.compile('Ping:\s(\d+\.\d+)\sms\\nDownload:\s(\d+\.\d+)'
# '\sMbit/s\\nUpload:\s(\d+\.\d+)\sMbit/s\\n')
_SPEEDTEST_REGEX = re.compile(r'Ping:\s(\d+\.\d+)\sms\nDownload:\s(\d+\.\d+)'
r'\sMbit/s\nUpload:\s(\d+\.\d+)\sMbit/s\n')
SENSOR_TYPES = {
'ping': ['Ping', 'ms'],
'download': ['Download', 'Mbit/s'],
'upload': ['Upload', 'Mbit/s'],
}
# Return cached results if last scan was less than this time ago
MIN_TIME_BETWEEN_UPDATES = timedelta(hours=1)
def setup_platform(hass, config, add_devices, discovery_info=None):
""" Setup the Speedtest sensor. """
data = SpeedtestData(hass.config.path)
dev = []
for variable in config['monitored_conditions']:
if variable not in SENSOR_TYPES:
_LOGGER.error('Sensor type: "%s" does not exist', variable)
else:
dev.append(SpeedtestSensor(data, variable))
add_devices(dev)
# pylint: disable=too-few-public-methods
class SpeedtestSensor(Entity):
""" Implements a speedtest.net sensor. """
def __init__(self, speedtest_data, sensor_type):
self.client_name = 'Speedtest'
self._name = SENSOR_TYPES[sensor_type][0]
self.speedtest_client = speedtest_data
self.type = sensor_type
self._state = None
self._unit_of_measurement = SENSOR_TYPES[self.type][1]
self.update()
@property
def name(self):
return '{} {}'.format(self.client_name, self._name)
@property
def state(self):
""" Returns the state of the device. """
return self._state
@property
def unit_of_measurement(self):
""" Unit of measurement of this entity, if any. """
return self._unit_of_measurement
def update(self):
""" Gets the latest data from Forecast.io and updates the states. """
self.speedtest_client.update()
data = self.speedtest_client.data
if self.type == 'ping':
self._state = data['ping']
elif self.type == 'download':
self._state = data['download']
elif self.type == 'upload':
self._state = data['upload']
class SpeedtestData(object):
""" Gets the latest data from speedtest.net. """
def __init__(self, path):
self.data = None
self.path = path
self.update()
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
""" Gets the latest data from speedtest.net. """
_LOGGER.info('Executing speedtest')
re_output = _SPEEDTEST_REGEX.split(
check_output([sys.executable, self.path(
'lib', 'speedtest_cli.py'), '--simple']).decode("utf-8"))
self.data = {'ping': round(float(re_output[1]), 2),
'download': round(float(re_output[2]), 2),
'upload': round(float(re_output[3]), 2)}
|
Python
| 0 |
@@ -535,165 +535,8 @@
_)%0A%0A
-# _SPEEDTEST_REGEX = re.compile('Ping:%5Cs(%5Cd+%5C.%5Cd+)%5Csms%5C%5CnDownload:%5Cs(%5Cd+%5C.%5Cd+)'%0A# '%5CsMbit/s%5C%5CnUpload:%5Cs(%5Cd+%5C.%5Cd+)%5CsMbit/s%5C%5Cn')%0A
_SPE
|
d9ff99551a7fb954fe0422955331a74e18ddf748
|
Version of apply tool that includes timestamps (backwards compatible)
|
hyperstream/tools/apply/2017-11-27_v0.0.3.py
|
hyperstream/tools/apply/2017-11-27_v0.0.3.py
|
Python
| 0 |
@@ -0,0 +1,1826 @@
+# The MIT License (MIT) # Copyright (c) 2014-2017 University of Bristol%0A#%0A# Permission is hereby granted, free of charge, to any person obtaining a copy%0A# of this software and associated documentation files (the %22Software%22), to deal%0A# in the Software without restriction, including without limitation the rights%0A# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell%0A# copies of the Software, and to permit persons to whom the Software is%0A# furnished to do so, subject to the following conditions:%0A#%0A# The above copyright notice and this permission notice shall be included in all%0A# copies or substantial portions of the Software.%0A#%0A# THE SOFTWARE IS PROVIDED %22AS IS%22, WITHOUT WARRANTY OF ANY KIND,%0A# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF%0A# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.%0A# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,%0A# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR%0A# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE%0A# OR OTHER DEALINGS IN THE SOFTWARE.%0A%0Afrom hyperstream.stream import StreamInstance%0Afrom hyperstream.tool import Tool, check_input_stream_count%0A%0A%0Aclass Apply(Tool):%0A %22%22%22%0A Simple tool that applies a function to every data item%0A %22%22%22%0A def __init__(self, func, include_timestamps=False):%0A super(Apply, self).__init__(func=func, include_timestamps=include_timestamps)%0A%0A @check_input_stream_count(1)%0A def _execute(self, sources, alignment_stream, interval):%0A for t, d in sources%5B0%5D.window(interval, force_calculation=True):%0A if self.include_timestamps:%0A yield StreamInstance(t, self.func(t, d))%0A else:%0A yield StreamInstance(t, self.func(d))%0A
|
|
90dbc7695af9cc4b83273e774a8e3f6eb0847170
|
Maximum sum of any given path
|
Arrays/maximum_sum_path.py
|
Arrays/maximum_sum_path.py
|
Python
| 0.999862 |
@@ -0,0 +1,1943 @@
+import unittest%0A%22%22%22%0AGiven two sorted arrays such that the arrays may have some common elements, find the maximum sum path%0Ato reach from beginning of any array to end of any array. We can switch from one array to another array%0Aonly at common elements.%0AInput: arr1: 2 3 7 10 12 arr2: 1 5 7 8%0AOutput: 35 (1 + 5 + 7 + 10 + 12)%0A%22%22%22%0A%0A%22%22%22%0AApproach:%0A1. Scan both arrays from left to right.%0A2. Keep two running sums, sum1 and sum2 for the two arrays.%0A3. Move pointer in array whose current element is smaller among the two, and add that element to%0A respective running sum.%0A4. When the corresponding elements from the two arrays are equal, take max(sum1, sum2) till that point,%0A and add the equal element to the max value.%0A5. Return the overall max sum as the maximum sum path.%0A%22%22%22%0A%0A%0Adef maximum_sum_of_a_path(list1, list2):%0A%0A end1 = len(list1)%0A end2 = len(list2)%0A%0A sum1 = 0%0A sum2 = 0%0A max_sum = 0%0A%0A i = 0%0A j = 0%0A%0A while i %3C end1 and j %3C end2:%0A if list1%5Bi%5D %3C list2%5Bj%5D:%0A sum1 += list1%5Bi%5D%0A i += 1%0A elif list1%5Bi%5D %3E list2%5Bj%5D:%0A sum2 += list2%5Bj%5D%0A j += 1%0A else:%0A max_sum += max(%5Bsum1, sum2%5D)%0A max_sum += list1%5Bi%5D%0A i += 1%0A j += 1%0A sum1 = 0%0A sum2 = 0%0A%0A while i %3C end1:%0A sum1 += list1%5Bi%5D%0A i += 1%0A%0A while j %3C end2:%0A sum2 += list2%5Bj%5D%0A j += 1%0A%0A max_sum += max(%5Bsum1, sum2%5D)%0A%0A return max_sum%0A%0A%0Aclass TestMaxSumPath(unittest.TestCase):%0A%0A def test_max_sum_path(self):%0A arr1 = %5B2, 3, 7, 10, 12%5D%0A arr2 = %5B1, 5, 7, 8%5D%0A%0A self.assertEqual(maximum_sum_of_a_path(arr1, arr2), 35)%0A%0A arr1 = %5B10, 12%5D%0A arr2 = %5B5, 7, 9%5D%0A%0A self.assertEqual(maximum_sum_of_a_path(arr1, arr2), 22)%0A%0A arr1 = %5B2, 3, 7, 10, 12, 15, 30, 34%5D%0A arr2 = %5B1, 5, 7, 8, 10, 15, 16, 19%5D%0A%0A self.assertEqual(maximum_sum_of_a_path(arr1, arr2), 122)%0A%0A%0A
|
|
ef06864a991572d7ae610f9a249b024f967b1eb9
|
Add test.util.mock_call_with_name
|
linkins/test/util.py
|
linkins/test/util.py
|
Python
| 0.000008 |
@@ -0,0 +1,561 @@
+import mock%0A%0Aclass mock_call_with_name(object):%0A %22%22%22Like mock.call but takes the name of the call as its first%0A argument. mock.call requires chained methods to define its%0A name. This can be a problem, for example, if you need to create%0A mock.call().__enter__().__iter__(). You can optionally use%0A mock._Call but you might as well use a tuple since its constructor%0A requires a tuple of the form (name, args, kwargs).%0A%0A %22%22%22%0A def __new__(self, name, *args, **kwargs):%0A return mock._Call(%0A (name, args, kwargs)%0A )%0A
|
|
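A sketch of how the helper above might be used; the context-manager dunder here is just an example of a name that mock.call cannot express without chaining (unittest.mock stands in for the standalone mock package the record imports):

from unittest import mock

class mock_call_with_name(object):
    def __new__(cls, name, *args, **kwargs):
        return mock._Call((name, args, kwargs))

m = mock.MagicMock()
with m:
    pass

# Matches the recorded dunder call without chaining mock.call():
assert mock_call_with_name('__enter__') in m.mock_calls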
dba311375a0f4cda1a3c522f5ac261dfb601b9c5
|
Create gee_init.py
|
pyEOM/gee_init.py
|
pyEOM/gee_init.py
|
Python
| 0.000044 |
@@ -0,0 +1,49 @@
+MY_SERVICE_ACCOUNT = ''%0AMY_PRIVATE_KEY_FILE = ''%0A
|
|
148991a27670d26a2eb29f0964078b4d656bbcec
|
Create __init__.py
|
pydyn/__init__.py
|
pydyn/__init__.py
|
Python
| 0.000429 |
@@ -0,0 +1,222 @@
+# Copyright (C) 2014-2015 Julius Susanto. All rights reserved.%0A# Use of this source code is governed by a BSD-style%0A# license that can be found in the LICENSE file.%0A%0A%22%22%22%0APYPOWER-Dynamics%0ATime-domain simulation engine%0A%0A%22%22%22%0A
|
|
06e4fd4b7d4cc4c984a05887fce00f7c8bbdc174
|
Add missing tests for messaging notifer plugin
|
tests/notifiers/test_messaging.py
|
tests/notifiers/test_messaging.py
|
Python
| 0.000002 |
@@ -0,0 +1,1749 @@
+# Copyright 2014 Mirantis Inc.%0A# All Rights Reserved.%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22); you may%0A# not use this file except in compliance with the License. You may obtain%0A# a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS, WITHOUT%0A# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the%0A# License for the specific language governing permissions and limitations%0A# under the License.%0A%0Aimport mock%0A%0Afrom osprofiler._notifiers import base%0Afrom tests import test%0A%0A%0Aclass MessagingTestCase(test.TestCase):%0A%0A def test_init_and_notify(self):%0A%0A messaging = mock.MagicMock()%0A context = %22context%22%0A transport = %22transport%22%0A project = %22project%22%0A service = %22service%22%0A host = %22host%22%0A%0A notify_func = base.Notifier.factory(%22Messaging%22, messaging, context,%0A transport, project, service, host)%0A%0A messaging.Notifier.assert_called_once_with(%0A transport, publisher_id=host, driver=%22messaging%22, topic=%22profiler%22)%0A%0A info = %7B%0A %22a%22: 10%0A %7D%0A notify_func(info)%0A%0A expected_data = %7B%22project%22: project, %22service%22: service%7D%0A expected_data.update(info)%0A messaging.Notifier().info.assert_called_once_with(%0A context, %22profiler.%25s%22 %25 service, expected_data)%0A%0A messaging.reset_mock()%0A notify_func(info, context=%22my_context%22)%0A messaging.Notifier().info.assert_called_once_with(%0A %22my_context%22, %22profiler.%25s%22 %25 service, expected_data)%0A
|
|
fb2af0db2fc6d2d63bb377d7818ed1d03cb5cc9a
|
add nqueens.py
|
python/nqueens.py
|
python/nqueens.py
|
Python
| 0.001659 |
@@ -0,0 +1,294 @@
+#!/usr/bin/python%0A%0A# http://code.activestate.com/recipes/576647-eight-queens-six-lines/%0A%0Afrom itertools import permutations%0A%0AN = 8%0Acols = range(N)%0Afor perm in permutations(cols):%0A if (N == len(set(perm%5Bi%5D-i for i in cols))%0A == len(set(perm%5Bi%5D+i for i in cols))):%0A print perm%0A
|
|
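The six-line recipe above works because a permutation of column indices already guarantees one queen per row and per column, leaving only the diagonals to check: perm[i] - i is constant along one diagonal direction and perm[i] + i along the other, so a placement is valid exactly when both sets of values are all distinct. The same idea in Python 3:

from itertools import permutations

def n_queens(n):
    cols = range(n)
    for perm in permutations(cols):
        if (n == len({perm[i] - i for i in cols})
                == len({perm[i] + i for i in cols})):
            yield perm

print(sum(1 for _ in n_queens(6)))  # prints 4: the 6x6 board has four solutions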
4bfe33373ebf095623173f945757693997a65ce3
|
Add a simple test for the new AWS::LanguageExtensions transform (#2074)
|
tests/test_language_extensions.py
|
tests/test_language_extensions.py
|
Python
| 0.000706 |
@@ -0,0 +1,2131 @@
+import unittest%0A%0Afrom troposphere import AWSHelperFn, Parameter, Template%0Afrom troposphere.sqs import Queue%0A%0A%0Aclass TestServerless(unittest.TestCase):%0A def test_transform(self):%0A t = Template()%0A t.set_version(%222010-09-09%22)%0A t.set_transform(%22AWS::LanguageExtensions%22)%0A%0A self.assertEqual(%0A t.to_dict(),%0A %7B%0A %22AWSTemplateFormatVersion%22: %222010-09-09%22,%0A %22Transform%22: %22AWS::LanguageExtensions%22,%0A %22Resources%22: %7B%7D,%0A %7D,%0A )%0A%0A def test_length_function(self):%0A class Length(AWSHelperFn):%0A def __init__(self, data: object) -%3E None:%0A self.data = %7B%22Fn::Length%22: data%7D%0A%0A t = Template()%0A t.set_version(%222010-09-09%22)%0A t.set_transform(%22AWS::LanguageExtensions%22)%0A%0A queue_list = t.add_parameter(Parameter(%22QueueList%22, Type=%22CommaDelimitedList%22))%0A queue_name = t.add_parameter(%0A Parameter(%0A %22QueueNameParam%22, Description=%22Name for your SQS queue%22, Type=%22String%22%0A )%0A )%0A%0A t.add_resource(%0A Queue(%0A %22Queue%22,%0A QueueName=queue_name.ref(),%0A DelaySeconds=Length(queue_list.ref()),%0A )%0A )%0A%0A self.assertEqual(%0A t.to_dict(),%0A %7B%0A %22AWSTemplateFormatVersion%22: %222010-09-09%22,%0A %22Transform%22: %22AWS::LanguageExtensions%22,%0A %22Parameters%22: %7B%0A %22QueueList%22: %7B%22Type%22: %22CommaDelimitedList%22%7D,%0A %22QueueNameParam%22: %7B%0A %22Description%22: %22Name for your SQS queue%22,%0A %22Type%22: %22String%22,%0A %7D,%0A %7D,%0A %22Resources%22: %7B%0A %22Queue%22: %7B%0A %22Type%22: %22AWS::SQS::Queue%22,%0A %22Properties%22: %7B%0A %22QueueName%22: %7B%22Ref%22: %22QueueNameParam%22%7D,%0A %22DelaySeconds%22: %7B%22Fn::Length%22: %7B%22Ref%22: %22QueueList%22%7D%7D,%0A %7D,%0A %7D%0A %7D,%0A %7D,%0A )%0A
|
|
157a7d00a9d650728495726e9217591a678ec5a9
|
add docstrings for response
|
mailthon/response.py
|
mailthon/response.py
|
class Response(object):
def __init__(self, pair):
status, message = pair
self.status_code = status
self.message = message
@property
def ok(self):
return self.status_code == 250
class SendmailResponse(Response):
def __init__(self, pair, rejected):
Response.__init__(self, pair)
self.rejected = {
addr: Response(pair)
for addr, pair in rejected.items()
}
@property
def ok(self):
return (Response.ok.fget(self) and
not self.rejected)
|
Python
| 0.000001 |
@@ -1,261 +1,997 @@
-class Response(object):%0A def __init__(self, pair):%0A status, message = pair%0A self.status_code = status%0A self.message = message%0A%0A @property%0A def ok(self):%0A return self.status_code == 250%0A%0A%0Aclass SendmailResponse(Response):
+%22%22%22%0A mailthon.response%0A ~~~~~~~~~~~~~~~~~%0A%0A Implements the Response objects.%0A%22%22%22%0A%0A%0Aclass Response(object):%0A %22%22%22%0A Encapsulates a (status_code, message) tuple%0A returned by a server when the %60%60NOOP%60%60%0A command is called.%0A%0A :param pair: A (status_code, message) pair.%0A %22%22%22%0A%0A def __init__(self, pair):%0A status, message = pair%0A self.status_code = status%0A self.message = message%0A%0A @property%0A def ok(self):%0A %22%22%22%0A Tells whether the Response object is ok-%0A that everything went well. Returns true%0A if the status code is 250, false otherwise.%0A %22%22%22%0A return self.status_code == 250%0A%0A%0Aclass SendmailResponse(Response):%0A %22%22%22%0A Encapsulates a (status_code, message) tuple%0A as well as a mapping of email-address to%0A (status_code, message) tuples that can be%0A attained by the NOOP and the SENDMAIL%0A command.%0A%0A :param pair: The response pair.%0A :param rejected: Rejected recipients.%0A %22%22%22%0A
%0A
@@ -1206,32 +1206,151 @@
def ok(self):%0A
+ %22%22%22%0A Returns True only if no addresses were%0A rejected and if the status code is 250.%0A %22%22%22%0A
return (
|
2b2ff2a528f6effd219bd13cd754c33b55e82e61
|
add __init__.py, initialized bootstrap extension
|
app/__init__.py
|
app/__init__.py
|
Python
| 0.000019 |
@@ -0,0 +1,331 @@
+from flask import Flask %0Afrom flask.ext.bootstrap import Bootstrap%0Afrom flask.ext.moment import Moment%0Afrom config import config %0A%0Abootstrap = Bootstrap()%0Amoment = Moment()%0A%0Adef create_app(config_name):%0A%09app = Flask(__name__)%0A%09app.config.from_object(config%5Bconfig_name%5D)%0A%09config%5Bconfig_name%5D.init_app(app)%0A%0A%09bootstrap.init_app(app)%0A%09moment.init_app(app)%0A%0A%09return app
|
|
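A hypothetical caller of the factory above, assuming config exposes a 'default' key as in the usual Flask application-factory layout:

from app import create_app

app = create_app('default')

if __name__ == '__main__':
    app.run()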
264310074faf54f25e77cc83f9d8e6ebcc0d8cf9
|
Update Wink requirement
|
homeassistant/components/wink.py
|
homeassistant/components/wink.py
|
"""
Support for Wink hubs.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/wink/
"""
import logging
import voluptuous as vol
from homeassistant.helpers import discovery
from homeassistant.const import (
CONF_ACCESS_TOKEN, ATTR_BATTERY_LEVEL, CONF_EMAIL, CONF_PASSWORD,
EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP)
from homeassistant.helpers.entity import Entity
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['python-wink==1.0.0', 'pubnubsub-handler==0.0.7']
_LOGGER = logging.getLogger(__name__)
CHANNELS = []
DOMAIN = 'wink'
SUBSCRIPTION_HANDLER = None
CONF_CLIENT_ID = 'client_id'
CONF_CLIENT_SECRET = 'client_secret'
CONF_USER_AGENT = 'user_agent'
CONF_OATH = 'oath'
CONF_APPSPOT = 'appspot'
CONF_DEFINED_BOTH_MSG = 'Remove access token to use oath2.'
CONF_MISSING_OATH_MSG = 'Missing oath2 credentials.'
CONF_TOKEN_URL = "https://winkbearertoken.appspot.com/token"
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Inclusive(CONF_EMAIL, CONF_APPSPOT,
msg=CONF_MISSING_OATH_MSG): cv.string,
vol.Inclusive(CONF_PASSWORD, CONF_APPSPOT,
msg=CONF_MISSING_OATH_MSG): cv.string,
vol.Inclusive(CONF_CLIENT_ID, CONF_OATH,
msg=CONF_MISSING_OATH_MSG): cv.string,
vol.Inclusive(CONF_CLIENT_SECRET, CONF_OATH,
msg=CONF_MISSING_OATH_MSG): cv.string,
vol.Exclusive(CONF_EMAIL, CONF_OATH,
msg=CONF_DEFINED_BOTH_MSG): cv.string,
vol.Exclusive(CONF_ACCESS_TOKEN, CONF_OATH,
msg=CONF_DEFINED_BOTH_MSG): cv.string,
vol.Exclusive(CONF_ACCESS_TOKEN, CONF_APPSPOT,
msg=CONF_DEFINED_BOTH_MSG): cv.string,
vol.Optional(CONF_USER_AGENT, default=None): cv.string
})
}, extra=vol.ALLOW_EXTRA)
WINK_COMPONENTS = [
'binary_sensor', 'sensor', 'light', 'switch', 'lock', 'cover', 'climate',
'fan', 'alarm_control_panel'
]
def setup(hass, config):
"""Set up the Wink component."""
import pywink
import requests
from pubnubsubhandler import PubNubSubscriptionHandler
user_agent = config[DOMAIN].get(CONF_USER_AGENT)
if user_agent:
pywink.set_user_agent(user_agent)
access_token = config[DOMAIN].get(CONF_ACCESS_TOKEN)
client_id = config[DOMAIN].get('client_id')
if access_token:
pywink.set_bearer_token(access_token)
elif client_id:
email = config[DOMAIN][CONF_EMAIL]
password = config[DOMAIN][CONF_PASSWORD]
client_id = config[DOMAIN]['client_id']
client_secret = config[DOMAIN]['client_secret']
pywink.set_wink_credentials(email, password, client_id,
client_secret)
else:
email = config[DOMAIN][CONF_EMAIL]
password = config[DOMAIN][CONF_PASSWORD]
payload = {'username': email, 'password': password}
token_response = requests.post(CONF_TOKEN_URL, data=payload)
token = token_response.text.split(':')[1].split()[0].rstrip('<br')
pywink.set_bearer_token(token)
hass.data[DOMAIN] = {}
hass.data[DOMAIN]['entities'] = []
hass.data[DOMAIN]['pubnub'] = PubNubSubscriptionHandler(
pywink.get_subscription_key(),
pywink.wink_api_fetch)
def start_subscription(event):
"""Start the pubnub subscription."""
hass.data[DOMAIN]['pubnub'].subscribe()
hass.bus.listen(EVENT_HOMEASSISTANT_START, start_subscription)
def stop_subscription(event):
"""Stop the pubnub subscription."""
hass.data[DOMAIN]['pubnub'].unsubscribe()
hass.bus.listen(EVENT_HOMEASSISTANT_STOP, stop_subscription)
def force_update(call):
"""Force all devices to poll the Wink API."""
_LOGGER.info("Refreshing Wink states from API")
for entity in hass.data[DOMAIN]['entities']:
entity.update_ha_state(True)
hass.services.register(DOMAIN, 'Refresh state from Wink', force_update)
# Load components for the devices in Wink that we support
for component in WINK_COMPONENTS:
discovery.load_platform(hass, component, DOMAIN, {}, config)
return True
class WinkDevice(Entity):
"""Representation a base Wink device."""
def __init__(self, wink, hass):
"""Initialize the Wink device."""
self.hass = hass
self.wink = wink
self._battery = self.wink.battery_level()
hass.data[DOMAIN]['pubnub'].add_subscription(
self.wink.pubnub_channel, self._pubnub_update)
hass.data[DOMAIN]['entities'].append(self)
def _pubnub_update(self, message):
try:
if message is None:
_LOGGER.error("Error on pubnub update for %s "
"polling API for current state", self.name)
self.update_ha_state(True)
else:
self.wink.pubnub_update(message)
self.update_ha_state()
except (ValueError, KeyError, AttributeError):
_LOGGER.error("Error in pubnub JSON for %s "
"polling API for current state", self.name)
self.update_ha_state(True)
@property
def name(self):
"""Return the name of the device."""
return self.wink.name()
@property
def available(self):
"""True if connection == True."""
return self.wink.available()
def update(self):
"""Update state of the device."""
self.wink.update_state()
@property
def should_poll(self):
"""Only poll if we are not subscribed to pubnub."""
return self.wink.pubnub_channel is None
@property
def device_state_attributes(self):
"""Return the state attributes."""
if self._battery:
return {
ATTR_BATTERY_LEVEL: self._battery_level,
}
@property
def _battery_level(self):
"""Return the battery level."""
if self.wink.battery_level() is not None:
return self.wink.battery_level() * 100
|
Python
| 0 |
@@ -555,13 +555,13 @@
er==
+1.
0.0
-.7
'%5D%0A%0A
|
cac34640b3fafb57e645f1443d258918cfffcf9b
|
Fix compatibility for Python 3.3
|
astm/protocol.py
|
astm/protocol.py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
import logging
from threading import _Timer, RLock
from collections import namedtuple
from .asynclib import AsyncChat
from .records import HeaderRecord, TerminatorRecord
from .constants import STX, CRLF, ENQ, ACK, NAK, EOT, ENCODING
log = logging.getLogger(__name__)
#: ASTM protocol states set.
STATE = namedtuple('ASTMState', ['init', 'opened', 'transfer'])(*range(3))
__all__ = ['STATE', 'ASTMProtocol']
class ASTMProtocol(AsyncChat):
"""Common ASTM protocol routines."""
#: ASTM header record class.
astm_header = HeaderRecord
#: ASTM terminator record class.
astm_terminator = TerminatorRecord
#: Flag about chunked transfer.
is_chunked_transfer = None
#: Operation timeout value.
timeout = None
encoding = ENCODING
strip_terminator = False
_last_recv_data = None
_last_sent_data = None
_state = None
_lock = RLock()
_timer = None
_timer_cls = _Timer
def __init__(self, sock=None, map=None, timeout=None):
super(ASTMProtocol, self).__init__(sock, map)
if timeout is not None:
self.timeout = timeout
def found_terminator(self):
while self.inbox:
data = self.inbox.popleft()
if not data:
continue
self.dispatch(data)
def dispatch(self, data):
"""Dispatcher of received data."""
self._last_recv_data = data
if data == ENQ:
handler = self.on_enq
elif data == ACK:
handler = self.on_ack
elif data == NAK:
handler = self.on_nak
elif data == EOT:
handler = self.on_eot
elif data.startswith(STX): # this looks like a message
handler = self.on_message
else:
handler = lambda: self.default_handler(data)
with self._lock:
resp = handler()
self.start_timer()
if resp is not None:
self.push(resp)
def default_handler(self, data):
raise ValueError('Unable to dispatch data: %r', data)
def push(self, data):
self._last_sent_data = data
return super(ASTMProtocol, self).push(data)
def start_timer(self):
if self.timeout is None:
return
self.stop_timer()
self._timer = self._timer_cls(self.timeout, self.on_timeout)
self._timer.daemon = True
self._timer.start()
log.debug('Timer %r started', self._timer)
def stop_timer(self):
if self.timeout is None or self._timer is None:
return
if self._timer is not None and self._timer.is_alive():
self._timer.cancel()
log.debug('Timer %r stopped', self._timer)
self._timer = None
def on_enq(self):
"""Calls on <ENQ> message receiving."""
def on_ack(self):
"""Calls on <ACK> message receiving."""
def on_nak(self):
"""Calls on <NAK> message receiving."""
def on_eot(self):
"""Calls on <EOT> message receiving."""
def on_message(self):
"""Calls on ASTM message receiving."""
def _get_state(self):
return self._state
def _set_state(self, value):
assert value in STATE
self._state = value
#: ASTM handler state value:
#:
#: - ``init``: Neutral state
#: - ``opened``: ENQ message was sent, waiting for ACK
#: - ``transfer``: Data transfer processing
#:
state = property(_get_state, _set_state)
def set_init_state(self):
"""Sets handler state to INIT (0).
In the ASTM specification this state is also called `neutral`, which means
the handler is ready to establish data transfer.
"""
self.terminator = 1
self.state = STATE.init
self.on_init_state()
log.info('Switched to init state')
def set_opened_state(self):
"""Sets handler state to OPENED (1).
Intermediate state that is only meaningful for the client implementation. In
this state the client has already sent <ENQ> and awaits an <ACK> or
<NAK> response. On <ACK> it switches its state to `transfer`.
"""
self.terminator = 1
self.state = STATE.opened
self.on_opened_state()
log.info('Switched to opened state')
def set_transfer_state(self):
"""Sets handler state to TRANSFER (2).
In this state the handler is able to send or receive ASTM messages depending
on its role (client or server). At the end of data transfer the client
should send <EOT> and switch state to `init`.
"""
self.terminator = [CRLF, EOT]
self.state = STATE.transfer
self.on_transfer_state()
log.info('Switched to transfer state')
def on_init_state(self):
"""Calls on set state INIT (0)"""
def on_opened_state(self):
"""Calls on set state OPENED (1)"""
def on_transfer_state(self):
"""Calls on set state TRANSFER (2)"""
def on_timeout(self):
"""Calls when timeout event occurs. Used to limit time for waiting
response data."""
|
Python
| 0.002084 |
@@ -247,17 +247,16 @@
import
-_
Timer, R
@@ -1132,17 +1132,16 @@
r_cls =
-_
Timer%0A%0A
|
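The underscore matters because Python 2 exposed the Timer class as threading._Timer (threading.Timer was a thin factory function), while Python 3.3 reworked Timer into the class itself, so the private name stopped being reliable. If both interpreters had to be supported, a small shim in the same spirit could read:

import threading

# Prefer the Python 2 private class when it exists; otherwise use Python 3's.
Timer = getattr(threading, '_Timer', threading.Timer)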
387b5732c0b2231580ae04bf5088ef7ce59b0d84
|
Add script to normalize the spelling in a dataset
|
normalize_dataset.py
|
normalize_dataset.py
|
Python
| 0.000008 |
@@ -0,0 +1,2376 @@
+%22%22%22Create multilabel data set with normalized spelling.%0AThe input consists of a directory of text files containing the dataset in%0Ahistoric spelling.%0A%0AThe data set consists of:%0A%3Csentence id%3E%5Ct%3Csentence%3E%5CtEmotie_Liefde (embodied emotions labels separated by%0A_)%0A%3Csentence id%3E%5Ct%3Csentence%3E%5CtNone ('None' if no words were tagged)%0A%0AUsage: python normalize_dataset.py %3Cinput dir%3E %3Coutput dir%3E%0A%22%22%22%0Aimport argparse%0Aimport codecs%0Aimport os%0Afrom collections import Counter%0Aimport json%0A%0A%0Aif __name__ == '__main__':%0A parser = argparse.ArgumentParser()%0A parser.add_argument('input_dir', help='the name of the directory '%0A 'containing text files that should be normalized.')%0A parser.add_argument('output_dir', help='the directory where the '%0A 'normalized data files should be saved.')%0A args = parser.parse_args()%0A%0A input_dir = args.input_dir%0A output_dir = args.output_dir%0A%0A # load hist2modern dictionary%0A with codecs.open('hist2modern_bwnt.json', 'rb', 'utf-8') as f:%0A full_dict = json.load(f, 'utf-8')%0A%0A # create simple historic word -%3E modern word mapping%0A # (full_dict may contain multiple alternatives for a word)%0A hist2modern = %7B%7D%0A for w in full_dict.keys():%0A if w not in full_dict%5Bw%5D:%0A c = Counter(full_dict%5Bw%5D)%0A hist2modern%5Bw%5D = c.most_common()%5B0%5D%5B0%5D%0A print '#words in dict: %7B%7D'.format(len(hist2modern))%0A%0A text_files = %5Bfi for fi in os.listdir(input_dir) if fi.endswith('.txt')%5D%0A for text_file in text_files:%0A print text_file%0A%0A in_file = os.path.join(input_dir, text_file)%0A out_file = os.path.join(output_dir, text_file)%0A with codecs.open(in_file, 'rb', 'utf-8') as f:%0A lines = f.readlines()%0A%0A with codecs.open(out_file, 'wb', 'utf-8') as f:%0A for line in lines:%0A parts = line.split('%5Ct')%0A words = parts%5B1%5D.split(' ')%0A%0A new_words = %5B%5D%0A for w in words:%0A wo = w.lower()%0A if wo in hist2modern.keys():%0A new_words.append(hist2modern%5Bwo%5D)%0A else:%0A new_words.append(w)%0A%0A f.write(u'%7B%7D%5Ct%7B%7D%5Ct%7B%7D'.format(parts%5B0%5D,%0A ' '.join(new_words),%0A parts%5B2%5D))%0A
|
|
2131c79441a47701d2f0bb16c94e111bec8a4716
|
Use PIL.Image.
|
avatar/models.py
|
avatar/models.py
|
import datetime
import os
import hashlib
from django.conf import settings
from django.db import models
from django.core.files.base import ContentFile
from django.core.files.storage import get_storage_class
from django.utils.translation import ugettext as _
from django.utils import six
from django.db.models import signals
try:
from django.utils.encoding import force_bytes
except ImportError:
force_bytes = str
from avatar.util import get_username
try:
from PIL import Image
except ImportError:
import Image
try:
from django.utils.timezone import now
except ImportError:
now = datetime.datetime.now
from avatar.util import invalidate_cache
from avatar.settings import (AVATAR_STORAGE_DIR, AVATAR_RESIZE_METHOD,
AVATAR_MAX_AVATARS_PER_USER, AVATAR_THUMB_FORMAT,
AVATAR_HASH_USERDIRNAMES, AVATAR_HASH_FILENAMES,
AVATAR_THUMB_QUALITY, AUTO_GENERATE_AVATAR_SIZES,
AVATAR_DEFAULT_SIZE, AVATAR_STORAGE,
AVATAR_CLEANUP_DELETED)
avatar_storage = get_storage_class(AVATAR_STORAGE)()
def avatar_file_path(instance=None, filename=None, size=None, ext=None):
tmppath = [AVATAR_STORAGE_DIR]
if AVATAR_HASH_USERDIRNAMES:
tmp = hashlib.md5(get_username(instance.user)).hexdigest()
tmppath.extend([tmp[0], tmp[1], get_username(instance.user)])
else:
tmppath.append(get_username(instance.user))
if not filename:
# Filename already stored in database
filename = instance.avatar.name
if ext and AVATAR_HASH_FILENAMES:
# An extension was provided, probably because the thumbnail
# is in a different format than the file. Use it. Because it's
# only enabled if AVATAR_HASH_FILENAMES is true, we can trust
# it won't conflict with another filename
(root, oldext) = os.path.splitext(filename)
filename = root + "." + ext
else:
# File doesn't exist yet
if AVATAR_HASH_FILENAMES:
(root, ext) = os.path.splitext(filename)
filename = hashlib.md5(force_bytes(filename)).hexdigest()
filename = filename + ext
if size:
tmppath.extend(['resized', str(size)])
tmppath.append(os.path.basename(filename))
return os.path.join(*tmppath)
def find_extension(format):
format = format.lower()
if format == 'jpeg':
format = 'jpg'
return format
class Avatar(models.Model):
user = models.ForeignKey(getattr(settings, 'AUTH_USER_MODEL', 'auth.User'))
primary = models.BooleanField(default=False)
avatar = models.ImageField(max_length=1024,
upload_to=avatar_file_path,
storage=avatar_storage,
blank=True)
date_uploaded = models.DateTimeField(default=now)
def __unicode__(self):
return _(six.u('Avatar for %s')) % self.user
def save(self, *args, **kwargs):
avatars = Avatar.objects.filter(user=self.user)
if self.pk:
avatars = avatars.exclude(pk=self.pk)
if AVATAR_MAX_AVATARS_PER_USER > 1:
if self.primary:
avatars = avatars.filter(primary=True)
avatars.update(primary=False)
else:
avatars.delete()
super(Avatar, self).save(*args, **kwargs)
def thumbnail_exists(self, size):
return self.avatar.storage.exists(self.avatar_name(size))
def create_thumbnail(self, size, quality=None):
# invalidate the cache of the thumbnail with the given size first
invalidate_cache(self.user, size)
try:
orig = self.avatar.storage.open(self.avatar.name, 'rb')
image = Image.open(orig)
quality = quality or AVATAR_THUMB_QUALITY
w, h = image.size
if w != size or h != size:
if w > h:
diff = int((w - h) / 2)
image = image.crop((diff, 0, w - diff, h))
else:
diff = int((h - w) / 2)
image = image.crop((0, diff, w, h - diff))
if image.mode != "RGB":
image = image.convert("RGB")
image = image.resize((size, size), AVATAR_RESIZE_METHOD)
thumb = six.BytesIO()
image.save(thumb, AVATAR_THUMB_FORMAT, quality=quality)
thumb_file = ContentFile(thumb.getvalue())
else:
thumb_file = ContentFile(orig)
thumb = self.avatar.storage.save(self.avatar_name(size), thumb_file)
except IOError:
return # What should we do here? Render a "sorry, didn't work" img?
def avatar_url(self, size):
return self.avatar.storage.url(self.avatar_name(size))
def get_absolute_url(self):
return self.avatar_url(AVATAR_DEFAULT_SIZE)
def avatar_name(self, size):
ext = find_extension(AVATAR_THUMB_FORMAT)
return avatar_file_path(
instance=self,
size=size,
ext=ext
)
def invalidate_avatar_cache(sender, instance, **kwargs):
invalidate_cache(instance.user)
def create_default_thumbnails(sender, instance, created=False, **kwargs):
invalidate_avatar_cache(sender, instance)
if created:
for size in AUTO_GENERATE_AVATAR_SIZES:
instance.create_thumbnail(size)
def remove_avatar_images(instance=None, **kwargs):
for size in AUTO_GENERATE_AVATAR_SIZES:
if instance.thumbnail_exists(size):
instance.avatar.storage.delete(instance.avatar_name(size))
instance.avatar.storage.delete(instance.avatar.name)
signals.post_save.connect(create_default_thumbnails, sender=Avatar)
signals.post_delete.connect(invalidate_avatar_cache, sender=Avatar)
if AVATAR_CLEANUP_DELETED:
signals.post_delete.connect(remove_avatar_images, sender=Avatar)
|
Python
| 0 |
@@ -33,16 +33,38 @@
hashlib
+%0Afrom PIL import Image
%0A%0Afrom d
@@ -480,77 +480,8 @@
me%0A%0A
-try:%0A from PIL import Image%0Aexcept ImportError:%0A import Image%0A%0A
try:
|
dee535c8566d0e542891ed10939eec6448483a6f
|
read in cenque galaxy catalog
|
code/centralms.py
|
code/centralms.py
|
Python
| 0 |
@@ -0,0 +1,2459 @@
+'''%0A%0A%0A%0A'''%0Aimport h5py%0Aimport numpy as np%0A%0A# --- local --- %0Aimport util as UT%0A%0A%0Aclass CentralMS(object):%0A%0A def __init__(self, cenque='default'):%0A ''' This object reads in the star-forming and quenching%0A galaxies generated from the CenQue project and is an object%0A for those galaxies. Unlike CenQue, this object WILL NOT%0A have extensive functions and will act as a data catalog. %0A %0A '''%0A self.cenque = cenque%0A self.mass = None%0A self.sfr = None%0A self.ssfr = None %0A%0A def _Read_CenQue(self): %0A ''' Read in SF and Quenching galaxies generated from %0A the CenQue project. %0A '''%0A if self.cenque == 'default': %0A tf = 7 %0A abcrun = 'RHOssfrfq_TinkerFq_Std'%0A prior = 'updated'%0A else: %0A raise NotImplementedError%0A%0A file = ''.join(%5BUT.dat_dir(), 'cenque/',%0A 'sfms.centrals.', %0A 'tf', str(tf), %0A '.abc_', abcrun, %0A '.prior_', prior, %0A '.hdf5'%5D) %0A%0A # read in the file and save to object%0A f = h5py.File(file, 'r') %0A grp = f%5B'data'%5D %0A for col in grp.keys(): %0A if col == 'mass': %0A # make sure to mark as SHAM mass%0A setattr(self, 'M_sham', grp%5Bcol%5D%5B:%5D) %0A elif col in %5B'sfr', 'ssfr'%5D:%0A continue %0A else: %0A setattr(self, col, grp%5Bcol%5D%5B:%5D)%0A f.close() %0A return None %0A%0A%0Adef AssignSFR0(cms): %0A ''' Assign initial SFRs to the cms object based on tsnap_genesis %0A (time when the halo enters the catalog) and mass_genesis%0A '''%0A if 'tsnap_genesis' not in cms.__dict__.keys(): %0A # Most likely you did not read in CenQue catalog!%0A raise ValueError%0A%0A # Assign SFR to star-forming galaxies %0A sfr_class%5Bstarforming%5D = 'star-forming'%0A mu_sf_sfr = AverageLogSFR_sfms(%0A mass%5Bstarforming%5D, %0A redshift%5Bstarforming%5D, %0A sfms_prop=sfms_dict)%0A sigma_sf_sfr = ScatterLogSFR_sfms(%0A mass%5Bstarforming%5D, %0A redshift%5Bstarforming%5D,%0A sfms_prop=sfms_dict)%0A avg_sfr%5Bstarforming%5D = mu_sf_sfr%0A delta_sfr%5Bstarforming%5D = sigma_sf_sfr * np.random.randn(ngal_sf)%0A sfr%5Bstarforming%5D = mu_sf_sfr + delta_sfr%5Bstarforming%5D%0A ssfr%5Bstarforming%5D = sfr%5Bstarforming%5D - mass%5Bstarforming%5D%0A%0A%0A%0A%0A%0Aif __name__=='__main__': %0A cms = CentralMS()%0A cms._Read_CenQue()%0A
|
|
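The _Read_CenQue loop above leans on h5py's dict-like group access (note that AssignSFR0 as committed references names such as starforming and sfms_dict that are never defined, so it reads as a work-in-progress stub). The same read pattern in isolation, with an illustrative file name:

import h5py

with h5py.File('sfms.centrals.hdf5', 'r') as f:  # hypothetical file name
    grp = f['data']
    for col in grp.keys():
        column = grp[col][:]  # slice the whole dataset into a numpy array
        print(col, column.shape)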
65e689dd66124fcaa0ce8ab9f5029b727fba18e2
|
Add solution for compare version numbers
|
src/compare_version_numbers.py
|
src/compare_version_numbers.py
|
Python
| 0 |
@@ -0,0 +1,1842 @@
+%22%22%22%0ASource : https://oj.leetcode.com/problems/compare-version-numbers/%0AAuthor : Changxi Wu%0ADate : 2015-01-23%0A%0ACompare two version numbers version1 and version2.%0A%0Aif version1 %3E version2 return 1, if version1 %3C version2 return -1, otherwise return 0.%0A%0AYou may assume that the version strings are non-empty and contain only digits and the . character.%0AThe . character does not represent a decimal point and is used to separate number sequences.%0AFor instance, 2.5 is not %22two and a half%22 for %22half way to version three%22, it is the fifth second-level revision of the second first-level revision.%0A%0AHere is an example of version numbers ordering:%0A 0.1 %3C 1.1 %3C 1.2 %3C 13.37%0A%0A%22%22%22%0A%0A# @param version1, a string%0A# @param version2, a string%0A# @return an integer%0Adef compareVersion(version1, version2):%0A list1 = map(int, version1.split('.'))%0A list2 = map(int, version2.split('.'))%0A max_length = len(list1) if len(list1) %3E len(list2) else len(list2)%0A for i in range(max_length):%0A value1 = value2 = 0%0A if i %3C len(list1):%0A value1 = list1%5Bi%5D%0A if i %3C len(list2):%0A value2 = list2%5Bi%5D%0A if value1 %3E value2:%0A return 1%0A elif value1 %3C value2:%0A return -1%0A return 0%0A%0Aif __name__ == '__main__':%0A version1_list = %5B'0.1','1.1','1.2','13.37','1','1.0'%5D%0A version2_list = %5B'1.1','1.2','13.37','1','13.37','1.0'%5D%0A result_list = %5B-1, -1, -1, 1, -1, 0%5D%0A max_length = len(version1_list)%0A success = True%0A for i in range(max_length):%0A result = compareVersion(version1_list%5Bi%5D, version2_list%5Bi%5D)%0A if result != result_list%5Bi%5D:%0A success = False%0A print 'Input:', version1_list%5Bi%5D, version2_list%5Bi%5D%0A print 'Output:', result%0A print 'Expected:', result_list%5Bi%5D%0A if success:%0A print 'All tests are passed'%0A%0A
|
|
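The key move in the solution above is treating absent components as zero, which is why '1' and '1.0' compare equal. A compact equivalent with explicit padding:

def compare_version(v1, v2):
    a = list(map(int, v1.split('.')))
    b = list(map(int, v2.split('.')))
    n = max(len(a), len(b))
    a += [0] * (n - len(a))   # pad so '1' becomes [1, 0] against '1.0'
    b += [0] * (n - len(b))
    return (a > b) - (a < b)  # lexicographic compare: 1, -1, or 0

assert compare_version('0.1', '1.1') == -1
assert compare_version('1', '1.0') == 0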
0da01e405849da1d5876ec5a758c378aaf70fab2
|
add the canary
|
cleverhans/canary.py
|
cleverhans/canary.py
|
Python
| 0.999998 |
@@ -0,0 +1,2218 @@
+import numpy as np%0Aimport tensorflow as tf%0Afrom cleverhans.utils_tf import infer_devices%0A%0Adef run_canary():%0A %22%22%22%0A Runs some code that will crash if the GPUs / GPU driver are suffering from%0A a common bug. This helps to prevent contaminating results in the rest of%0A the library with incorrect calculations.%0A %22%22%22%0A%0A # Note: please do not edit this function unless you have access to a machine%0A # with GPUs suffering from the bug and can verify that the canary still%0A # crashes after your edits. Due to the transient nature of the GPU bug it is%0A # not possible to unit test the canary in our continuous integration system.%0A%0A # Try very hard not to let the canary affect the graph for the rest of the%0A # python process%0A canary_graph = tf.Graph()%0A with canary_graph.as_default():%0A devices = infer_devices()%0A num_devices = len(devices)%0A if num_devices %3C 3:%0A # We have never observed GPU failure when less than 3 GPUs were used%0A return%0A%0A v = np.random.RandomState(%5B2018, 10, 16%5D).randn(2, 2)%0A # Try very hard not to let this Variable end up in any collections used%0A # by the rest of the python process%0A w = tf.Variable(v, trainable=False, collections=%5B%5D)%0A loss = tf.reduce_sum(tf.square(w))%0A%0A grads = %5B%5D%0A for device in devices:%0A with tf.device(device):%0A grad, = tf.gradients(loss, w)%0A grads.append(grad)%0A%0A sess = tf.Session()%0A sess.run(tf.variables_initializer(%5Bw%5D))%0A grads = sess.run(grads)%0A first = grads%5B0%5D%0A for grad in grads%5B1:%5D:%0A if not np.allclose(first, grad):%0A # pylint can't see when we use variables via locals()%0A # pylint: disable=unused-variable%0A first_string = str(first)%0A grad_string = str(grad)%0A raise RuntimeError(%22Something is wrong with your GPUs or GPU driver.%22%0A %22%25(num_devices)d different GPUS were asked to %22%0A %22calculate the same 2x2 gradient. One returned %22%0A %22%25(first_string)s and another returned %22%0A %22%25(grad_string)s. This can usually be fixed by %22%0A %22rebooting the machine.%22 %25 locals())%0A sess.close()%0A%0Aif __name__ == %22__main__%22:%0A run_canary()%0A
|
|
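Stripped of the TensorFlow session plumbing, the canary's health test is just "every device produced the same gradient"; the comparison logic in isolation:

import numpy as np

def devices_agree(grads):
    """True when every per-device gradient matches the first one."""
    first = grads[0]
    return all(np.allclose(first, g) for g in grads[1:])

# a healthy machine returns identical results from all devices
assert devices_agree([np.ones((2, 2))] * 3)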
c370edc980a34264f61e27d0dd288a7d6adf2d7e
|
Create consumer.py
|
bin/consumer.py
|
bin/consumer.py
|
Python
| 0.000005 |
@@ -0,0 +1,226 @@
+# Consumer example to show the producer works: J.Oxenberg%0Afrom kafka import KafkaConsumer %0Aconsumer = KafkaConsumer(b'test',bootstrap_servers=%22172.17.136.43%22) %0A#wait for messages %0Afor message in consumer: %0A print(message) %0A
|
|
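For context, the producer side this consumer verifies would look roughly like the following with kafka-python (the topic and payload are illustrative; only the broker address is taken from the consumer above):

from kafka import KafkaProducer

producer = KafkaProducer(bootstrap_servers='172.17.136.43')
producer.send('test', b'hello from the producer')  # value must be bytes
producer.flush()  # block until the message is actually delivered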
6245656fc9681dc8f16598822d98789f2a712d9e
|
Remove import
|
cms/forms/wizards.py
|
cms/forms/wizards.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django import forms
from django.contrib.sites.models import Site
from django.core.exceptions import PermissionDenied
from django.utils.encoding import smart_text
from django.utils.translation import ugettext_lazy as _, get_language
from cms.constants import TEMPLATE_INHERITANCE_MAGIC, PAGE_TYPES_ID
from cms.exceptions import NoPermissionsException
from cms.models import Page, Title
from cms.models.titlemodels import EmptyTitle
from cms.utils import permissions
from cms.utils.conf import get_cms_setting
def user_has_view_permission(user, page=None):
"""
This code largely duplicates Page.has_view_permission(). We do this because
the source method requires a request object, which isn't appropriate in
this case. Fortunately, the source method (and its dependencies) use the
request object only to get the user object, when it isn't explicitly
provided and for caching permissions. We don't require caching here and we
can explicitly provide the user object.
"""
if not user:
return False
class FakeRequest(object):
pass
fake_request = FakeRequest()
can_see_unrestricted = get_cms_setting('PUBLIC_FOR') == 'all' or (
get_cms_setting('PUBLIC_FOR') == 'staff' and user.is_staff)
# Inherited and direct view permissions
is_restricted = bool(
permissions.get_any_page_view_permissions(fake_request, page))
if not is_restricted and can_see_unrestricted:
return True
elif not user.is_authenticated():
return False
if not is_restricted:
# a global permission was given to the request's user
if permissions.has_global_page_permission(
fake_request, page.site_id, user=user, can_view=True):
return True
else:
# a specific permission was granted to the request's user
if page.get_draft_object().has_generic_permission(
fake_request, "view", user=user):
return True
# The user has a normal django permission to view pages globally
opts = page._meta
codename = '%s.view_%s' % (opts.app_label, opts.object_name.lower())
return user.has_perm(codename)
class PageTypeSelect(forms.widgets.Select):
"""
Special widget for the page_type choice-field. This simply adds some JS for
hiding/showing the content field based on the selection of this select.
"""
class Media:
js = (
'cms/js/dist/bundle.admin.base.min.js',
'cms/js/modules/cms.base.js',
'cms/js/widgets/wizard.pagetypeselect.js',
)
class BaseCMSPageForm(forms.Form):
title = forms.CharField(label=_(u'Title'), max_length=255,
help_text=_(u"Provide a title for the new page."))
page_type = forms.ChoiceField(label=_(u'Page type'), required=False,
widget=PageTypeSelect())
content = forms.CharField(
label=_(u'Content'), widget=forms.Textarea, required=False,
help_text=_(u"Optional. If supplied, will be automatically added "
u"within a new text plugin."))
def __init__(self, instance=None, *args, **kwargs):
# Expect instance argument here, as we have to accept some of the
# ModelForm __init__() arguments here for the ModelFormMixin cbv
self.instance = instance
super(BaseCMSPageForm, self).__init__(*args, **kwargs)
if self.page:
site = self.page.site_id
else:
site = Site.objects.get_current()
# Either populate, or remove the page_type field
if 'page_type' in self.fields:
root = Page.objects.filter(publisher_is_draft=True,
reverse_id=PAGE_TYPES_ID,
site=site).first()
if root:
page_types = root.get_descendants()
else:
page_types = Page.objects.none()
if root and page_types:
# Set the choicefield's choices to the various page_types
language = get_language()
type_ids = page_types.values_list('pk', flat=True)
titles = Title.objects.filter(page__in=type_ids,
language=language)
choices = [('', '---------')]
for title in titles:
choices.append((title.page_id, title.title))
self.fields['page_type'].choices = choices
else:
# There are no page_types, so don't bother the user with an
# empty choice field.
del self.fields['page_type']
class CreateCMSPageForm(BaseCMSPageForm):
@staticmethod
def create_page_titles(page, title, languages):
# Import here due to potential circular dependency issues
from cms.api import create_title
for language in languages:
title_obj = page.get_title_obj(language=language, fallback=False)
if isinstance(title_obj, EmptyTitle):
create_title(language, title, page)
@staticmethod
def get_first_placeholder(page):
"""
Returns the first editable, non-static placeholder or None.
"""
for placeholder in page.get_placeholders():
if not placeholder.is_static and placeholder.is_editable:
return placeholder
else:
return None
def save(self, **kwargs):
from cms.api import create_page, add_plugin
from cms.cms_wizards import user_has_page_add_permission
# Check to see if this user has permissions to make this page. We've
# already checked this when producing a list of wizard entries, but this
# is to prevent people from possible form-hacking.
if 'sub_page' in self.cleaned_data:
sub_page = self.cleaned_data['sub_page']
else:
sub_page = False
if self.page:
if sub_page:
parent = self.page
position = "last-child"
else:
parent = self.page.parent
position = "right"
else:
parent = None
position = "last-child"
# Before we do this, verify this user has perms to do so.
if not (self.user.is_superuser or
user_has_page_add_permission(self.user, self.page,
position=position,
site=self.page.site_id)):
raise NoPermissionsException(
_(u"User does not have permission to add page."))
title = self.cleaned_data['title']
page = create_page(
title=title,
template=get_cms_setting('WIZARD_DEFAULT_TEMPLATE'),
language=self.language_code,
created_by=smart_text(self.user),
parent=parent,
in_navigation=True,
published=False
)
page_type = self.cleaned_data.get("page_type")
if page_type:
copy_target = Page.objects.filter(pk=page_type).first()
else:
copy_target = None
if copy_target:
# If the user selected a page type, copy that.
if not user_has_view_permission(self.user, copy_target):
raise PermissionDenied()
# Copy page attributes
copy_target._copy_attributes(page, clean=True)
page.save()
# Copy contents (for each language)
for lang in copy_target.get_languages():
copy_target._copy_contents(page, lang)
# Copy extensions
from cms.extensions import extension_pool
extension_pool.copy_extensions(copy_target, page)
else:
# If the user provided content, then use that instead.
content = self.cleaned_data.get('content')
if content and permissions.has_plugin_permission(
self.user, get_cms_setting('WIZARD_CONTENT_PLUGIN'), "add"):
placeholder = self.get_first_placeholder(page)
if placeholder:
add_plugin(**{
'placeholder': placeholder,
'plugin_type': get_cms_setting('WIZARD_CONTENT_PLUGIN'),
'language': self.language_code,
get_cms_setting('WIZARD_CONTENT_PLUGIN_BODY'): content
})
return page
class CreateCMSSubPageForm(CreateCMSPageForm):
sub_page = forms.BooleanField(initial=True, widget=forms.HiddenInput)
|
Python
| 0 |
@@ -326,36 +326,8 @@
port
- TEMPLATE_INHERITANCE_MAGIC,
PAG
|
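Decoded, the hunk drops the now-unused constant from the import line, nothing else:

# before
from cms.constants import TEMPLATE_INHERITANCE_MAGIC, PAGE_TYPES_ID
# after
from cms.constants import PAGE_TYPES_ID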
70b312bde16a8c4fca47e4782f2293f0b96f9751
|
Add test_datagen2.py
|
cnn/test_datagen2.py
|
cnn/test_datagen2.py
|
Python
| 0.000213 |
@@ -0,0 +1,1141 @@
+import os%0Aimport shutil%0Aimport numpy as np%0Afrom scipy.misc import toimage%0Aimport matplotlib.pyplot as plt%0Afrom keras.datasets import cifar10%0Afrom keras.preprocessing.image import ImageDataGenerator%0A%0Adef draw(X, filename):%0A plt.figure()%0A pos = 1%0A for i in range(X.shape%5B0%5D):%0A plt.subplot(4, 4, pos)%0A img = toimage(X%5Bi%5D)%0A plt.imshow(img)%0A plt.axis('off')%0A pos += 1%0A plt.savefig(filename)%0A%0Aif __name__ == '__main__':%0A img_rows, img_cols, img_channels = 32, 32, 3%0A batch_size = 16%0A nb_classes = 10%0A%0A # CIFAR-10%E3%83%87%E3%83%BC%E3%82%BF%E3%82%92%E3%83%AD%E3%83%BC%E3%83%89%0A (X_train, y_train), (X_test, y_test) = cifar10.load_data()%0A%0A # %E7%94%BB%E7%B4%A0%E5%80%A4%E3%82%920-1%E3%81%AB%E5%A4%89%E6%8F%9B%0A X_train = X_train.astype('float32')%0A X_train /= 255.0%0A X_train = X_train%5B0:batch_size%5D%0A y_train = y_train%5B0:batch_size%5D%0A%0A draw(X_train, 'datagen_before.png')%0A%0A # %E3%83%87%E3%83%BC%E3%82%BF%E6%8B%A1%E5%BC%B5%0A datagen = ImageDataGenerator(%0A rotation_range=90,%0A zca_whitening=True%0A )%0A%0A datagen.fit(X_train)%0A g = datagen.flow(X_train, y_train, batch_size, shuffle=False)%0A batch = g.next()%0A print(batch%5B0%5D.shape)%0A print(batch%5B1%5D.shape)%0A%0A draw(batch%5B0%5D, 'datagen_after.png')%0A
|
|
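The script drives augmentation by hand through flow(...) rather than a training loop; the same pattern with a stand-in batch (zca_whitening is omitted here so the sketch runs without a representative fit sample):

import numpy as np
from keras.preprocessing.image import ImageDataGenerator

X = np.random.rand(16, 32, 32, 3).astype('float32')  # stand-in for CIFAR-10
y = np.zeros((16, 1))
datagen = ImageDataGenerator(rotation_range=90)
batch_x, batch_y = next(datagen.flow(X, y, batch_size=16, shuffle=False))
print(batch_x.shape, batch_y.shape)  # (16, 32, 32, 3) (16, 1)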
2dd5afae12dc7d58c3349f2df2694eeb77ca0298
|
Test driving robot via serial input
|
examples/test_spinn_tracks4.py
|
examples/test_spinn_tracks4.py
|
Python
| 0 |
@@ -0,0 +1,1250 @@
+import nengo%0A%0Aimport nengo_pushbot%0Aimport numpy as np%0A%0Amodel = nengo.Network()%0Awith model:%0A input = nengo.Node(lambda t: %5B0.5*np.sin(t), 0.5*np.cos(t)%5D)%0A%0A a = nengo.Ensemble(nengo.LIF(100), dimensions=2)%0A #b = nengo.Ensemble(nengo.LIF(100), dimensions=2)%0A #c = nengo.Ensemble(nengo.LIF(100), dimensions=2)%0A #d = nengo.Ensemble(nengo.LIF(100), dimensions=2)%0A%0A #nengo.Connection(a, b, filter=0.01)%0A #nengo.Connection(b, c, filter=0.01)%0A #nengo.Connection(c, d, filter=0.01)%0A%0A #nengo.Connection(a, a, transform=%5B%5B1.1, 0%5D, %5B0, 1.1%5D%5D, filter=0.1)%0A #b = nengo.Ensemble(nengo.LIF(100), dimensions=2)%0A%0A bot = nengo_pushbot.PushBot(address=(0xFE, 0xFF, 1, 0, 0))%0A%0A tracks = nengo_pushbot.Tracks(bot)%0A #def printout(t, x):%0A # print t, x%0A # return %5B%5D%0A #tracks2 = nengo.Node(printout, size_in=2)%0A%0A nengo.Connection(input, a, filter=0.01)%0A #nengo.Connection(a, b, filter=0.01)%0A #nengo.Connection(b, c, filter=0.01)%0A #nengo.Connection(c, d, filter=0.01)%0A nengo.Connection(a, tracks, filter=0.01)%0A #nengo.Connection(b, tracks2, filter=0.01)%0A%0A#sim_normal = nengo.Simulator(model)%0A#sim_normal.run(5)%0A%0Aimport nengo_spinnaker%0Asim = nengo_spinnaker.Simulator(model, use_serial=True)%0Asim.run(1000)%0A%0A
|
|
f1826205782eb56ba6b478c70e671acae6872d35
|
Read similarity graph
|
exp/influence2/GraphReader2.py
|
exp/influence2/GraphReader2.py
|
Python
| 0.000016 |
@@ -0,0 +1,2338 @@
+try: %0A ctypes.cdll.LoadLibrary(%22/usr/local/lib/libigraph.so%22)%0Aexcept: %0A pass %0Aimport igraph %0Aimport numpy %0Afrom apgl.util.PathDefaults import PathDefaults %0Aimport logging %0A%0Aclass GraphReader2(object): %0A %22%22%22%0A A class to read the similarity graph generated from the Arnetminer dataset %0A %22%22%22%0A def __init__(self, field): %0A self.field = field%0A self.eps = 0.1%0A %0A dirName = PathDefaults.getDataDir() + %22reputation/%22 + self.field + %22/arnetminer/%22%0A self.coauthorFilename = dirName + %22coauthors.csv%22%0A self.coauthorMatrixFilename = dirName + %22coauthorSimilarity.npy%22%0A self.trainExpertsFilename = dirName + %22experts_train_matches%22 + %22.csv%22%0A self.testExpertsFilename = dirName + %22experts_test_matches%22 + %22.csv%22%0A %0A logging.debug(%22Publications filename: %22 + self.coauthorFilename)%0A logging.debug(%22Training experts filename: %22 + self.trainExpertsFilename)%0A logging.debug(%22Test experts filename: %22 + self.testExpertsFilename)%0A %0A def read(self):%0A %0A K = numpy.load(self.coauthorMatrixFilename)%0A K = K.tolist()%0A graph = igraph.Graph.Weighted_Adjacency(K, mode=%22PLUS%22, loops=False)%0A %0A print(graph.summary())%0A graph.simplify(combine_edges=sum) %0A graph.es%5B%22invWeight%22%5D = 1.0/numpy.array(graph.es%5B%22weight%22%5D) %0A %0A return graph %0A %0A def readExperts(self, train=False): %0A %22%22%22%0A Read the experts from a test file. Returns two lists: expertsList is the %0A list of their names, and expertsIdList is their integer ID. %0A %22%22%22%0A if not train:%0A logging.debug(%22Reading test experts list%22)%0A expertsFile = open(self.testExpertsFilename)%0A else: %0A logging.debug(%22Reading training experts list%22)%0A expertsFile = open(self.trainExpertsFilename)%0A %0A expertsList = expertsFile.readlines()%0A expertsFile.close()%0A %0A coauthorsFile = open(self.coauthorFilename)%0A coauthors = coauthorsFile.readlines() %0A coauthorsFile.close() %0A %0A expertsIdList = %5B%5D %0A %0A for expert in expertsList: %0A if expert in coauthors: %0A expertsIdList.append(coauthors.index(expert))%0A %0A return expertsList, expertsIdList
|
|
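The read() method above turns a dense similarity matrix into a weighted igraph graph; the conversion in isolation, with a tiny hand-made matrix standing in for the .npy file:

import igraph
import numpy as np

K = np.array([[0.0, 2.0],
              [1.0, 0.0]])
g = igraph.Graph.Weighted_Adjacency(K.tolist(), mode="PLUS", loops=False)
g.simplify(combine_edges=sum)  # merge parallel edges, summing their weights
g.es["invWeight"] = 1.0 / np.array(g.es["weight"])  # weights as distances
print(g.summary())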
e598608f21e30aeeec1ea9a8f452047a270fdc4d
|
add setup.py to build C module 'counts'; in perspective, it should setup cbclib on various systems
|
cbclib/setup.py
|
cbclib/setup.py
|
Python
| 0.000002 |
@@ -0,0 +1,163 @@
+from distutils.core import setup, Extension%0A%0Asetup(%0A name=%22counts%22, version=%220.1%22,%0A ext_modules=%5BExtension(%22counts%22, %5B%22countsmodule.c%22, %22countscalc.c%22%5D)%5D%0A)%0A%0A
|
|
22769c9d84de432034ef592f94c77b5d5111599d
|
Create argparser.py
|
argparser.py
|
argparser.py
|
Python
| 0.000418 |
@@ -0,0 +1,1216 @@
+def get_args():%0A%09import argparse%0A%09import os%0A%09from sys import exit%0A%09%0A%09parser = argparse.ArgumentParser(description='Automates android memory dumping')%0A%09parser.add_argument('-n', '--samplepath', required=True,help='path of the malware sample-apk')%0A%09parser.add_argument('-i', '--interval', required=True, type=int, help='intervals for each memory dump in seconds')%0A%09parser.add_argument('-d', '--sdcard', type=int, required=False, help='dump will be saved on the sdcard of the android device instead of being transfered over TCP')%0A%09parser.add_argument('-o', '--outputpath', required=False, help='path of the output-path')%0A%09parser.add_argument('-c', '--customconfig', required=False, help='path of a custom avd config.ini')%0A%0A%09args = parser.parse_args()%0A%09if not os.path.isfile(args.samplepath) or (args.customconfig is not None and os.path.isfile(args.customconfig)):%0A%09%09raise Exception(%22error : one or more specified paths are not pointing to a file%22)%0A%09return args.samplepath, args.interval, args.sdcard, args.outputpath, args.customconfig%0A%09%0A%09%0Aif __name__ == '__main__':%0A%09import sys%0A%09get_args(sys.argv%5B1:%5D)%0A#AVDNAME = os.path.splitext(args.samplepath)%5B0%5D%0A#AVDPATH = args.samplepath%0A#os.path.isfile(fname) %0A#print(AVDNAME)%0A
|
|
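One caveat worth noting: the guard as committed raises when a custom config file does exist, because the second isfile test is not negated. The intended validation presumably reads:

import os

def validate(samplepath, customconfig=None):
    if not os.path.isfile(samplepath):
        raise Exception("error : sample path is not pointing to a file")
    if customconfig is not None and not os.path.isfile(customconfig):
        raise Exception("error : custom config is not pointing to a file")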
d5f851b07da64edbf2676bdc5f40d17342bec29f
|
Update sds_detect.py
|
tendrl/node_agent/node_sync/sds_detect.py
|
tendrl/node_agent/node_sync/sds_detect.py
|
import uuid
import etcd
import gevent
from tendrl.commons.event import Event
from tendrl.commons.message import ExceptionMessage
from tendrl.commons.message import Message
from tendrl.node_agent.discovery.sds import manager as sds_manager
def sync():
try:
Event(
Message(
priority="debug",
publisher=NS.publisher_id,
payload={"message": "Running SDS detection"}
)
)
try:
sds_discovery_manager = sds_manager.SDSDiscoveryManager()
except ValueError as ex:
Event(
ExceptionMessage(
priority="debug",
publisher=NS.publisher_id,
payload={"message": "Failed to init SDSDiscoveryManager.",
"exception": ex
}
)
)
return
# Execute the SDS discovery plugins and tag the nodes with data
for plugin in sds_discovery_manager.get_available_plugins():
sds_details = plugin.discover_storage_system()
if ('detected_cluster_id' in sds_details and sds_details[
'detected_cluster_id'] != ""):
if sds_details:
try:
dc = NS.tendrl.objects.DetectedCluster().load()
dc_changed = False
if dc.detected_cluster_id:
if dc.detected_cluster_id != sds_details.get('detected_cluster_id'):
dc_changed = True
else:
gevent.sleep(3)
integration_index_key = \
"indexes/detected_cluster_id_to_integration_id/" \
"%s" % sds_details['detected_cluster_id']
try:
if dc_changed:
integration_id = NS.tendrl_context.integration_id
else:
integration_id = str(uuid.uuid4())
NS._int.wclient.write(integration_index_key,
integration_id,
prevExist=False)
except etcd.EtcdAlreadyExist:
if not dc_changed:
integration_id = NS._int.client.read(
integration_index_key).value
finally:
NS.tendrl_context.integration_id = integration_id
NS.tendrl_context.cluster_id = sds_details.get(
'detected_cluster_id')
NS.tendrl_context.cluster_name = sds_details.get(
'detected_cluster_name')
NS.tendrl_context.sds_name = sds_details.get(
'pkg_name')
NS.tendrl_context.sds_version = sds_details.get(
'pkg_version')
NS.tendrl_context.save()
NS.node_context = NS.node_context.load()
integration_tag = "tendrl/integration/%s" % \
integration_id
detected_cluster_tag = "detected_cluster/%s" % \
sds_details[
'detected_cluster_id']
NS.node_context.tags += [detected_cluster_tag,
integration_tag]
NS.node_context.tags = list(set(NS.node_context.tags))
NS.node_context.save()
_cluster = NS.tendrl.objects.Cluster(
integration_id=NS.tendrl_context.integration_id
).load()
NS.tendrl.objects.DetectedCluster(
detected_cluster_id=sds_details.get(
'detected_cluster_id'),
detected_cluster_name=sds_details.get(
'detected_cluster_name'),
sds_pkg_name=sds_details.get('pkg_name'),
sds_pkg_version=sds_details.get('pkg_version'),
).save()
if _cluster.is_managed == "yes":
continue
else:
_cluster.is_managed = "no"
_cluster.save()
except (etcd.EtcdException, KeyError) as ex:
Event(
ExceptionMessage(
priority="debug",
publisher=NS.publisher_id,
payload={"message": "Failed SDS detection",
"exception": ex
}
)
)
break
except Exception as ex:
Event(
ExceptionMessage(
priority="error",
publisher=NS.publisher_id,
payload={"message": "node_sync "
"SDS detection failed: " +
ex.message,
"exception": ex}
)
)
|
Python
| 0.000001 |
@@ -2056,32 +2056,175 @@
.integration_id%0A
+ NS._int.wclient.write(integration_index_key,%0A integration_id)%0A
@@ -2308,16 +2308,20 @@
uid4())%0A
+
|
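Decoded, the first hunk adds a plain write (no prevExist guard) for the branch where the detected cluster id changed, and the second re-indents the guarded write under the else; the resulting block is roughly:

if dc_changed:
    integration_id = NS.tendrl_context.integration_id
    NS._int.wclient.write(integration_index_key,
                          integration_id)
else:
    integration_id = str(uuid.uuid4())
    NS._int.wclient.write(integration_index_key,
                          integration_id,
                          prevExist=False)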
dbe76ab17e795540de6a53b22f90c8af0cb15dbe
|
Add constants example
|
constants.example.py
|
constants.example.py
|
Python
| 0.000011 |
@@ -0,0 +1,756 @@
+# coding: utf-8%0Afrom __future__ import unicode_literals%0A%0Atoken = '123456789:dfghdfghdflugdfhg-77fwftfeyfgftre' # bot access_token%0Asn_stickers = ('CADAgADDwAu0BX', 'CAADA',%0A 'CDAgADEQADfvu0Bh0Xd-rAg', 'CAADAgAADfvu0Bee9LyXSj1_fAg',) # ids%0Asome2_stickers = ('CAADAKwADd_JnDFPYYarHAg', 'CAADAgADJmEyMU5rGAg')%0Aallowed_stickers = sn_stickers + some2_stickers%0Adefault_probability = 0.01 # value hidden%0Adel_symbols = '%60~1234567890!@#' # symbols to ignore%0Aquotes_dict = %7B # examples%0A (0.6, '%D1%83%D0%BD%D1%96%D0%B2%D0%B5%D1%80%D1%81%D0%B8%D1%82%D0%B5%D1%82', '%D1%83%D0%BD%D0%B8%D0%B2%D0%B5%D1%80%D1%81%D0%B8%D1%82%D0%B5%D1%82'): %22%22%22%D0%BD%D1%83 %D1%89%D0%BE %D1%82%D1%83%D1%82 %D1%81%D0%BA%D0%B0%D0%B7%D0%B0%D1%82%D0%B8%0A %D1%86%D0%B8%D1%82%D0%B0%D1%82%D0%B02%0A @, %D1%89%D0%BE %D0%92%D0%B8 %D0%BC%D0%B0%D0%BB%D0%B8 %D0%BD%D0%B0 %D1%83%D0%B2%D0%B0%D0%B7%D1%96?%22%22%22, # before sending @ will be replaced by username or name%0A (0.75, sn_stickers): %22%22%22%D1%81%D1%82%D1%96%D0%BA%D0%B5%D1%80 %D0%B7%D1%96 %D0%BC%D0%BD%D0%BE%D1%8E %D0%B4%D0%B5%D1%82%D0%B5%D0%BA%D1%82%D0%B5%D0%B4%0A %D0%B0 %D1%8F %D0%BD%D0%B5%D0%BF%D0%BE%D0%B3%D0%B0%D0%B9%D0%BD%D0%BE %D1%82%D1%83%D1%82 %D0%B2%D0%B8%D0%B3%D0%BB%D1%8F%D0%B4%D0%B0%D1%8E%0A %D1%86%D0%B8%D1%82%D0%B0%D1%82%D0%B03%22%22%22%7D%0A
|
|
d777a19bb804ae1a4268702da00d3138b028b386
|
Add a python script to start sysmobts-remote and dump docs
|
contrib/dump_docs.py
|
contrib/dump_docs.py
|
Python
| 0.000001 |
@@ -0,0 +1,841 @@
+#!/usr/bin/env python%0A%0A%22%22%22%0AStart the process and dump the documentation to the doc dir%0A%22%22%22%0A%0Aimport socket, subprocess, time,os%0A%0Aenv = os.environ%0Aenv%5B'L1FWD_BTS_HOST'%5D = '127.0.0.1'%0A%0Abts_proc = subprocess.Popen(%5B%22./src/osmo-bts-sysmo/sysmobts-remote%22,%0A%09%09%22-c%22, %22./doc/examples/osmo-bts.cfg%22%5D, env = env,%0A%09%09stdin=None, stdout=None)%0Atime.sleep(1)%0A%0Atry:%0A%09sck = socket.socket(socket.AF_INET, socket.SOCK_STREAM)%0A%09sck.setblocking(1)%0A%09sck.connect((%22localhost%22, 4241))%0A%09sck.recv(4096)%0A%0A%09# Now send the command%0A%09sck.send(%22show online-help%5Cr%22)%0A%09xml = %22%22%0A%09while True:%0A%09%09data = sck.recv(4096)%0A%09%09xml = %22%25s%25s%22 %25 (xml, data)%0A%09%09if data.endswith('%5Cr%5CnOsmoBTS%3E '):%0A%09%09%09break%0A%0A%09# Now write everything until the end to the file%0A%09out = open('doc/vty_reference.xml', 'w')%0A%09out.write(xml%5B18:-11%5D)%0A%09out.close()%0Afinally:%0A%09# Clean-up%0A%09bts_proc.kill()%0A%09bts_proc.wait()%0A%0A
|
|
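The receive loop is the standard blocking-socket idiom of accumulating chunks until a known prompt arrives; factored out as a helper (hypothetical, not part of the script):

def recv_until(sck, sentinel):
    """Read from a blocking socket until the buffer ends with sentinel."""
    buf = b""
    while not buf.endswith(sentinel):
        chunk = sck.recv(4096)
        if not chunk:  # peer closed before the prompt showed up
            break
        buf += chunk
    return buf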
436119b2ef8ea12f12b69e0d22dd3441b7e187cd
|
add ratelimit plugin
|
plugins/ratelimit.py
|
plugins/ratelimit.py
|
Python
| 0 |
@@ -0,0 +1,764 @@
+import time%0A%0Abuckets = %7B%7D%0Alast_tick = time.time()%0Atimeframe = float(yui.config_val('ratelimit', 'timeframe', default=60.0))%0Amax_msg = float(yui.config_val('ratelimit', 'messages', default=6.0))%0Aignore_for = 60.0 * float(yui.config_val('ratelimit', 'ignoreMinutes', default=3.0))%0A%0A%[email protected]('postCmd')%0Adef ratelimit(user, msg):%0A if user not in buckets.keys():%0A buckets%5Buser%5D = 1.0%0A else:%0A buckets%5Buser%5D += 1.0%0A if buckets%5Buser%5D %3E max_msg:%0A yui.ignore(ignore_for, user.nick)%0A%0A%[email protected]('tick')%0Adef tick():%0A global last_tick%0A now = time.time()%0A diff = now - last_tick%0A for user, n in buckets.items():%0A n -= ((max_msg / timeframe) * diff)%0A n = n if n %3E 0 else 0%0A buckets%5Buser%5D = n%0A last_tick = now%0A
|
|
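The tick handler drains every bucket at max_msg/timeframe messages per second, which makes this a leaky bucket. The same arithmetic as a self-contained class (names are illustrative):

import time

class LeakyBucket(object):
    def __init__(self, max_msg=6.0, timeframe=60.0):
        self.rate = max_msg / timeframe  # drain rate in messages/second
        self.limit = max_msg
        self.level = 0.0
        self.last = time.time()

    def allow(self):
        now = time.time()
        # drain whatever leaked out since the last call, never below empty
        self.level = max(0.0, self.level - self.rate * (now - self.last))
        self.last = now
        self.level += 1.0
        return self.level <= self.limit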
83579a7e10d66e29fc65c43ba317c6681a393d3e
|
Add simple hub datapath
|
pox/datapaths/hub.py
|
pox/datapaths/hub.py
|
Python
| 0 |
@@ -0,0 +1,1543 @@
+# Copyright 2017 James McCauley%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22);%0A# you may not use this file except in compliance with the License.%0A# You may obtain a copy of the License at:%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS,%0A# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0A# See the License for the specific language governing permissions and%0A# limitations under the License.%0A%0A%22%22%22%0AA simple hub datapath.%0A%0ALaunch it with a number of interface names, and it will pass packets%0Abetween them. Requires pxpcap to be built -- see %22Building pxpcap%22%0Ain the POX manual.%0A%0AExample:%0A ./pox.py datapaths.hub --ports=eth0,eth1,eth2%0A%22%22%22%0A%0Afrom pox.core import core%0Afrom Queue import Queue%0Aimport pox.lib.packet as pkt%0Afrom pox.lib.interfaceio import PCapInterface%0A%0A%0Aclass Hub (object):%0A %22%22%22%0A A simple hub%0A %22%22%22%0A def __init__ (self, ports=%5B%5D):%0A self._ports = set()%0A self.rx_bytes = 0%0A for p in ports:%0A self.add_port(p)%0A%0A def add_port (self, port):%0A p = PCapInterface(port)%0A p.addListeners(self)%0A self._ports.add(p)%0A%0A def _handle_RXData (self, event):%0A self.rx_bytes += len(event.data)%0A for port in self._ports:%0A if port is event.interface: continue%0A port.write(event.data)%0A%0A%0Adef launch (ports):%0A ports = ports.replace(%22,%22,%22 %22).split()%0A l = Hub()%0A core.register(%22hub%22, l)%0A for p in ports:%0A l.add_port(p)%0A
|
|
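The forwarding rule in _handle_RXData is plain flooding: repeat every frame out of each port except its ingress. In isolation:

def flood(ports, ingress, frame):
    """Send frame out of every attached port except the one it came in on."""
    for port in ports:
        if port is ingress:
            continue
        port.write(frame)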
d753fe46507d2e829c0b6ffc3120ec8f9472c4f1
|
Add Problem 59 solution.
|
project-euler/059.py
|
project-euler/059.py
|
Python
| 0.000007 |
@@ -0,0 +1,2392 @@
+'''%0AProblem 59%0A19 December 2003%0A%0AEach character on a computer is assigned a unique code and the preferred standard is ASCII (American Standard Code for Information Interchange). For example, uppercase A = 65, asterisk (*) = 42, and lowercase k = 107.%0A%0AA modern encryption method is to take a text file, convert the bytes to ASCII, then XOR each byte with a given value, taken from a secret key. The advantage with the XOR function is that using the same encryption key on the cipher text, restores the plain text; for example, 65 XOR 42 = 107, then 107 XOR 42 = 65.%0A%0AFor unbreakable encryption, the key is the same length as the plain text message, and the key is made up of random bytes. The user would keep the encrypted message and the encryption key in different locations, and without both %22halves%22, it is impossible to decrypt the message.%0A%0AUnfortunately, this method is impractical for most users, so the modified method is to use a password as a key. If the password is shorter than the message, which is likely, the key is repeated cyclically throughout the message. The balance for this method is using a sufficiently long password key for security, but short enough to be memorable.%0A%0AYour task has been made easy, as the encryption key consists of three lower case characters. Using cipher1.txt, a file containing the encrypted ASCII codes, and the knowledge that the plain text must contain common English words, decrypt the message and find the sum of the ASCII values in the original text.%0A'''%0A%0Aimport collections, operator, string%0Afile = open('059.txt', 'r')%0Aencrypted = map(int, file.read().split(','))%0Alength = len(encrypted)%0Adecrypted = %5B0 for i in range(length)%5D%0Achars = range(97, 123)%0A%0Afor i in chars:%0A for x in range(length)%5B0::3%5D:%0A decrypted%5Bx%5D = operator.xor(i, encrypted%5Bx%5D)%0A for j in chars:%0A for x in range(length)%5B1::3%5D:%0A decrypted%5Bx%5D = operator.xor(j, encrypted%5Bx%5D)%0A for k in chars:%0A for x in range(length)%5B2::3%5D:%0A decrypted%5Bx%5D = operator.xor(k, encrypted%5Bx%5D)%0A%0A # Spaces are the most common character in the English language, occurring%0A # just less than once per every 5 chars (19.18182%25), so filter by a high%0A # frequency of spaces. (See http://www.data-compression.com/english.html)%0A if (decrypted.count(32) %3E 0.15*length):%0A print ''.join(map(chr, decrypted))%0A print sum(%5Bchar for char in decrypted%5D)%0A
|
|
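The space-frequency test is the whole heuristic: with the right key, roughly one decrypted byte in five is 0x20. A tiny demonstration on a made-up ciphertext:

key = [ord(c) for c in 'abc']
plain = 'attack at dawn, attack at dusk'
cipher = [ord(ch) ^ key[i % 3] for i, ch in enumerate(plain)]
guess = [b ^ key[i % 3] for i, b in enumerate(cipher)]
print(guess.count(32) / float(len(guess)))  # ~0.17, clears the 0.15 bar
print(''.join(map(chr, guess)) == plain)    # True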
d1fcf47d62671abbb2ec8a278460dd64a4de03c2
|
Create cryptoseven.py
|
cryptoseven.py
|
cryptoseven.py
|
Python
| 0.000001 |
@@ -0,0 +1,614 @@
+import sys%0A%0A %0Adef strxor(a, b): # xor two strings of different lengths%0A if len(a) %3E len(b):%0A return %22%22.join(%5Bchr(ord(x) %5E ord(y)) for (x, y) in zip(a%5B:len(b)%5D, b)%5D)%0A else:%0A return %22%22.join(%5Bchr(ord(x) %5E ord(y)) for (x, y) in zip(a, b%5B:len(a)%5D)%5D)%0A%0A %0Adef printAscii(msg):%0A z = %5Bchr(ord(x)) for x in msg%5D %0A x = %22%22.join(z)%0A print x.encode('hex')%0A %0Adef main():%0A text = %22attack at dawn%22%0A %0A %0A enc = %226c73d5240a948c86981bc294814d%22.decode('hex')%0A key = strxor(text, enc)%0A %0A %0A text2 = %22attack at dusk%22%0A enc2 = strxor(text2, key)%0A %0A print enc2.encode('hex')%0A %0A %0A %0Amain()%0A
|
|
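The script exploits XOR's self-inverse property: key = plaintext ^ ciphertext, so a second message encrypted under the same keystream can be forged outright. A Python 3 rendering of the same attack:

def xor_bytes(a, b):
    return bytes(x ^ y for x, y in zip(a, b))

enc = bytes.fromhex('6c73d5240a948c86981bc294814d')
key = xor_bytes(b'attack at dawn', enc)         # recover the keystream
print(xor_bytes(b'attack at dusk', key).hex())  # forged ciphertext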
baeecbd66e1acd48aa11fdff4c65567c72d88186
|
Create client.py
|
ohesteebee/client.py
|
ohesteebee/client.py
|
Python
| 0.000001 |
@@ -0,0 +1,1093 @@
+%22%22%22Ohessteebee client.%22%22%22%0Aimport requests%0Afrom typing import Dict%0A%0APutDict = Dict%5Bstr, str%5D%0A%0Aclass Ohessteebee:%0A%0A def __init__(self, endpoint, port=4242):%0A self.session = requests.Session()%0A self.req_path = %22http://%7Bendpoint%7D:%7Bport%7D%22.format(%0A endpoint=endpoint, port=port)%0A%0A def _generate_put_dict(metric: str, timestamp: int, value: int, **kwargs) -%3E PutDict:%0A if kwargs:%0A tags = %7B**kwargs%7D%0A else:%0A tags = %7B%7D%0A%0A response = %7B%0A %22metric%22: metric,%0A %22timestamp%22: timestamp,%0A %22value%22: value,%0A %22tags%22: tags%0A %7D%0A%0A return response%0A%0A def query(self, metric: str, start_date=None, end_date=None):%0A %22%22%22Get metric from OSTB.%22%22%22%0A path = %22/api/query%22%0A api_url = self.req_path + path%0A self.session.get(api_url)%0A%0A def put(self, metric: str, timestamp: int, **kwargs):%0A %22%22%22Put metric into OSTB.%22%22%22%0A path = %22/api/put%22%0A api_url = self.req_path + path%0A%0A data = self._generate_put_dict()%0A self.sesion.post(api_url)%0A
|
|
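As committed, put() builds no payload and posts nothing (note the sesion typo and the missing arguments to _generate_put_dict); the OpenTSDB call it is aiming for, sketched independently:

import requests

def put_metric(endpoint, metric, timestamp, value, port=4242, **tags):
    """POST a single data point to OpenTSDB's /api/put."""
    payload = {"metric": metric, "timestamp": timestamp,
               "value": value, "tags": tags}
    url = "http://{0}:{1}/api/put".format(endpoint, port)
    return requests.post(url, json=payload)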
22494a45d2bce6774bdc50409a71f259841287f5
|
add initial GlimError
|
glim/exception.py
|
glim/exception.py
|
Python
| 0.01808 |
@@ -0,0 +1,34 @@
+%0Aclass GlimError(Exception):%0A%09pass
|
|
8d5059fcd672fb4f0fcd7a2b57bf41f57b6269e5
|
add mongo handler
|
src/orchestrator/core/mongo.py
|
src/orchestrator/core/mongo.py
|
Python
| 0.000001 |
@@ -0,0 +1,2255 @@
+#%0A# Copyright 2018 Telefonica Espana%0A#%0A# This file is part of IoT orchestrator%0A#%0A# IoT orchestrator is free software: you can redistribute it and/or%0A# modify it under the terms of the GNU Affero General Public License as%0A# published by the Free Software Foundation, either version 3 of the%0A# License, or (at your option) any later version.%0A#%0A# IoT orchestrator is distributed in the hope that it will be useful,%0A# but WITHOUT ANY WARRANTY; without even the implied warranty of%0A# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero%0A# General Public License for more details.%0A#%0A# You should have received a copy of the GNU Affero General Public License%0A# along with IoT orchestrator. If not, see http://www.gnu.org/licenses/.%0A#%0A# For those usages not covered by this license please contact with%0A# iot_support at tid dot es%0A#%0A# Author: IoT team%0A#%0Aimport json%0Aimport logging%0A%0Afrom orchestrator.common.util import RestOperations%0A%0Aimport pymongo%0A%0Alogger = logging.getLogger('orchestrator_core')%0A%0Aclass MongoDBOperations(object):%0A '''%0A IoT platform: MongoDB%0A '''%0A%0A def __init__(self,%0A MONGODB_URI=None,%0A CORRELATOR_ID=None,%0A TRANSACTION_ID=None):%0A self.MONGODB_URI = MONGO_URI%0A self.client = pymongo.MongoClient(self.MONGODB_URI)%0A%0A def checkMongo(self):%0A try:%0A client.list_databases()%0A return True%0A except Exception, e:%0A logger.warn(%22checkMongo exception: %25s%22 %25 e)%0A return False%0A%0A def createIndexes(self, SERVICE_NAME):%0A try: %0A databaseName = 'orion-' + SERVICE_NAME%0A db = self.client%5BdatabaseName%5D%0A db.entities.create_index(%22_id.id%22)%0A db.entities.create_index(%22_id.type%22)%0A db.entities.create_index(%22_id.servicePath%22)%0A db.entities.create_index(%22_id.creDate%22)%0A except Exception, e:%0A logger.warn(%22createIndex exception: %25s%22 %25 e)%0A%0A def removeDatabase(self, SERVICE_NAME):%0A try: %0A databaseName = 'orion-' + SERVICE_NAME%0A self.client.drop_database(databaseName)%0A except Exception, e:%0A logger.warn(%22createIndex exception: %25s%22 %25 e) %0A %0A
|
|
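Two slips in the module above: __init__ assigns from MONGO_URI while the parameter is MONGODB_URI, and checkMongo calls a bare client instead of self.client. The reachability probe it intends is essentially:

import pymongo

def check_mongo(uri):
    """Return True when the MongoDB behind uri answers a ping."""
    client = pymongo.MongoClient(uri, serverSelectionTimeoutMS=2000)
    try:
        client.admin.command('ping')
        return True
    except pymongo.errors.PyMongoError:
        return False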
90ec9def45bcc50047d3511943c463f57f771f00
|
Bump to 3.2.0
|
dbbackup/__init__.py
|
dbbackup/__init__.py
|
"Management commands to help backup and restore a project database and media"
VERSION = (3, 1, 3)
__version__ = '.'.join([str(i) for i in VERSION])
__author__ = 'Michael Shepanski'
__email__ = '[email protected]'
__url__ = 'https://github.com/django-dbbackup/django-dbbackup'
default_app_config = 'dbbackup.apps.DbbackupConfig'
|
Python
| 0.00008 |
@@ -89,12 +89,12 @@
(3,
-1, 3
+2, 0
)%0A__
|
2c29829bb6e0483a3dc7d98bc887ae86a3a233b7
|
Fix dir name of preprocess
|
pyPanair/preprocess/__init__.py
|
pyPanair/preprocess/__init__.py
|
Python
| 0.998086 |
@@ -0,0 +1 @@
+
|
|
3e7f8c5b87a85958bd45636788215db1ba4f2fd8
|
Create __init__.py
|
src/site/app/model/__init__.py
|
src/site/app/model/__init__.py
|
Python
| 0.000429 |
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-%0A
|
|
b14545460da0b481ff604a1770441cc963c21b15
|
Clean up JS extraction
|
cfscrape/__init__.py
|
cfscrape/__init__.py
|
from time import sleep
import logging
import random
import re
import os
from requests.sessions import Session
import js2py
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
DEFAULT_USER_AGENTS = [
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0",
"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:41.0) Gecko/20100101 Firefox/41.0"
]
DEFAULT_USER_AGENT = random.choice(DEFAULT_USER_AGENTS)
class CloudflareScraper(Session):
def __init__(self, *args, **kwargs):
super(CloudflareScraper, self).__init__(*args, **kwargs)
if "requests" in self.headers["User-Agent"]:
# Spoof Firefox on Linux if no custom User-Agent has been set
self.headers["User-Agent"] = DEFAULT_USER_AGENT
def request(self, method, url, *args, **kwargs):
resp = super(CloudflareScraper, self).request(method, url, *args, **kwargs)
# Check if Cloudflare anti-bot is on
if resp.status_code == 503 and resp.headers.get("Server") == "cloudflare-nginx":
return self.solve_cf_challenge(resp, **kwargs)
# Otherwise, no Cloudflare anti-bot detected
return resp
def solve_cf_challenge(self, resp, **kwargs):
sleep(5) # Cloudflare requires a delay before solving the challenge
body = resp.text
parsed_url = urlparse(resp.url)
domain = urlparse(resp.url).netloc
submit_url = "%s://%s/cdn-cgi/l/chk_jschl" % (parsed_url.scheme, domain)
params = kwargs.setdefault("params", {})
headers = kwargs.setdefault("headers", {})
headers["Referer"] = resp.url
try:
params["jschl_vc"] = re.search(r'name="jschl_vc" value="(\w+)"', body).group(1)
params["pass"] = re.search(r'name="pass" value="(.+?)"', body).group(1)
# Extract the arithmetic operation
js = self.extract_js(body)
except Exception:
# Something is wrong with the page.
# This may indicate Cloudflare has changed their anti-bot
# technique. If you see this and are running the latest version,
# please open a GitHub issue so I can update the code accordingly.
logging.error("[!] Unable to parse Cloudflare anti-bots page. "
"Try upgrading cloudflare-scrape, or submit a bug report "
"if you are running the latest version. Please read "
"https://github.com/Anorov/cloudflare-scrape#updates "
"before submitting a bug report.")
raise
# Safely evaluate the Javascript expression
js = js.replace('return', '')
params["jschl_answer"] = str(int(js2py.eval_js(js)) + len(domain))
return self.get(submit_url, **kwargs)
def extract_js(self, body):
js = re.search(r"setTimeout\(function\(\){\s+(var "
"s,t,o,p,b,r,e,a,k,i,n,g,f.+?\r?\n[\s\S]+?a\.value =.+?)\r?\n", body).group(1)
js = re.sub(r"a\.value = (parseInt\(.+?\)).+", r"\1", js)
js = re.sub(r"\s{3,}[a-z](?: = |\.).+", "", js)
# Strip characters that could be used to exit the string context
# These characters are not currently used in Cloudflare's arithmetic snippet
js = re.sub(r"[\n\\']", "", js)
return js.replace("parseInt", "return parseInt")
@classmethod
def create_scraper(cls, sess=None, **kwargs):
"""
Convenience function for creating a ready-to-go requests.Session (subclass) object.
"""
scraper = cls()
if sess:
attrs = ["auth", "cert", "cookies", "headers", "hooks", "params", "proxies", "data"]
for attr in attrs:
val = getattr(sess, attr, None)
if val:
setattr(scraper, attr, val)
return scraper
## Functions for integrating cloudflare-scrape with other applications and scripts
@classmethod
def get_tokens(cls, url, user_agent=None, **kwargs):
scraper = cls.create_scraper()
if user_agent:
scraper.headers["User-Agent"] = user_agent
try:
resp = scraper.get(url)
resp.raise_for_status()
except Exception as e:
logging.error("'%s' returned an error. Could not collect tokens." % url)
raise
domain = urlparse(resp.url).netloc
cookie_domain = None
for d in scraper.cookies.list_domains():
if d.startswith(".") and d in ("." + domain):
cookie_domain = d
break
else:
raise ValueError("Unable to find Cloudflare cookies. Does the site actually have Cloudflare IUAM (\"I'm Under Attack Mode\") enabled?")
return ({
"__cfduid": scraper.cookies.get("__cfduid", "", domain=cookie_domain),
"cf_clearance": scraper.cookies.get("cf_clearance", "", domain=cookie_domain)
},
scraper.headers["User-Agent"]
)
@classmethod
def get_cookie_string(cls, url, user_agent=None, **kwargs):
"""
Convenience function for building a Cookie HTTP header value.
"""
tokens, user_agent = cls.get_tokens(url, user_agent=user_agent)
return "; ".join("=".join(pair) for pair in tokens.items()), user_agent
create_scraper = CloudflareScraper.create_scraper
get_tokens = CloudflareScraper.get_tokens
get_cookie_string = CloudflareScraper.get_cookie_string
|
Python
| 0.000002 |
@@ -59,18 +59,8 @@
re%0A
-import os%0A
from
@@ -3034,46 +3034,8 @@
ion%0A
- js = js.replace('return', '')%0A
@@ -3691,47 +3691,8 @@
n js
-.replace(%22parseInt%22, %22return parseInt%22)
%0A%0A
|
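Decoded, the three hunks drop the unused os import and remove the 'return' round-trip: extract_js no longer rewrites parseInt into "return parseInt", so the caller no longer strips the keyword back out before evaluating:

# before
js = self.extract_js(body)
js = js.replace('return', '')
params["jschl_answer"] = str(int(js2py.eval_js(js)) + len(domain))

# after
js = self.extract_js(body)
params["jschl_answer"] = str(int(js2py.eval_js(js)) + len(domain))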
38c2291ab23d86d220446e594d52cce80ea4ec2a
|
Create Count_Inversions_Array.py
|
Experience/Count_Inversions_Array.py
|
Experience/Count_Inversions_Array.py
|
Python
| 0.000003 |
@@ -0,0 +1,1827 @@
+'''%0AInversion Count for an array indicates %E2%80%93 how far (or close) the array is from being sorted. If array is already sorted then inversion count is 0. If array is sorted in reverse order that inversion count is the maximum. %0AFormally speaking, two elements a%5Bi%5D and a%5Bj%5D form an inversion if a%5Bi%5D %3E a%5Bj%5D and i %3C j%0A%0AExample:%0AThe sequence 2, 4, 1, 3, 5 has three inversions (2, 1), (4, 1), (4, 3).%0A'''%0A%0A# Note: G4G Analysis (http://www.geeksforgeeks.org/counting-inversions/)%0Adef count_inver(A):%0A if not A: return A%0A length = len(A)%0A return merge_sort(A, 0, length-1)%0A%0A%0Adef merge_sort(A, left, right):%0A inver_cnt = 0%0A if left %3C right:%0A mid = (left + right)/2%0A inver_cnt = merge_sort(A, left, mid)%0A inver_cnt += merge_sort(A, mid+1, right)%0A inver_cnt += merge(A, left, mid+1, right)%0A return inver_cnt%0A%0Adef merge(A, left, mid, right):%0A i = left; j = mid; k = left%0A print %22i: %25d, mid: %25d, j: %25d, k: %25d, right: %25d%22 %25(i, mid, j, k, right)%0A inver_cnt = 0%0A tmp = %5B0 for p in xrange(len(A))%5D%0A print %22tmp: %22, tmp%0A while (i %3C mid) and (j %3C= right):%0A print %22A%5Bi%5D: %25d, A%5Bj%5D: %25d%22 %25(A%5Bi%5D, A%5Bj%5D)%0A if A%5Bi%5D %3C= A%5Bj%5D:%0A tmp%5Bk%5D = A%5Bi%5D%0A i += 1%0A k += 1%0A print %22%3C after: i: %25d, j: %25d, k: %25d, right: %25d%22 %25(i, j, k, right)%0A else:%0A tmp%5Bk%5D = A%5Bj%5D%0A j += 1%0A k += 1%0A print %22%3E after: i: %25d, j: %25d, k: %25d, right: %25d%22 %25(i, j, k, right)%0A inver_cnt += mid - i%0A print %22inver_cnt: %22, inver_cnt%0A%0A while i %3C mid:%0A tmp%5Bk%5D = A%5Bi%5D%0A i += 1%0A k += 1%0A%0A while j %3C= right:%0A tmp%5Bk%5D = A%5Bj%5D%0A j += 1%0A k ++ 1%0A%0A A%5Bleft:right+1%5D = tmp%5Bleft:right+1%5D%0A print %22after merge: A%22, A%0A return inver_cnt%0A%0Ailist = %5B2,4,5,1,3,5%5D%0Aprint count_inver(ilist)%0A
|
|
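Two slips in the committed merge step are worth flagging: inver_cnt += mid - i runs on every loop iteration instead of only when the right-half element wins, and the final drain has a k ++ 1 no-op where k += 1 was meant. A corrected reference version:

def count_inversions(a):
    """Count pairs i < j with a[i] > a[j] via merge sort, O(n log n)."""
    if len(a) < 2:
        return 0
    mid = len(a) // 2
    left, right = a[:mid], a[mid:]
    inv = count_inversions(left) + count_inversions(right)
    i = j = k = 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            a[k] = left[i]; i += 1
        else:
            a[k] = right[j]; j += 1
            inv += len(left) - i  # all remaining left elements are inversions
        k += 1
    while i < len(left):
        a[k] = left[i]; i += 1; k += 1
    while j < len(right):
        a[k] = right[j]; j += 1; k += 1
    return inv

assert count_inversions([2, 4, 1, 3, 5]) == 3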
0250d46aec1bb060de3cc9a0e619670a7b0d1d03
|
append log
|
delugePostProcess.py
|
delugePostProcess.py
|
#!/usr/bin/env python
import os
import sys
import json
import shutil
from autoprocess import autoProcessTV, autoProcessMovie, autoProcessTVSR
from readSettings import ReadSettings
from mkvtomp4 import MkvtoMp4
from deluge import DelugeClient
import logging
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M',
filename=os.path.join(os.path.dirname(sys.argv[0]), "info.log"),
filemode='w')
log = logging.getLogger("delugePostProcess")
log.info("Deluge post processing started.")
settings = ReadSettings(os.path.dirname(sys.argv[0]), "autoProcess.ini")
categories = [settings.deluge['sb'], settings.deluge['cp'], settings.deluge['sonarr'], settings.deluge['sr'], settings.deluge['bypass']]
if len(sys.argv) < 4:
log.error("Not enough command line parameters present, are you launching this from deluge?")
sys.exit()
path = str(sys.argv[3])
torrent_name = str(sys.argv[2])
torrent_id = str(sys.argv[1])
log.debug("Path: %s." % path)
log.debug("Torrent: %s." % torrent_name)
log.debug("Hash: %s." % torrent_id)
client = DelugeClient()
client.connect(host=settings.deluge['host'], port=int(settings.deluge['port']), username=settings.deluge['user'], password=settings.deluge['pass'])
torrent_files = client.core.get_torrent_status(torrent_id, ['files']).get()['files']
files = []
log.debug("List of files in torrent:")
for contents in torrent_files:
files.append(contents['path'])
log.debug(contents['path'])
try:
category = client.core.get_torrent_status(torrent_id, ['label']).get()['label'].lower()
log.debug("Category: %s" % category)
except Exception as e:
    log.exception("Unable to connect to deluge to retrieve category.")
sys.exit()
if category.lower() not in categories:
log.error("No valid category detected.")
sys.exit()
if len(categories) != len(set(categories)):
log.error("Duplicate category detected. Category names must be unique.")
sys.exit()
if settings.deluge['convert']:
# Perform conversion.
settings.delete = False
if not settings.output_dir:
settings.output_dir = os.path.join(path, torrent_id)
if not os.path.exists(settings.output_dir):
os.mkdir(settings.output_dir)
delete_dir = settings.output_dir
converter = MkvtoMp4(settings)
for filename in files:
inputfile = os.path.join(path, filename)
log.info("Converting file %s at location %s." % (inputfile, settings.output_dir))
if MkvtoMp4(settings).validSource(inputfile):
converter.process(inputfile, reportProgress=True)
path = converter.output_dir
else:
newpath = os.path.join(path, torrent_id)
if not os.path.exists(newpath):
os.mkdir(newpath)
for filename in files:
inputfile = os.path.join(path, filename)
log.info("Copying file %s to %s." % (inputfile, newpath))
shutil.copy(inputfile, newpath)
path = newpath
delete_dir = newpath
# Send to Sickbeard
if (category == categories[0]):
log.info("Passing %s directory to Sickbeard." % path)
autoProcessTV.processEpisode(path, settings)
# Send to CouchPotato
elif (category == categories[1]):
log.info("Passing %s directory to CouchPotato." % path)
autoProcessMovie.process(path, settings)
# Send to Sonarr
elif (category == categories[2]):
log.info("Passing %s directory to Sonarr." % path)
# Import requests
try:
import requests
except ImportError:
log.exception("Python module REQUESTS is required. Install with 'pip install requests' then try again.")
sys.exit()
host=settings.Sonarr['host']
port=settings.Sonarr['port']
apikey = settings.Sonarr['apikey']
if apikey == '':
log.error("Your Sonarr API Key can not be blank. Update autoProcess.ini")
try:
ssl=int(settings.Sonarr['ssl'])
except:
ssl=0
if ssl:
protocol="https://"
else:
protocol="http://"
url = protocol+host+":"+port+"/api/command"
payload = {'name': 'downloadedepisodesscan','path': path}
log.info("Requesting Sonarr to scan folder '"+path+"'")
headers = {'X-Api-Key': apikey}
try:
r = requests.post(url, data=json.dumps(payload), headers=headers)
rstate = r.json()
log.info("Sonarr responds as "+rstate['state']+".")
except:
log.error("Update to Sonarr failed, check if Sonarr is running, autoProcess.ini for errors, or check install of python modules requests.")
elif (category == categories[3]):
log.info("Passing %s directory to Sickrage." % path)
autoProcessTVSR.processEpisode(path, settings)
elif (category == categories[4]):
log.info("Bypassing any further processing as per category.")
if delete_dir:
try:
os.rmdir(delete_dir)
log.debug("Successfully removed tempoary directory %s." % delete_dir)
except:
log.exception("Unable to delete temporary directory.")
|
Python
| 0.002123 |
@@ -506,17 +506,17 @@
lemode='
-w
+a
')%0Alog =
|
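Decoded, the hunk flips filemode='w' to filemode='a' in the logging.basicConfig call, so successive runs append to info.log instead of truncating it.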
59b531e11266b2ff8184c04cda92bcc2fad71fa0
|
Create core.py
|
crispy/actions/core.py
|
crispy/actions/core.py
|
Python
| 0.000001 |
@@ -0,0 +1,71 @@
+from crispy.actions.attacks import Attack, Melee, Ranged, Throw, Shoot%0A
|
|
d6dc45756cbb30a8f707d683943ccd4ee0391e6b
|
Add an aws settings for the cms
|
cms/envs/aws.py
|
cms/envs/aws.py
|
Python
| 0 |
@@ -0,0 +1,1389 @@
+%22%22%22%0AThis is the default template for our main set of AWS servers.%0A%22%22%22%0Aimport json%0A%0Afrom .logsettings import get_logger_config%0Afrom .common import *%0A%0A############################### ALWAYS THE SAME ################################%0ADEBUG = False%0ATEMPLATE_DEBUG = False%0A%0AEMAIL_BACKEND = 'django_ses.SESBackend'%0ASESSION_ENGINE = 'django.contrib.sessions.backends.cache'%0ADEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'%0A%0A########################### NON-SECURE ENV CONFIG ##############################%0A# Things like server locations, ports, etc.%0Awith open(ENV_ROOT / %22env.json%22) as env_file:%0A ENV_TOKENS = json.load(env_file)%0A%0ASITE_NAME = ENV_TOKENS%5B'SITE_NAME'%5D%0A%0ALOG_DIR = ENV_TOKENS%5B'LOG_DIR'%5D%0A%0ACACHES = ENV_TOKENS%5B'CACHES'%5D%0A%0Afor feature, value in ENV_TOKENS.get('MITX_FEATURES', %7B%7D).items():%0A MITX_FEATURES%5Bfeature%5D = value%0A%0ALOGGING = get_logger_config(LOG_DIR,%0A logging_env=ENV_TOKENS%5B'LOGGING_ENV'%5D,%0A syslog_addr=(ENV_TOKENS%5B'SYSLOG_SERVER'%5D, 514),%0A debug=False)%0A%0AREPOS = ENV_TOKENS%5B'REPOS'%5D%0A%0A%0A############################## SECURE AUTH ITEMS ###############################%0A# Secret things: passwords, access keys, etc.%0Awith open(ENV_ROOT / %22auth.json%22) as auth_file:%0A AUTH_TOKENS = json.load(auth_file)%0A%0ADATABASES = AUTH_TOKENS%5B'DATABASES'%5D%0AMODULESTORE = AUTH_TOKENS%5B'MODULESTORE'%5D%0A
|
|
b32d659b85901a8e04c6c921928483fda3b3e6e0
|
Add the storage utility for parsing the config file structure in a more readable fashion.
|
src/leap/mx/util/storage.py
|
src/leap/mx/util/storage.py
|
Python
| 0 |
@@ -0,0 +1,885 @@
+%0Aclass Storage(dict):%0A %22%22%22%0A A Storage object is like a dictionary except %60obj.foo%60 can be used%0A in addition to %60obj%5B'foo'%5D%60.%0A%0A %3E%3E%3E o = Storage(a=1)%0A %3E%3E%3E o.a%0A 1%0A %3E%3E%3E o%5B'a'%5D%0A 1%0A %3E%3E%3E o.a = 2%0A %3E%3E%3E o%5B'a'%5D%0A 2%0A %3E%3E%3E del o.a%0A %3E%3E%3E o.a%0A None%0A %22%22%22%0A def __getattr__(self, key):%0A try:%0A return self%5Bkey%5D%0A except KeyError, k:%0A return None%0A%0A def __setattr__(self, key, value):%0A self%5Bkey%5D = value%0A%0A def __delattr__(self, key):%0A try:%0A del self%5Bkey%5D%0A except KeyError, k:%0A raise AttributeError, k%0A%0A def __repr__(self):%0A return '%3CStorage ' + dict.__repr__(self) + '%3E'%0A%0A def __getstate__(self):%0A return dict(self)%0A%0A def __setstate__(self, value):%0A for (k, v) in value.items():%0A self%5Bk%5D = v%0A
|
|
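Beyond the doctest, the class is meant for attribute-style access to parsed config sections; reusing the Storage defined above:

cfg = Storage(host='localhost', port=4430)
print(cfg.host, cfg['port'])  # attribute and key access are interchangeable
cfg.debug = True              # equivalent to cfg['debug'] = True
print(cfg.missing)            # None rather than an AttributeError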
7c8f2464b303b2a40f7434a0c26b7f88c93b6ddf
|
add migration
|
corehq/apps/accounting/migrations/0036_subscription_skip_invoicing_if_no_feature_charges.py
|
corehq/apps/accounting/migrations/0036_subscription_skip_invoicing_if_no_feature_charges.py
|
Python
| 0.000001 |
@@ -0,0 +1,477 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import models, migrations%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('accounting', '0035_kill_date_received'),%0A %5D%0A%0A operations = %5B%0A migrations.AddField(%0A model_name='subscription',%0A name='skip_invoicing_if_no_feature_charges',%0A field=models.BooleanField(default=False),%0A preserve_default=True,%0A ),%0A %5D%0A
|
|
5e765ecf387d52c22371a69df82beacddcd12e38
|
Test COREID is read-only.
|
revelation/test/test_storage.py
|
revelation/test/test_storage.py
|
Python
| 0 |
@@ -0,0 +1,564 @@
+from revelation.test.machine import StateChecker, new_state%0A%0A%0Adef test_coreid_read_only():%0A state = new_state(rfCOREID=0x808)%0A # Change by writing to register.%0A state.rf%5B0x65%5D = 0x100%0A expected_state = StateChecker(rfCOREID=0x808)%0A expected_state.check(state)%0A # Change by writing to memory.%0A # This _is_ possible, because we need to be able to write the COREID%0A # location when the state is initially constructed.%0A state.mem.write(0x808f0704, 12, 0x100)%0A expected_state = StateChecker(rfCOREID=0x100)%0A expected_state.check(state)%0A
|