commit (string, 40–40) | subject (string, 1–3.25k) | old_file (string, 4–311) | new_file (string, 4–311) | old_contents (string, 0–26.3k) | lang (string, 3 classes) | proba (float64, 0–1) | diff (string, 0–7.82k)
---|---|---|---|---|---|---|---|
00efe5a076bf3029b91e8f64e39892922d9632a9
|
Fix carbon timer proc being GC'd.
|
pyglet/app/carbon.py
|
pyglet/app/carbon.py
|
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import ctypes
import Queue
from pyglet import app
from pyglet.app.base import PlatformEventLoop
from pyglet.libs.darwin import *
EventLoopTimerProc = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p)
class CarbonEventLoop(PlatformEventLoop):
def __init__(self):
self._event_loop = carbon.GetMainEventLoop()
self._timer = ctypes.c_void_p()
self._timer_func = None
super(CarbonEventLoop, self).__init__()
def notify(self):
carbon.SetEventLoopTimerNextFireTime(
self._timer, ctypes.c_double(0.0))
def start(self):
# Create timer
timer = self._timer
idle_event_proc = EventLoopTimerProc(self._timer_proc)
carbon.InstallEventLoopTimer(self._event_loop,
ctypes.c_double(0.1), #?
ctypes.c_double(kEventDurationForever),
idle_event_proc,
None,
ctypes.byref(timer))
def stop(self):
carbon.RemoveEventLoopTimer(self._timer)
def step(self, timeout=None):
self.dispatch_posted_events()
event_dispatcher = carbon.GetEventDispatcherTarget()
e = ctypes.c_void_p()
if timeout is None:
timeout = kEventDurationForever
self._is_running.set()
if carbon.ReceiveNextEvent(0, None, ctypes.c_double(timeout),
True, ctypes.byref(e)) == 0:
carbon.SendEventToEventTarget(e, event_dispatcher)
carbon.ReleaseEvent(e)
self._is_running.clear()
def set_timer(self, func, interval):
if interval is None or func is None:
interval = kEventDurationForever
self._timer_func = func
carbon.SetEventLoopTimerNextFireTime(self._timer,
ctypes.c_double(interval))
def _timer_proc(self, timer, data):
if self._timer_func:
self._timer_func()
'''
self.dispatch_posted_events()
allow_polling = True
for window in app.windows:
# Check for live resizing
if window._resizing is not None:
allow_polling = False
old_width, old_height = window._resizing
rect = Rect()
carbon.GetWindowBounds(window._window,
kWindowContentRgn,
ctypes.byref(rect))
width = rect.right - rect.left
height = rect.bottom - rect.top
if width != old_width or height != old_height:
window._resizing = width, height
window.switch_to()
window.dispatch_event('on_resize', width, height)
# Check for live dragging
if window._dragging:
allow_polling = False
# Check for deferred recreate
if window._recreate_deferred:
# Break out of ReceiveNextEvent so it can be processed
# in next iteration.
carbon.QuitEventLoop(self._event_loop)
self._force_idle = True
sleep_time = self.idle()
if sleep_time is None:
sleep_time = kEventDurationForever
elif sleep_time < 0.01 and allow_polling and self._allow_polling:
# Switch event loop to polling.
carbon.QuitEventLoop(self._event_loop)
self._force_idle = True
sleep_time = kEventDurationForever
carbon.SetEventLoopTimerNextFireTime(timer, ctypes.c_double(sleep_time))
'''
|
Python
| 0 |
@@ -2183,16 +2183,85 @@
= None%0A
+ self._timer_func_proc = EventLoopTimerProc(self._timer_proc)%0A
@@ -2300,16 +2300,16 @@
nit__()%0A
-
%0A def
@@ -2493,71 +2493,8 @@
mer%0A
- idle_event_proc = EventLoopTimerProc(self._timer_proc)%0A
@@ -2724,18 +2724,24 @@
-idle_event
+self._timer_func
_pro
|
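The hunks above fix a classic ctypes pitfall: `EventLoopTimerProc(self._timer_proc)` was created as a local inside `start()`, so the callback wrapper could be garbage-collected as soon as `start()` returned, leaving Carbon holding a dangling function pointer. Storing the wrapper on `self` pins it for the lifetime of the event loop. A minimal sketch of the pattern, with an `install` callable standing in for `carbon.InstallEventLoopTimer`:

```python
import ctypes

CALLBACK = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p)

class EventLoop(object):
    def __init__(self):
        # Fixed pattern: the ctypes wrapper lives as long as the instance,
        # so the C side never calls through a collected object.
        self._timer_func_proc = CALLBACK(self._timer_proc)

    def start_buggy(self, install):
        # Buggy pattern: `proc` dies when this frame exits, leaving the
        # C event loop with a pointer into freed memory.
        proc = CALLBACK(self._timer_proc)
        install(proc)

    def start_fixed(self, install):
        install(self._timer_func_proc)

    def _timer_proc(self, timer, data):
        pass
```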
b41444b5f7c48c4bc46a49405f7b053dcb8ea66c
|
rename resource function
|
into/backends/sas.py
|
into/backends/sas.py
|
from __future__ import absolute_import, division, print_function
import sas7bdat
from sas7bdat import SAS7BDAT
import datashape
from datashape import discover, dshape
from collections import Iterator
import pandas as pd
import sqlalchemy as sa
from .sql import dshape_to_alchemy, dshape_to_table
from ..append import append
from ..convert import convert
from ..resource import resource
SAS_type_map = {'number': 'float64',
'string': 'string'}
@resource.register('.+\.(sas7bdat)')
def resource_csv(uri, **kwargs):
return SAS7BDAT(uri, **kwargs)
@discover.register(SAS7BDAT)
def discover_sas(f, **kwargs):
cols = [col.name.decode("utf-8") for col in f.header.parent.columns]
types = [SAS_type_map[col.type] for col in f.header.parent.columns]
measure = ",".join(col + ":" + _type for col, _type in zip(cols, types))
ds = "var * {" + measure + "}"
return dshape(ds)
@convert.register(pd.DataFrame, SAS7BDAT, cost=4.0)
def sas_to_DataFrame(s, dshape=None, **kwargs):
return s.to_data_frame()
@convert.register(list, SAS7BDAT, cost=8.0)
def sas_to_list(s, dshape=None, **kwargs):
s.skip_header = True
return list(s.readlines())
@convert.register(Iterator, SAS7BDAT, cost=1.0)
def sas_to_iterator(s):
s.skip_header = True
return s.readlines()
@append.register(sa.Table, SAS7BDAT)
def append_sas_to_table(t, s, **kwargs):
append(t, sas_to_iterator(s), **kwargs)
def sas_to_table(s, metadata=None):
ds = discover_sas(s)
name = s.header.properties.name.decode("utf-8")
return dshape_to_table(name, ds, metadata)
|
Python
| 0.000005 |
@@ -512,11 +512,11 @@
rce_
-csv
+sas
(uri
|
4e1d611a06874d478e91185a0349cfc3747e36ab
|
Create __init__.py
|
bin/map/__init__.py
|
bin/map/__init__.py
|
Python
| 0.000429 |
@@ -0,0 +1 @@
+%0A
|
|
7f4079c30bf5a693f1ccad38109bbfc83a076f22
|
Add palette utilities
|
bingraphvis/util.py
|
bingraphvis/util.py
|
Python
| 0 |
@@ -0,0 +1,1138 @@
+#generated using palettable%0APALETTES = %7B%0A 'grays' : %5B'#FFFFFD', '#D6D6D4', '#B1B1B0', '#908F8F', '#727171', '#545453', '#373737', '#1A1919', '#000000'%5D, %0A 'greens' : %5B'#F7FCF5', '#E5F5E0', '#C7E9C0', '#A1D99B', '#74C476', '#41AB5D', '#238B45', '#006D2C', '#00441B'%5D, %0A 'purples': %5B'#FCFBFD', '#EFEDF5', '#DADAEB', '#BCBDDC', '#9E9AC8', '#807DBA', '#6A51A3', '#54278F', '#3F007D'%5D, %0A 'blues' : %5B'#F7FBFF', '#DEEBF7', '#C6DBEF', '#9ECAE1', '#6BAED6', '#4292C6', '#2171B5', '#08519C', '#08306B'%5D,%0A 'reds' : %5B'#FFF5F0', '#FEE0D2', '#FCBBA1', '#FC9272', '#FB6A4A', '#EF3B2C', '#CB181D', '#A50F15', '#67000D'%5D%0A%7D%0A%0Atry:%0A from palettable.colorbrewer.sequential import *%0A from palettable.cmocean.sequential import *%0A%0A PALETTES.update(%7B%0A%09'greens' : Greens_9.hex_colors,%0A%09'blues' : Blues_9.hex_colors,%0A%09'purples': Purples_9.hex_colors,%0A%09'reds' : Reds_9.hex_colors,%0A%09'grays' : Gray_9_r.hex_colors,%0A%09'algae' : Algae_8.hex_colors,%0A%09'solar' : Solar_9_r.hex_colors%0A %7D)%0Aexcept Exception,e:%0A print e%0A pass%0A%0Aprint PALETTES%0A%0Adef get_palette(name):%0A return PALETTES%5Bname%5D%0A %0Adef get_palette_names():%0A return PALETTES.keys()%0A
|
|
4022a35c3799abb2da16a178cfdc16bf93a9c580
|
Fix syntax error
|
blues/postgres.py
|
blues/postgres.py
|
"""
Postgres Blueprint
==================
**Fabric environment:**
.. code-block:: yaml
blueprints:
- blues.postgres
settings:
postgres:
version: 9.3 # PostgreSQL version (required)
# bind: * # What IP address(es) to listen on, use '*' for all (Default: localhost)
schemas:
some_schema_name: # The schema name
user: foo # Username to connect to schema
password: bar # Password to connect to schema (optional)
"""
import os
from datetime import datetime
from fabric.contrib import files
from fabric.decorators import task
from fabric.operations import prompt
from fabric.state import env
from refabric.api import run, info
from refabric.context_managers import sudo, silent
from refabric.contrib import blueprints
from . import debian
__all__ = ['start', 'stop', 'restart', 'reload', 'setup', 'configure',
'setup_schemas', 'generate_pgtune_conf', 'dump']
blueprint = blueprints.get(__name__)
start = debian.service_task('postgresql', 'start')
stop = debian.service_task('postgresql', 'stop')
restart = debian.service_task('postgresql', 'restart')
reload = debian.service_task('postgresql', 'reload')
version = lambda: blueprint.get('version', '9.1')
postgres_root = lambda *a: os.path.join('/etc/postgresql/{}/main/'.format(version()), *a)
def install():
with sudo():
debian.apt_get('install',
'postgresql',
'postgresql-server-dev-{}'.format(version()),
'libpq-dev',
'postgresql-contrib-{}'.format(version()),
'pgtune')
@task
def setup():
"""
Install, configure Postgresql and create schemas
"""
install()
# Bump shared memory limits
setup_shared_memory()
# Generate pgtune.conf
generate_pgtune_conf()
# Upload templates
configure()
# Create schemas and related users
setup_schemas()
@task
def configure():
"""
Configure Postgresql
"""
context = {
'listen_addresses': blueprint.get('bind', 'localhost')
}
updates = [blueprint.upload(os.path.join('.', 'pgtune.conf'), postgres_root(), user='postgres'),
blueprint.upload(os.path.join('.', 'pg_hba.conf'), postgres_root(), user='postgres'),
blueprint.upload(os.path.join('.', 'postgresql-{}.conf'.format(version())),
postgres_root('postgresql.conf'), context=context, user='postgres')]
if any(updates):
restart()
@task
def setup_schemas(drop=False):
"""
Create database schemas and grant user privileges
:param drop: Drop existing schemas before creation
"""
schemas = blueprint.get('schemas', {})
with sudo('postgres'):
for schema, config in schemas.iteritems():
user, password = config['user'], config.get('password')
info('Creating user {}', user)
if password:
_client_exec("CREATE ROLE %(user)s WITH PASSWORD '%(password)s' LOGIN",
user=user, password=password)
else:
_client_exec("CREATE ROLE %(user)s LOGIN", user=user)
if drop:
                info('Dropping schema {}', schema)
_client_exec('DROP DATABASE %(name)s', name=schema)
info('Creating schema {}', schema)
_client_exec('CREATE DATABASE %(name)s', name=schema)
info('Granting user {} to schema {}'.format(user, schema))
_client_exec("GRANT ALL PRIVILEGES ON DATABASE %(schema)s to %(user)s",
schema=schema, user=user)
for ext in config.get('extensions', []):
info('Creating extension {} on schema {}'.format(ext, schema))
_client_exec("CREATE EXTENSION IF NOT EXISTS %(ext)s SCHEMA %(schema)s"
schema=schema, ext=ext)
def _client_exec(cmd, **kwargs):
with sudo('postgres'):
schema = kwargs.get('schema', 'template1')
return run("echo \"%s;\" | psql -d %s" % (cmd % kwargs, schema))
def setup_shared_memory():
"""
http://leopard.in.ua/2013/09/05/postgresql-sessting-shared-memory/
"""
sysctl_path = '/etc/sysctl.conf'
shmmax_configured = files.contains(sysctl_path, 'kernel.shmmax')
shmall_configured = files.contains(sysctl_path, 'kernel.shmall')
if not any([shmmax_configured, shmall_configured]):
page_size = debian.page_size()
phys_pages = debian.phys_pages()
shmall = phys_pages / 2
shmmax = shmall * page_size
shmmax_str = 'kernel.shmmax = {}'.format(shmmax)
shmall_str = 'kernel.shmall = {}'.format(shmall)
with sudo():
files.append(sysctl_path, shmmax_str, partial=True)
files.append(sysctl_path, shmall_str, partial=True)
run('sysctl -p')
info('Added **{}** to {}', shmmax_str, sysctl_path)
info('Added **{}** to {}', shmall_str, sysctl_path)
@task
def generate_pgtune_conf(role='db'):
"""
Run pgtune and create pgtune.conf
:param role: Which fabric role to place local pgtune.conf template under
"""
conf_path = postgres_root('postgresql.conf')
with sudo(), silent():
output = run('pgtune -T Web -i {}'.format(conf_path)).strip()
def parse(c):
lines = [l for l in c.splitlines() if '# pgtune' in l]
for line in lines:
try:
comment = line.index('#')
line = line[:comment]
except ValueError:
pass
clean = lambda s: s.strip('\n\r\t\'" ')
key, _, value = line.partition('=')
key, value = map(clean, (key, value))
if key:
yield key, value or None
tune_conf = dict(parse(output))
tune_conf.update(blueprint.get('pgtune', {}))
tune_conf = '\n'.join((' = '.join(item)) for item in tune_conf.iteritems())
conf_dir = os.path.join(os.path.dirname(env['real_fabfile']), 'templates', role, 'postgres')
conf_path = os.path.join(conf_dir, 'pgtune.conf')
if not os.path.exists(conf_dir):
os.makedirs(conf_dir)
with open(conf_path, 'w+') as f:
f.write(tune_conf)
@task
def dump(schema=None):
"""
Dump and download all configured, or given, schemas.
    :param schema: Specific schema to dump and download.
"""
if not schema:
schemas = blueprint.get('schemas', {}).keys()
for i, schema in enumerate(schemas, start=1):
print("{i}. {schema}".format(i=i, schema=schema))
valid_indices = '[1-{}]+'.format(len(schemas))
schema_choice = prompt('Select schema to dump:', default='1', validate=valid_indices)
schema = schemas[int(schema_choice)-1]
with sudo('postgres'):
now = datetime.now().strftime('%Y-%m-%d')
output_file = '/tmp/{}_{}.backup'.format(schema, now)
filename = os.path.basename(output_file)
options = dict(
format='tar',
output_file=output_file,
schema=schema
)
info('Dumping schema {}...', schema)
run('pg_dump -c -F {format} -f {output_file} {schema}'.format(**options))
info('Downloading dump...')
local_file = '~/{}'.format(filename)
files.get(output_file, local_file)
with sudo(), silent():
debian.rm(output_file)
info('New smoking hot dump at {}', local_file)
|
Python
| 0.000002 |
@@ -3918,16 +3918,17 @@
chema)s%22
+,
%0A
|
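The one-character hunk above (at offset 3918) is the advertised syntax-error fix: the `CREATE EXTENSION` call in `setup_schemas` was missing the comma between the SQL string and its keyword arguments, which is a hard `SyntaxError` (implicit string concatenation cannot rescue it, since the next token is a keyword argument). A self-contained sketch of the corrected call, with a stub standing in for the blueprint helper; the schema and extension names are placeholders:

```python
def _client_exec(cmd, **kwargs):
    # Stand-in for the blueprint helper above: just renders the statement.
    return '%s;' % (cmd % kwargs)

# With the comma in place, the call from setup_schemas parses and runs:
print(_client_exec("CREATE EXTENSION IF NOT EXISTS %(ext)s SCHEMA %(schema)s",
                   schema='some_schema_name', ext='hstore'))
```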
aed1f0e4e33dd956f4499ecffd6bf50bb58e7df4
|
Add fermi.py
|
scripts/fermi.py
|
scripts/fermi.py
|
Python
| 0.000022 |
@@ -0,0 +1,1597 @@
+# This example file is part of the ENVISIoN Electronic structure visualization studio%0A#%0A# Load this file into the Inviwo Python Editor (which you can access under the menu Python, %0A# which is available if Inviwo has been compiled with the Python module on)%0A#%0A# For Copyright and License information see the file LICENSE distributed alongside ENVISIoN%0A# %0A# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS %22AS IS%22 %0A# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE %0A# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE %0A# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE %0A# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL %0A# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR %0A# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER %0A# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, %0A# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE %0A# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.%0A#%0A%0Aimport os, sys%0A%0A# Configuration%0APATH_TO_ENVISION=os.path.expanduser(%22~/ENVISIoN/envision%22)%0APATH_TO_VASP_CALC=os.path.expanduser(%22~/ENVISIoN/data/Cu/1/11%22)%0APATH_TO_HDF5=os.path.expanduser(%22/tmp/envision_demo.hdf5%22)%0A%0Asys.path.insert(0, os.path.expanduser(PATH_TO_ENVISION)) # Or %60pip install --editable%60.%0A%0Aimport envision%0Aimport envision.inviwo%0A%0Aenvision.parser.vasp.fermi(PATH_TO_HDF5, PATH_TO_VASP_CALC)%0A%0Axpos=0%0A%0Aenvision.inviwo.fermi(PATH_TO_HDF5, xpos)%0A%0A
|
|
9ffa7ab2b4b5fb03d9cd8dd2740234ebaf8c8097
|
Add per client ignore exception option.
|
redis_cache/cache.py
|
redis_cache/cache.py
|
# -*- coding: utf-8 -*-
from django.conf import settings
from django.core.cache.backends.base import BaseCache
from django.core.exceptions import ImproperlyConfigured
from django.core.cache import get_cache
from .util import load_class
from .exceptions import ConnectionInterrupted
import functools
DJANGO_REDIS_IGNORE_EXCEPTIONS = getattr(settings,
"DJANGO_REDIS_IGNORE_EXCEPTIONS", False)
def omit_exception(method):
"""
Simple decorator that intercepts connection
errors and ignores these if settings specify this.
Note: this doesn't handle the `default` argument in .get().
"""
if not DJANGO_REDIS_IGNORE_EXCEPTIONS:
return method
@functools.wraps(method)
def _decorator(self, *args, **kwargs):
try:
return method(self, *args, **kwargs)
except ConnectionInterrupted:
return None
return _decorator
class RedisCache(BaseCache):
def __init__(self, server, params):
super(RedisCache, self).__init__(params)
self._server = server
self._params = params
options = params.get("OPTIONS", {})
self._client_cls = options.get("CLIENT_CLASS", "redis_cache.client.DefaultClient")
self._client_cls = load_class(self._client_cls)
self._client = None
@property
def client(self):
"""
Lazy client connection property.
"""
if self._client is None:
self._client = self._client_cls(self._server, self._params, self)
return self._client
@property
def raw_client(self):
"""
Return a raw redis client (connection). Not all
        pluggable clients support this feature; if it is not
        supported, this raises NotImplementedError.
"""
return self.client.get_client(write=True)
@omit_exception
def set(self, *args, **kwargs):
return self.client.set(*args, **kwargs)
@omit_exception
def incr_version(self, *args, **kwargs):
return self.client.incr_version(*args, **kwargs)
@omit_exception
def add(self, *args, **kwargs):
return self.client.add(*args, **kwargs)
@omit_exception
def get(self, key, default=None, version=None, client=None):
try:
return self.client.get(key, default=default, version=version,
client=client)
except ConnectionInterrupted:
if DJANGO_REDIS_IGNORE_EXCEPTIONS:
return default
raise
@omit_exception
def delete(self, *args, **kwargs):
return self.client.delete(*args, **kwargs)
@omit_exception
def delete_pattern(self, *args, **kwargs):
return self.client.delete_pattern(*args, **kwargs)
@omit_exception
def delete_many(self, *args, **kwargs):
return self.client.delete_many(*args, **kwargs)
@omit_exception
def clear(self):
return self.client.clear()
@omit_exception
def get_many(self, *args, **kwargs):
return self.client.get_many(*args, **kwargs)
@omit_exception
def set_many(self, *args, **kwargs):
return self.client.set_many(*args, **kwargs)
@omit_exception
def incr(self, *args, **kwargs):
return self.client.incr(*args, **kwargs)
@omit_exception
def decr(self, *args, **kwargs):
return self.client.decr(*args, **kwargs)
@omit_exception
def has_key(self, *args, **kwargs):
return self.client.has_key(*args, **kwargs)
@omit_exception
def keys(self, *args, **kwargs):
return self.client.keys(*args, **kwargs)
@omit_exception
def close(self, **kwargs):
self.client.close(**kwargs)
|
Python
| 0 |
@@ -618,74 +618,8 @@
%22%22%0A%0A
- if not DJANGO_REDIS_IGNORE_EXCEPTIONS:%0A return method%0A%0A
@@ -678,32 +678,72 @@
rgs, **kwargs):%0A
+ if self._ignore_exceptions:%0A
try:%0A
@@ -731,32 +731,36 @@
try:%0A
+
retu
@@ -792,16 +792,20 @@
kwargs)%0A
+
@@ -842,24 +842,28 @@
+
return None%0A
@@ -862,16 +862,80 @@
rn None%0A
+ else:%0A return method(self, *args, **kwargs)%0A%0A
retu
@@ -1349,16 +1349,115 @@
= None%0A%0A
+ self._ignore_exceptions = options.get(%22IGNORE_EXCEPTIONS%22, DJANGO_REDIS_IGNORE_EXCEPTIONS)%0A
%0A @pr
|
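Reassembled from the hunks above, the decorator stops deciding at import time and instead consults a per-instance flag that `__init__` now seeds from `OPTIONS["IGNORE_EXCEPTIONS"]`, falling back to the global setting. Roughly, with the exception class stubbed so the sketch stands alone:

```python
import functools

class ConnectionInterrupted(Exception):
    # Stub for redis_cache.exceptions.ConnectionInterrupted.
    pass

def omit_exception(method):
    """Per-client variant: the global setting is only a default now."""
    @functools.wraps(method)
    def _decorator(self, *args, **kwargs):
        if self._ignore_exceptions:
            try:
                return method(self, *args, **kwargs)
            except ConnectionInterrupted:
                return None
        else:
            return method(self, *args, **kwargs)
    return _decorator

# And in RedisCache.__init__, per the last hunk:
#     self._ignore_exceptions = options.get("IGNORE_EXCEPTIONS",
#                                           DJANGO_REDIS_IGNORE_EXCEPTIONS)
```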
b4fd94008fa5b1dcdb6dd61651d8776dfb41f2d6
|
Make sure we return a list.
|
oscar/apps/dashboard/catalogue/widgets.py
|
oscar/apps/dashboard/catalogue/widgets.py
|
import six
from django.forms.util import flatatt
from django.core.urlresolvers import reverse
from django.utils.safestring import mark_safe
from django import forms
class ProductSelect(forms.Widget):
is_multiple = False
css = 'select2 input-xlarge'
def format_value(self, value):
return six.text_type(value or '')
def value_from_datadict(self, data, files, name):
value = data.get(name, None)
if value is None:
return value
else:
return six.text_type(value)
def render(self, name, value, attrs=None, choices=()):
attrs = self.build_attrs(attrs, **{
'type': 'hidden',
'class': self.css,
'name': name,
'data-ajax-url': reverse('dashboard:catalogue-product-lookup'),
'data-multiple': 'multiple' if self.is_multiple else '',
'value': self.format_value(value),
'data-required': 'required' if self.is_required else '',
})
return mark_safe(u'<input %s>' % flatatt(attrs))
class ProductSelectMultiple(ProductSelect):
is_multiple = True
css = 'select2 input-xxlarge'
def format_value(self, value):
if value:
return ','.join(map(six.text_type, filter(bool, value)))
else:
return ''
def value_from_datadict(self, data, files, name):
value = data.get(name, None)
if value is None:
return []
else:
return filter(bool, value.split(','))
|
Python
| 0.000272 |
@@ -1482,16 +1482,21 @@
return
+list(
filter(b
@@ -1517,9 +1517,10 @@
it(','))
+)
%0A
|
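The motivation for wrapping the result in `list()`: on Python 3, `filter()` returns a lazy iterator rather than a list, so any caller of `value_from_datadict` that does `len()` or indexing on the result would break. For instance:

```python
value = '1,,2,'
parts = filter(bool, value.split(','))   # a filter object on Python 3
assert list(parts) == ['1', '2']         # materialized, empty strings dropped

# The widget now returns list(filter(bool, value.split(','))), so callers
# get a real list on both Python 2 and Python 3.
assert isinstance(list(filter(bool, value.split(','))), list)
```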
7e449b0267f47ee08327d9d76976c5e1b197501b
|
Add missing migration (#9504)
|
osf/migrations/0219_auto_20201020_1836.py
|
osf/migrations/0219_auto_20201020_1836.py
|
Python
| 0.000002 |
@@ -0,0 +1,2634 @@
+# -*- coding: utf-8 -*-%0A# Generated by Django 1.11.28 on 2020-10-20 18:36%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations, models%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('osf', '0218_auto_20200929_1850'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='draftregistration',%0A name='machine_state',%0A field=models.CharField(choices=%5B('initial', 'Initial'), ('pending', 'Pending'), ('accepted', 'Accepted'), ('rejected', 'Rejected'), ('withdrawn', 'Withdrawn'), ('pending_embargo', 'Pending_Embargo'), ('embargo', 'Embargo'), ('pending_embargo_termination', 'Pending_Embargo_Termination'), ('pending_withdraw_request', 'Pending_Withdraw_Request'), ('pending_withdraw', 'Pending_Withdraw')%5D, db_index=True, default='initial', max_length=30),%0A ),%0A migrations.AlterField(%0A model_name='registrationaction',%0A name='from_state',%0A field=models.CharField(choices=%5B('initial', 'Initial'), ('pending', 'Pending'), ('accepted', 'Accepted'), ('rejected', 'Rejected'), ('withdrawn', 'Withdrawn'), ('pending_embargo', 'Pending_Embargo'), ('embargo', 'Embargo'), ('pending_embargo_termination', 'Pending_Embargo_Termination'), ('pending_withdraw_request', 'Pending_Withdraw_Request'), ('pending_withdraw', 'Pending_Withdraw')%5D, max_length=31),%0A ),%0A migrations.AlterField(%0A model_name='registrationaction',%0A name='to_state',%0A field=models.CharField(choices=%5B('initial', 'Initial'), ('pending', 'Pending'), ('accepted', 'Accepted'), ('rejected', 'Rejected'), ('withdrawn', 'Withdrawn'), ('pending_embargo', 'Pending_Embargo'), ('embargo', 'Embargo'), ('pending_embargo_termination', 'Pending_Embargo_Termination'), ('pending_withdraw_request', 'Pending_Withdraw_Request'), ('pending_withdraw', 'Pending_Withdraw')%5D, max_length=31),%0A ),%0A migrations.AlterField(%0A model_name='registrationaction',%0A name='trigger',%0A field=models.CharField(choices=%5B('submit', 'Submit'), ('accept', 'Accept'), ('reject', 'Reject'), ('edit_comment', 'Edit_Comment'), ('embargo', 'Embargo'), ('withdraw', 'Withdraw'), ('request_withdraw', 'Request_Withdraw'), ('withdraw_request_fails', 'Withdraw_Request_Fails'), ('withdraw_request_pass', 'Withdraw_Request_Pass'), ('reject_withdraw', 'Reject_Withdraw'), ('force_withdraw', 'Force_Withdraw'), ('request_embargo', 'Request_Embargo'), ('request_embargo_termination', 'Request_Embargo_Termination'), ('terminate_embargo', 'Terminate_Embargo')%5D, max_length=31),%0A ),%0A %5D%0A
|
|
c137028a98cd762a4e93950fbde085969500999e
|
Build tagger
|
installer/build_tag.py
|
installer/build_tag.py
|
Python
| 0.000001 |
@@ -0,0 +1,452 @@
+#!/usr/python%0D%0A%0D%0Aimport os%0D%0Afrom subprocess import call, check_output%0D%0Aver = check_output(%5B %22python%22, %22version.py%22, %22../apps/Tasks/src/version.h%22,%0D%0A%09%22PROGRAM_VERSION_MAJOR,PROGRAM_VERSION_MINOR,PROGRAM_VERSION_PATCH,PROGRAM_VERSION_BUILD%22,%0D%0A%09%22PROGRAM_VERSION_BUILD%22%5D)%0D%0AVERSION = ver.strip()%0D%0A%0D%0Acall(%5B%22git%22,%22add%22,%22../apps/Tasks/src/version.h%22%5D)%0D%0Acall(%5B%22git%22,%22commit%22,%22-m%22,%22Build for version %25s%22 %25 VERSION%5D)%0D%0Acall(%5B%22git%22,%22tag%22,%22tag-build-v%25s%22 %25 VERSION%5D)
|
|
866b1c634c4fc6dc27ad953ccde6b6dcd11dcc91
|
Add mood light script
|
moodlight.py
|
moodlight.py
|
Python
| 0.000001 |
@@ -0,0 +1,1101 @@
+from maya.utils import executeDeferred%0Aimport pymel.core as pm%0Aimport threading%0Aimport time%0A%0A%0A_active_mood_light = None%0A_running = False%0A%0A%0Aclass MoodLightThread(threading.Thread):%0A%0A%09def __init__(self, speed):%0A%09%09self.speed = speed%0A%09%09super(MoodLightThread, self).__init__()%0A%0A%09def run(self):%0A%09%09while _running:%0A%09%09%09time.sleep(0.05)%0A%09%09%09color = pm.dt.Color()%0A%09%09%09hue = time.time() * self.speed %25 1 * 360%0A%09%09%09color.set('HSV', hue, 1, 0.3)%0A%09%09%09executeDeferred(%0A%09%09%09%09pm.mel.displayRGBColor,%0A%09%09%09%09'backgroundBottom',%0A%09%09%09%09color.r,%0A%09%09%09%09color.g,%0A%09%09%09%09color.b%0A%09%09%09)%0A%09%09%09color.set('HSV', hue, 0.3, 1)%0A%09%09%09executeDeferred(%0A%09%09%09%09pm.mel.displayRGBColor,%0A%09%09%09%09'backgroundTop',%0A%09%09%09%09color.r,%0A%09%09%09%09color.g,%0A%09%09%09%09color.b%0A%09%09%09)%0A%0A%0Adef is_running():%0A%09global _active_mood_light, _running%0A%09return _active_mood_light is not None and _running%0A%0A%0Adef start(speed=0.05):%0A%09global _active_mood_light, _running%0A%09stop()%0A%09_running = True%0A%09_active_mood_light = MoodLightThread(speed)%0A%09_active_mood_light.start()%0A%0A%0Adef stop():%0A%09global _active_mood_light, _running%0A%09if is_running():%0A%09%09_running = False%0A%09%09_active_mood_light.join()%0A%09%09_active_mood_light = None
|
|
b0d699066799d0309e7af3f8892f56a6feaac778
|
Write tests for new functionality; several destinations
|
new_tests.py
|
new_tests.py
|
Python
| 0.000001 |
@@ -0,0 +1,1292 @@
+from numpy import testing%0Aimport unittest%0Aimport numpy as np%0Afrom numpy import pi%0A%0Afrom robot_arm import RobotArm%0A%0A%0Aclass TestRobotArm(unittest.TestCase):%0A%0A def setUp(self):%0A self.lengths = (3, 2, 2,)%0A self.destinations = (%0A (5, 0,),%0A (4, 2,),%0A (6, 0.5),%0A (4, -2),%0A (5, -1),%0A )%0A self.theta = (pi, pi/2, 0,)%0A%0A def test_init_all_arguments(self):%0A RobotArm(self.lengths, self.destinations, self.theta)%0A%0A def test_init_without_theta(self):%0A RobotArm(self.lengths, self.destinations)%0A%0A def test_wrong_lengths_type(self):%0A self.assertRaises(%0A AssertionError,%0A RobotArm,%0A np.array(self.lengths),%0A self.destinations,%0A self.theta)%0A%0A def test_wrong_destinations_type(self):%0A self.assertRaises(%0A AssertionError,%0A RobotArm,%0A self.lengths,%0A np.array(self.destinations),%0A self.theta)%0A%0A def test_wrong_theta_type(self):%0A self.assertRaises(%0A AssertionError,%0A RobotArm,%0A self.lengths,%0A self.destinations,%0A np.array(self.theta))%0A
|
|
aafe6849ef2264606fcb6d3cceef27450f986a98
|
Add .. to list of search paths
|
libnamebench/util.py
|
libnamebench/util.py
|
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Little utility functions."""
__author__ = '[email protected] (Thomas Stromberg)'
import math
import re
import util
import os.path
import sys
import traceback
# third party lib
import dns.resolver
import nameserver
def CalculateListAverage(values):
"""Computes the arithmetic mean of a list of numbers."""
if not values:
return 0
return sum(values) / float(len(values))
def DrawTextBar(value, max_value, max_width=53):
"""Return a simple ASCII bar graph, making sure it fits within max_width.
Args:
value: integer or float representing the value of this bar.
max_value: integer or float representing the largest bar.
max_width: How many characters this graph can use (int)
Returns:
string
"""
hash_width = max_value / max_width
return int(math.ceil(value/hash_width)) * '#'
def SecondsToMilliseconds(seconds):
return seconds * 1000
def SplitSequence(seq, size):
"""Recipe From http://code.activestate.com/recipes/425397/
Modified to not return blank values."""
newseq = []
splitsize = 1.0/size*len(seq)
for i in range(size):
newseq.append(seq[int(round(i*splitsize)):int(round((i+1)*splitsize))])
return [ x for x in newseq if x ]
def InternalNameServers():
"""Return list of DNS server IP's used by the host."""
try:
return dns.resolver.Resolver().nameservers
except:
print "Unable to get list of internal DNS servers."
return []
def ExtractIPsFromString(ip_string):
"""Return a tuple of ip addressed held in a string."""
ips = []
# IPV6 If this regexp is too loose, see Regexp-IPv6 in CPAN for inspiration.
ips.extend(re.findall('[\dabcdef:]+:[\dabcdef:]+', ip_string, re.IGNORECASE))
ips.extend(re.findall('\d+\.\d+\.\d+\.+\d+', ip_string))
return ips
def ExtractIPTuplesFromString(ip_string):
ip_tuples = []
for ip in ExtractIPsFromString(ip_string):
ip_tuples.append((ip,ip))
return ip_tuples
def FindDataFile(filename):
if os.path.exists(filename):
return filename
# If it's not a relative path, we can't do anything useful.
if os.path.isabs(filename):
return filename
other_places = [os.getcwd(),
os.path.join(os.path.dirname(os.path.dirname(sys.argv[0])), 'Contents', 'Resources'),
os.path.join(os.getcwd(), 'namebench.app', 'Contents', 'Resources'),
os.path.join(sys.prefix, 'namebench'),
                  '/usr/local/share/namebench',
'/usr/local/etc/namebench',
'/usr/local/namebench',
'/etc/namebench',
'/usr/share/namebench',
'/usr/namebench']
for dir in reversed(sys.path):
other_places.append(dir)
other_places.append(os.path.join(dir, 'namebench'))
for place in other_places:
path = os.path.join(place, filename)
if os.path.exists(path):
return path
print "I could not find your beloved '%s'. Tried:" % filename
for path in other_places:
print " %s" % path
return filename
def GetLastExceptionString():
"""Get the last exception and return a good looking string for it."""
(exc, error) = sys.exc_info()[0:2]
exc_msg = str(exc)
if '<class' in exc_msg:
exc_msg = exc_msg.split("'")[1]
exc_msg = exc_msg.replace('dns.exception.', '')
return '%s %s' % (exc_msg, error)
|
Python
| 0 |
@@ -2921,32 +2921,83 @@
, 'Resources'),%0A
+ os.path.join(os.getcwd(), '..'),%0A
|
50f698c2fdd90bc4b3e60a583c196381fc23e099
|
Implement a rudimentary API for LLTK
|
lltk-restful/base.py
|
lltk-restful/base.py
|
Python
| 0.000034 |
@@ -0,0 +1,1344 @@
+#!/usr/bin/python%0A# -*- coding: UTF-8 -*-%0A%0Aimport lltk%0Aimport lltk.generic%0Aimport lltk.caching%0Aimport lltk.exceptions%0A%0Afrom flask import Flask%0Afrom flask import jsonify, request%0A%0A__author__ = 'Markus Beuckelmann'%0A__author_email__ = '[email protected]'%0A__version__ = '0.1.0'%0A%0ADEBUG = True%0ACACHING = True%0ANAME = 'lltk-restful'%0AHOST = '127.0.0.1'%0APORT = 5000%0A%0Aapp = Flask(NAME)%0A%0Aif DEBUG:%0A%09app.debug = True%0A%09lltk.config%5B'debug'%5D = True%0Aif not CACHING:%0A%09lltk.caching.disable()%0A%[email protected]('/lltk/%3Cstring:language%3E/%3Cstring:method%3E/%3Cstring:word%3E', methods = %5B'GET'%5D)%[email protected]('/lltk/%3Cstring:language%3E/%3Cstring:method%3E/%3Cpath:extraargs%3E/%3Cstring:word%3E', methods = %5B'GET'%5D)%0Adef lltkapi(language, method, word, extraargs = tuple()):%0A%09''' Returns LLTK's results as a JSON document. '''%0A%0A%09data = dict()%0A%09data%5B'language'%5D = language%0A%09data%5B'method'%5D = method%0A%09data%5B'word'%5D = word%0A%09data%5B'result'%5D = None%0A%0A%09if hasattr(lltk.generic, method) and callable(getattr(lltk.generic, method)):%0A%09%09function = getattr(lltk.generic, method)%0A%09%09if not isinstance(extraargs, tuple):%0A%09%09%09extraargs = tuple(extraargs.split('/'))%0A%09%09kwargs = request.args.to_dict()%0A%09%09data%5B'result'%5D = function(language, word, *extraargs, **kwargs)%0A%09else:%0A%09%09return http_404(NotImplementedError)%0A%0A%09return jsonify(data)%0A%0Aif __name__ == '__main__':%0A%0A%09app.run(%0A%09%09host = HOST,%0A%09%09port = PORT%0A%09)%0A
|
|
04f19b29c79e1ab624d7ce596730ad9b4fd500fd
|
add lcdb.helpers.py
|
lcdb/helpers.py
|
lcdb/helpers.py
|
Python
| 0.001346 |
@@ -0,0 +1,338 @@
+import yaml%0Afrom jsonschema import validate, ValidationError%0A%0A%0Adef validate_config(config, schema):%0A schema = yaml.load(open(schema))%0A cfg = yaml.load(open(config))%0A try:%0A validate(cfg, schema)%0A except ValidationError as e:%0A msg = '%5CnPlease fix %25s: %25s%5Cn' %25 (config, e.message)%0A raise ValidationError(msg)%0A
|
|
f4944256092b085b1546eaec114e0987da6697bc
|
add simple cli client
|
instapaper_cli.py
|
instapaper_cli.py
|
Python
| 0.000001 |
@@ -0,0 +1,1079 @@
+#!/opt/local/bin/python2.6%0A%0Afrom instapaper import Instapaper%0Afrom optparse import OptionParser%0Afrom getpass import getpass%0A%0Adef usage():%0A print %22Usage: instapaper.py %5B-h%5D username password url%22%0A print %22Options:%22%0A print %22-h Print this help%22%0A%0Adef main():%0A%0A # initialize parser%0A usage = %22usage: %25prog -u USER %5B-t TITLE%5D url%22%0A parser = OptionParser(usage)%0A parser.add_option(%22-u%22, %22--user%22, action=%22store%22, dest=%22user%22,metavar=%22USER%22,%0A help=%22instapaper username%22)%0A parser.add_option(%22-t%22, %22--title%22, action=%22store%22, dest=%22title%22,metavar=%22TITLE%22,%0A help=%22title of the link to add%22)%0A%0A (options, args) = parser.parse_args()%0A%0A if not options.user:%0A parser.error(%22No instapaper user given.%22)%0A else:%0A title = %22%22%0A if options.title:%0A title = options.title%0A pw = getpass()%0A inst = Instapaper(options.user,pw)%0A result = inst.addItem(args%5B0%5D,title)%0A if (result == -1):%0A print %22Uh-Oh, something went wrong.%22%0A%0Aif __name__ == %22__main__%22:%0A main()%0A
|
|
4d740138dc7101e2816837c070d3051835977d75
|
Add lc0621_task_scheduler.py
|
lc0621_task_scheduler.py
|
lc0621_task_scheduler.py
|
Python
| 0.000004 |
@@ -0,0 +1,1116 @@
+%22%22%22Leetcode 621. Task Scheduler%0AMedium%0A%0AURL: https://leetcode.com/problems/task-scheduler/%0A%0AGiven a char array representing tasks CPU need to do. It contains capital letters%0AA to Z where different letters represent differenttasks. Tasks could be done%0Awithout original order. Each task could be done in one interval. For each%0Ainterval, CPU could finish one task or just be idle.%0A%0AHowever, there is a non-negative cooling interval n that means between two same%0Atasks, there must be at least n intervals that CPU are doing different tasks or%0Ajust be idle.%0A%0AYou need to return the least number of intervals the CPU will take to finish all%0Athe given tasks.%0A%0AExample:%0AInput: tasks = %5B%22A%22,%22A%22,%22A%22,%22B%22,%22B%22,%22B%22%5D, n = 2%0AOutput: 8%0AExplanation: A -%3E B -%3E idle -%3E A -%3E B -%3E idle -%3E A -%3E B.%0A%0ANote:%0A- The number of tasks is in the range %5B1, 10000%5D.%0A- The integer n is in the range %5B0, 100%5D.%0A%22%22%22%0A%0Aclass Solution(object):%0A def leastInterval(self, tasks, n):%0A %22%22%22%0A :type tasks: List%5Bstr%5D%0A :type n: int%0A :rtype: int%0A %22%22%22%0A pass%0A%0A%0Adef main():%0A pass%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
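The committed file deliberately leaves `leastInterval` as a stub. For reference, one standard counting solution (not part of this commit) arranges the most frequent task into frames of length `n + 1`:

```python
from collections import Counter

def least_interval(tasks, n):
    counts = Counter(tasks)
    max_count = max(counts.values())                        # highest frequency
    num_max = sum(1 for c in counts.values() if c == max_count)
    # (max_count - 1) full frames of size n + 1, then one slot for every
    # task tied for the maximum frequency in the final frame.
    frames = (max_count - 1) * (n + 1) + num_max
    # If tasks outnumber the framed slots, no idling is needed at all.
    return max(len(tasks), frames)

assert least_interval(["A", "A", "A", "B", "B", "B"], 2) == 8  # the example above
```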
af8f7a09c6cf8a96b716d016fc3a983340760869
|
Create problem10.py
|
python/problem10.py
|
python/problem10.py
|
Python
| 0.000029 |
@@ -0,0 +1,184 @@
+import primes%0Aimport itertools%0A%0Adef problem10(limit):%0A ps = itertools.takewhile(lambda x: x %3C limit, primes.Eppstein_Sieve())%0A # ps = primes.Eratosthenes(limit) # memory error%0A return sum(ps)%0A
|
|
5b276622f570adac64eda9932c7da47bf4bcd25c
|
Add PPM sample
|
ppm_practice.py
|
ppm_practice.py
|
Python
| 0 |
@@ -0,0 +1,1409 @@
+#!/usr/bin/env python%0A# -*- coding: utf-8 -*-%0A%0A%0Aclass PpmImage(object):%0A %22%22%22PPM %E7%94%BB%E5%83%8F%E3%82%92%E8%A1%A8%E3%81%99%E3%82%AF%E3%83%A9%E3%82%B9%22%22%22%0A%0A def __init__(self, name, width, height, image, depth=8):%0A %22%22%22%0A :param name:%0A :param width:%0A :param height:%0A :param image:%0A :param depth depth: %E5%90%84%E8%89%B2%E3%81%AE%E9%9A%8E%E8%AA%BF%E6%95%B0 (bit)%0A :return:%0A %22%22%22%0A self.name = name%0A self.width = width%0A self.height = height%0A self.image = image%0A self.depth = depth%0A%0A def dump(self, fp):%0A %22%22%22%E3%83%95%E3%82%A1%E3%82%A4%E3%83%AB%E3%81%AB%E7%94%BB%E5%83%8F%E3%83%87%E3%83%BC%E3%82%BF%E3%82%92%E6%9B%B8%E3%81%8D%E8%BE%BC%E3%82%80%E5%87%A6%E7%90%86%22%22%22%0A fp.write('P3%5Cn')%0A fp.write('# ' + self.name + '%5Cn')%0A fp.write('%7B0:d%7D %7B1:d%7D%5Cn'.format(self.width, self.height))%0A fp.write('%7B0:d%7D%5Cn'.format(2 ** self.depth - 1))%0A%0A # %E7%94%BB%E5%83%8F%E3%81%AE%E9%AB%98%E3%81%95%E3%81%8C%E4%B8%8D%E5%8D%81%E5%88%86%E3%81%A7%E3%81%82%E3%82%8C%E3%81%B0%E4%BE%8B%E5%A4%96%E3%82%92%E9%80%81%E5%87%BA%0A if len(self.image) != self.height:%0A raise IndexError()%0A for row in self.image:%0A # %E7%94%BB%E5%83%8F%E3%81%AE%E5%B9%85%E3%81%8C%E4%B8%8D%E5%8D%81%E5%88%86%E3%81%A7%E3%81%82%E3%82%8C%E3%81%B0%E4%BE%8B%E5%A4%96%E3%82%92%E9%80%81%E5%87%BA%0A if len(row) != 3 * self.width:%0A raise IndexError()%0A for x in range(0, self.width * 3, 3):%0A fp.write('%7B0:3d%7D %7B1:3d%7D %7B2:3d%7D%5Cn'.format(*row%5Bx:x+3%5D))%0A%0A%0Aif __name__ == '__main__':%0A # %E9%81%A9%E5%BD%93%E3%81%AA%E7%94%BB%E5%83%8F%E3%82%92%E4%BD%9C%E6%88%90%0A name = %22test.ppm%22%0A depth = 8%0A width = height = 64%0A data = %5B%5B(i + j) %25 2 ** depth for i in range(3 * width)%5D%0A for j in range(height)%5D%0A image = PpmImage(name, width, height, data, depth=depth)%0A%0A # %E3%83%95%E3%82%A1%E3%82%A4%E3%83%AB%E3%81%AB%E4%BF%9D%E5%AD%98%0A with open(%22test.ppm%22, 'w') as f:%0A image.dump(f)
|
|
4f404a71cb7ee912bca8184fe94c97d6cfba1186
|
Add script to rotate a solid angle in the xz plane
|
preprocessing_tools/solid_rotation_y.py
|
preprocessing_tools/solid_rotation_y.py
|
Python
| 0 |
@@ -0,0 +1,2276 @@
+'''%0ARotates the protein by a solid angle on the plane xz%0A'''%0A%0Aimport numpy%0Aimport os%0A%0Afrom argparse import ArgumentParser%0A%0Afrom move_prot_helper import (read_vertex, read_pqr, rotate_y,%0A modify_pqr)%0A%0Adef read_inputs():%0A %22%22%22%0A Parse command-line arguments to run move_protein.%0A%0A User should provide:%0A -inMesh : str, mesh file you want to rotate.%0A -inpqr : str, pqr of the object you want to rotate.%0A -alpha_y: float %5Bdegrees%5D, rotation angle, about the dipole moment. %0A -name : str, output file name.%0A %22%22%22%0A%0A parser = ArgumentParser(description='Manage solid_rotation_y command line arguments')%0A%0A%0A parser.add_argument('-im', '--inMesh', dest='im', type=str, default=None,%0A help=%22mesh file you want to rotate%22)%0A%0A parser.add_argument('-ip', '--inpqr', dest='ip', type=str, default=None,%0A help=%22pqr of the object you want to rotate%22)%0A%0A parser.add_argument('-angy', '--angle_y', dest='angy', type=float, default=None,%0A help=%22rotation angle in the plane xz%22)%0A%0A parser.add_argument('-n', '--name', dest='name', type=str, default='',%0A help=%22output file name%22)%0A %0A return parser.parse_args()%0A%0Aargs = read_inputs()%0A%0AinMesh = args.im%0Ainpqr = args.ip%0Aangle_y = float(args.angy)*numpy.pi/180. %0Aname = args.name%0A%0AoutMesh = inMesh + name%0Aoutpqr = inpqr + name%0A%0A#Read mesh and pqr%0A#vert = read_vertex(inMesh+'.vert', float)%0Avert = numpy.loadtxt(inMesh+'.vert', dtype=float)%0A%0Axq, q, Nq = read_pqr(inpqr+'.pqr', float)%0A%0Axq_new = rotate_y(xq, angle_y)%0Avert_new = rotate_y(vert, angle_y)%0A%0Actr = numpy.average(vert_new, axis=0) %0A%0Ar_min_last = numpy.min(numpy.linalg.norm(vert_new, axis=1))%0Aidx_rmin_last = numpy.argmin(numpy.linalg.norm(vert_new, axis=1))%0A%0Aprint ('Desired configuration:')%0A%0Aprint ('%5CtProtein is centered, %7B%7D'.format(ctr))%0Aprint ('%5CtProtein r minimum is %7B%7D, located at %7B%7D'.format(r_min_last,%0A vert_new%5Bidx_rmin_last, :%5D))%0A%0A#### Save to file%0Anumpy.savetxt(outMesh+'.vert', vert_new)%0Acmd = 'cp '+inMesh+'.face '+outMesh+'.face'%0Aos.system(cmd)%0A%0Amodify_pqr(inpqr+'.pqr', outpqr+'.pqr', xq_new)%0A%0Aprint ('%5CnWritten to '+outMesh+'.vert(.face) and '+outpqr+'.pqr')%0A
|
|
daf23cbb6d6015a2819de5d089a35903cbce9441
|
Create katakan.py
|
list/katakan.py
|
list/katakan.py
|
Python
| 0.000004 |
@@ -0,0 +1,984 @@
+%22%22%22%0A4%0A2 belas%0Aseratus 4 puluh 0%0A9 ribu seratus 2 puluh 1%0A2 puluh 1 ribu 3 puluh 0%0A9 ratus 5 ribu 0%0A8 puluh 2 juta 8 ratus 8 belas ribu seratus 8 puluh 8%0A3 ratus 1 juta 4 puluh 8 ribu 5 ratus 8 puluh 8%0A%22%22%22%0A%0Adef kata(n):%0A angka = range(11)%0A temp = %22%22%0A%0A if n %3C 12: %0A temp += str(angka%5Bn%5D)%0A elif n %3C 20: %0A temp += str(n-10)+%22 belas%22%0A elif n %3C 100: %0A temp += str(kata(n/10)) + %22 puluh %22+ str(kata(n%2510))%0A elif n %3C 200:%0A temp += %22seratus %22+ str(kata(n-100))%0A elif n %3C 1000:%0A temp += str(kata(n/100))+ %22 ratus %22 + str(kata(n%25100))%0A elif n %3C 2000:%0A temp += %22seribu %22+str(kata(n-1000))%0A elif n %3C 1000000:%0A temp += str(kata(n/1000))+ %22 ribu %22+ str(kata(n%251000))%0A elif n %3C 1000000000:%0A temp += str(kata(n/1000000)) +%22 juta %22 + str(kata(n%251000000))%0A%0A return temp%0A%0Aprint kata(4)%0Aprint kata(12)%0Aprint kata(140)%0Aprint kata(9121)%0Aprint kata(21030)%0Aprint kata(905000)%0Aprint kata(82818188)%0Aprint kata(301048588)%0A
|
|
69be3c0efe6d0a508eac0e9bcc837eac9d68e8f0
|
Improve python p.a.c.k.er detection, fix #479
|
python/jsbeautifier/unpackers/packer.py
|
python/jsbeautifier/unpackers/packer.py
|
#
# Unpacker for Dean Edward's p.a.c.k.e.r, a part of javascript beautifier
# by Einar Lielmanis <[email protected]>
#
# written by Stefano Sanfilippo <[email protected]>
#
# usage:
#
# if detect(some_string):
# unpacked = unpack(some_string)
#
"""Unpacker for Dean Edward's p.a.c.k.e.r"""
import re
import string
from jsbeautifier.unpackers import UnpackingError
PRIORITY = 1
def detect(source):
"""Detects whether `source` is P.A.C.K.E.R. coded."""
return source.replace(' ', '').startswith('eval(function(p,a,c,k,e,')
def unpack(source):
"""Unpacks P.A.C.K.E.R. packed js code."""
payload, symtab, radix, count = _filterargs(source)
if count != len(symtab):
raise UnpackingError('Malformed p.a.c.k.e.r. symtab.')
try:
unbase = Unbaser(radix)
except TypeError:
raise UnpackingError('Unknown p.a.c.k.e.r. encoding.')
def lookup(match):
"""Look up symbols in the synthetic symtab."""
word = match.group(0)
return symtab[unbase(word)] or word
source = re.sub(r'\b\w+\b', lookup, payload)
return _replacestrings(source)
def _filterargs(source):
"""Juice from a source file the four args needed by decoder."""
argsregex = (r"}\('(.*)', *(\d+), *(\d+), *'(.*)'\."
r"split\('\|'\), *(\d+), *(.*)\)\)")
args = re.search(argsregex, source, re.DOTALL).groups()
try:
return args[0], args[3].split('|'), int(args[1]), int(args[2])
except ValueError:
raise UnpackingError('Corrupted p.a.c.k.e.r. data.')
def _replacestrings(source):
"""Strip string lookup table (list) and replace values in source."""
match = re.search(r'var *(_\w+)\=\["(.*?)"\];', source, re.DOTALL)
if match:
varname, strings = match.groups()
startpoint = len(match.group(0))
lookup = strings.split('","')
variable = '%s[%%d]' % varname
for index, value in enumerate(lookup):
source = source.replace(variable % index, '"%s"' % value)
return source[startpoint:]
return source
class Unbaser(object):
"""Functor for a given base. Will efficiently convert
strings to natural numbers."""
ALPHABET = {
62 : '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ',
95 : (' !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ'
'[\]^_`abcdefghijklmnopqrstuvwxyz{|}~')
}
def __init__(self, base):
self.base = base
# If base can be handled by int() builtin, let it do it for us
if 2 <= base <= 36:
self.unbase = lambda string: int(string, base)
else:
# Build conversion dictionary cache
try:
self.dictionary = dict((cipher, index) for
index, cipher in enumerate(self.ALPHABET[base]))
except KeyError:
raise TypeError('Unsupported base encoding.')
self.unbase = self._dictunbaser
def __call__(self, string):
return self.unbase(string)
def _dictunbaser(self, string):
"""Decodes a value to an integer."""
ret = 0
for index, cipher in enumerate(string[::-1]):
ret += (self.base ** index) * self.dictionary[cipher]
return ret
|
Python
| 0.000003 |
@@ -1231,19 +1231,19 @@
-argsregex =
+juicers = %5B
(r%22
@@ -1278,17 +1278,51 @@
'(.*)'%5C.
-%22
+split%5C('%5C%7C'%5C), *(%5Cd+), *(.*)%5C)%5C)%22),
%0A
@@ -1334,23 +1334,27 @@
- r%22split%5C('%5C%7C'%5C
+(r%22%7D%5C('(.*)', *(%5Cd+
), *
@@ -1365,19 +1365,80 @@
), *
+'
(.*)
-%5C)
+'%5C.split%5C('%5C%7C'
%5C)%22)
+,
%0A
+ %5D%0A for juicer in juicers:%0A
@@ -1458,17 +1458,14 @@
rch(
-argsregex
+juicer
, so
@@ -1480,16 +1480,54 @@
.DOTALL)
+%0A if args:%0A a = args
.groups(
@@ -1528,21 +1528,28 @@
roups()%0A
-%0A
+
+
try:%0A
@@ -1557,16 +1557,24 @@
+
return a
rgs%5B
@@ -1573,20 +1573,14 @@
rn a
-rgs
%5B0%5D, a
-rgs
%5B3%5D.
@@ -1596,19 +1596,16 @@
), int(a
-rgs
%5B1%5D), in
@@ -1611,16 +1611,21 @@
nt(a
-rgs
%5B2%5D)%0A
+
@@ -1635,32 +1635,40 @@
ept ValueError:%0A
+
raise Un
@@ -1709,24 +1709,164 @@
r. data.')%0A%0A
+ # could not find a satisfying regex%0A raise UnpackingError('Could not make sense of p.a.c.k.e.r data (unexpected code structure)')%0A%0A%0A%0A
def _replace
|
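Stitched back together, the new `_filterargs` tries a list of "juicer" regexes instead of a single pattern, which is what lets detection cope with packed payloads that omit the trailing count arguments (the second, shorter pattern). Reconstructed from the hunks above, with `re` imported and `UnpackingError` stubbed so the sketch stands alone:

```python
import re

class UnpackingError(Exception):
    # Stub for jsbeautifier.unpackers.UnpackingError used in the file above.
    pass

def _filterargs(source):
    """Juice from a source file the four args needed by decoder."""
    juicers = [(r"}\('(.*)', *(\d+), *(\d+), *'(.*)'\."
                r"split\('\|'\), *(\d+), *(.*)\)\)"),
               (r"}\('(.*)', *(\d+), *(\d+), *'(.*)'\.split\('\|'\)"),
               ]
    for juicer in juicers:
        args = re.search(juicer, source, re.DOTALL)
        if args:
            a = args.groups()
            try:
                return a[0], a[3].split('|'), int(a[1]), int(a[2])
            except ValueError:
                raise UnpackingError('Corrupted p.a.c.k.e.r. data.')
    # No pattern matched: the payload has an unexpected code structure.
    raise UnpackingError(
        'Could not make sense of p.a.c.k.e.r data (unexpected code structure)')
```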
1555164ff275436de580a33735a2d8c6e6893b42
|
Create lab4.py
|
laboratorios/lab4.py
|
laboratorios/lab4.py
|
Python
| 0.000001 |
@@ -0,0 +1,909 @@
+#lab 4%0A#josue dde leon%0A%0A%0Afor i in range (1, 4):%0A%09nombre = input(%22%5Cn%5Cnintroduce nombre: %22)%0A%09n1 = input (%22Introduce nota 1: %22)%0A%09n2 = input (%22Introduce nota 2: %22)%0A%09n3 = input (%22Introduce nota 3: %22)%0A%09n4 = input (%22Introduce nota 4: %22)%0A%09n5 = input (%22Introduce nota 5: %22)%0A%09prom=(float(n1)+float(n2)+float(n3)+float(n4)+float(n5))/5%09%0A%09print (%22%5CnNombre: %22 + str(nombre))%0A%09print (%22%5CnQuiz 1: %22 + str(n1))%0A%09print (%22Quiz 2: %22 + str(n2))%0A%09print (%22Quiz 3: %22 + str(n3))%0A%09print (%22Quiz 4: %22 + str(n4))%0A%09print (%22Quiz 5: %22 + str(n5))%0A%09print (%22%5Cn%5CnEl promedio de %22 + str(nombre) + %22 es %22 + str(prom))%0A%09archivo = open(nombre, 'w')%0A%09archivo.write(%22Nombre: %22 + str(nombre))%0A%09archivo.write(%22%5Cnquiz 1: %22 + n1)%0A%09archivo.write(%22%5Cnquiz 2: %22 + n2)%0A%09archivo.write(%22%5Cnquiz 3: %22 + n3)%0A%09archivo.write(%22%5Cnquiz 4: %22 + n4)%0A%09archivo.write(%22%5Cnquiz 5: %22 + n5)%0A%09archivo.write(%22%5CnEl promedio de %22 + str(nombre) + %22 es %22 + str(prom))%0A%09archivo.close() %0A
|
|
7b06edf37a630d4582fc84832cd1d40b790e4aa3
|
Add server
|
pygls/server.py
|
pygls/server.py
|
Python
| 0.000001 |
@@ -0,0 +1,1513 @@
+import asyncio%0Aimport logging%0A%0Afrom .protocol import LanguageServerProtocol%0A%0Alogger = logging.getLogger(__name__)%0A%0A%0Aclass Server:%0A def __init__(self, protocol_cls):%0A assert issubclass(protocol_cls, asyncio.Protocol)%0A self.loop = asyncio.get_event_loop()%0A%0A self.lsp = protocol_cls(self)%0A self.server = None%0A%0A def shutdown(self):%0A self.server.close()%0A # TODO: Gracefully shutdown event loops%0A%0A def start_tcp(self, host, port):%0A self.server = self.loop.run_until_complete(%0A self.loop.create_server(self.lsp, host, port)%0A )%0A self.loop.run_forever()%0A%0A%0Aclass LanguageServer(Server):%0A def __init__(self):%0A super().__init__(LanguageServerProtocol)%0A%0A def command(self, command_name):%0A '''%0A Registers new command (delegating to FeatureManager).%0A%0A Args:%0A command_name(str): Name of the command to register%0A '''%0A return self.lsp.fm.command(command_name)%0A%0A def feature(self, *feature_names, **options):%0A '''%0A Registers one or more LSP features (delegating to FeatureManager).%0A%0A Args:%0A *feature_names(tuple): One or more features to register%0A NOTE: All possible LSP features are listed in lsp module%0A **options(dict): Options for registered feature%0A E.G. triggerCharacters=%5B'.'%5D%0A '''%0A return self.lsp.fm.feature(*feature_names, **options)%0A%0A def thread(self):%0A return self.lsp.thread()%0A
|
|
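A hypothetical sketch of how the new class is meant to be wired up; the feature name, handler signatures, and port are illustrative, since the FeatureManager behind `feature()` and `command()` is not shown in this commit:

```python
server = LanguageServer()

@server.feature('textDocument/completion', triggerCharacters=['.'])
def completions(params):
    # Handler signature assumed; registration is delegated to self.lsp.fm.
    return []

@server.command('example.showVersion')
def show_version(*args):
    return '0.1'

server.start_tcp('127.0.0.1', 8080)   # blocks in loop.run_forever()
```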
357ce31d1f28fbc5d12a23dfd3bb2aa40a4e27a3
|
Add serialdumpbytexor.py
|
serialdumpbytexor.py
|
serialdumpbytexor.py
|
Python
| 0.000128 |
@@ -0,0 +1,394 @@
+#!/usr/bin/env python%0A%0Aimport sys, serial%0A%0Aif __name__ == '__main__':%0A ser = serial.Serial('/dev/cu.usbserial-A8004ISG', 115200, timeout=10, xonxoff=0, rtscts=0)%0A # ser.open()%0A bb = bytearray(512)%0A while 1:%0A ba = bytearray(ser.read(1024))%0A for i in range(512):%0A j = i * 2%0A bb%5Bi%5D = ba%5Bj%5D %5E ba%5Bj+1%5D%0A%09sys.stdout.write(bb)%0A sys.stdout.flush()%0A
|
|
6d8d76277d5d55d1be155763f380e0a573e03719
|
Fix a test which was broken since the last commit (Warning_:_)
|
test/test_timeout.py
|
test/test_timeout.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#Copyright (C) 2009-2010 :
# Gabes Jean, [email protected]
# Gerhard Lausser, [email protected]
#
#This file is part of Shinken.
#
#Shinken is free software: you can redistribute it and/or modify
#it under the terms of the GNU Affero General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#Shinken is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU Affero General Public License for more details.
#
#You should have received a copy of the GNU Affero General Public License
#along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
#It's ugly I know....
from shinken_test import *
# we have an external process, so we must un-fake time functions
time.time = original_time_time
time.sleep = original_time_sleep
from worker import Worker
from multiprocessing import Queue, Manager
from objects.service import Service
from objects.contact import Contact
modconf = Module()
class TestTimeout(ShinkenTest):
    #Uncomment this if you want to use a specific configuration
    #for your test
def setUp(self):
self.setup_with_file('etc/nagios_check_timeout.cfg')
def test_notification_timeout(self):
if os.name == 'nt':
return
# These queues connect a poller/reactionner with a worker
to_queue = Queue()
# manager = Manager()
from_queue = Queue()#manager.list()
control_queue = Queue()
# This testscript plays the role of the reactionner
# Now "fork" a worker
w = Worker(1,to_queue,from_queue,1)
w.id = 1
w.i_am_dying = False
# We prepare a notification in the to_queue
c = Contact()
c.contact_name = "mr.schinken"
n = Notification('PROBLEM', 'scheduled', 'libexec/sleep_command.sh 7', '', Service(), '', '', id=1)
n.status = "queue"
#n.command = "libexec/sleep_command.sh 7"
n.t_to_go = time.time()
n.contact = c
n.timeout = 2
n.env = {}
n.exit_status = 0
n.module_type = "fork"
nn = n.copy_shell()
# Send the job to the worker
msg = Message(id=0, type='Do', data=nn)
to_queue.put(msg)
w.checks = []
w.returns_queue = from_queue
w.s = to_queue
w.c = control_queue
# Now we simulate the Worker's work() routine. We can't call it
# as w.work() because it is an endless loop
for i in xrange(1,10):
w.get_new_checks()
# During the first loop the sleeping command is launched
w.launch_new_checks()
w.manage_finished_checks()
time.sleep(1)
# The worker should have finished it's job now, either correctly or
# with a timeout
o = from_queue.get()
self.assert_(o.status == 'timeout')
self.assert_(o.exit_status == 3)
self.assert_(o.execution_time < n.timeout+1)
# Be a good poller and clean up.
to_queue.close()
control_queue.close()
# Now look what the scheduler says to all this
self.sched.actions[n.id] = n
self.sched.put_results(o)
self.assert_(self.any_log_match("Warning: Contact mr.schinken service notification command 'libexec/sleep_command.sh 7 ' timed out after 2 seconds"))
if __name__ == '__main__':
unittest.main()
|
Python
| 0.000019 |
@@ -3497,16 +3497,17 @@
%22Warning
+
: Contac
|
ca99e80e04a1d7fb3ff3698f23cdc19c8ec16113
|
add refresh test
|
refresh_test.py
|
refresh_test.py
|
Python
| 0 |
@@ -0,0 +1,2226 @@
+#!/usr/bin/env python%0A%0A# This program is free software; you can redistribute it and/or modify%0A# it under the terms of the GNU Affero General Public License as published by%0A# the Free Software Foundation; either version 3 of the License, or%0A# (at your option) any later version.%0A#%0A# This program is distributed in the hope that it will be useful,%0A# but WITHOUT ANY WARRANTY; without even the implied warranty of%0A# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.%0A#%0A# See LICENSE for more details.%0A#%0A# Copyright (c) 2017 ScyllaDB%0A%0Aimport time%0A%0Afrom avocado import main%0Afrom sdcm.tester import ClusterTester%0Afrom sdcm.nemesis import RefreshMonkey%0Afrom sdcm.nemesis import RefreshBigMonkey%0A%0A%0Aclass RefreshTest(ClusterTester):%0A %22%22%22%0A Nodetool refresh after uploading lot of data to a cluster with running load in the background.%0A :avocado: enable%0A %22%22%22%0A def test_refresh_small_node(self):%0A self.db_cluster.add_nemesis(nemesis=RefreshMonkey,%0A loaders=self.loaders,%0A monitoring_set=self.monitors)%0A%0A # run a write workload%0A stress_queue = self.run_stress_thread(stress_cmd=self.params.get('stress_cmd'),%0A stress_num=2,%0A keyspace_num=1)%0A time.sleep(30)%0A self.db_cluster.start_nemesis()%0A self.db_cluster.stop_nemesis(timeout=None)%0A%0A self.get_stress_results(queue=stress_queue, stress_num=2, keyspace_num=1)%0A%0A def test_refresh_big_node(self):%0A self.db_cluster.add_nemesis(nemesis=RefreshBigMonkey,%0A loaders=self.loaders,%0A monitoring_set=self.monitors)%0A%0A # run a write workload%0A stress_queue = self.run_stress_thread(stress_cmd=self.params.get('stress_cmd'),%0A stress_num=2,%0A keyspace_num=1)%0A time.sleep(30)%0A self.db_cluster.start_nemesis()%0A self.db_cluster.stop_nemesis(timeout=None)%0A%0A self.get_stress_results(queue=stress_queue, stress_num=2, keyspace_num=1)%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
b440872f71d37cc5bf110eb0c7c13a4a2dcb7f6c
|
create utils package, field_template_read update var name to template render
|
opps/fields/utils.py
|
opps/fields/utils.py
|
Python
| 0 |
@@ -0,0 +1,236 @@
+# -*- coding: utf-8 -*-%0Adef field_template_read(obj):%0A %22%22%22Use replace because the django template can't read variable with %22-%22%0A %22%22%22%0A fields = %7B%7D%0A for o in obj:%0A fields%5Bo.replace(%22-%22, %22_%22)%5D = obj%5Bo%5D%0A%0A return fields%0A
|
|
2804024fbee6b825dec512ff13d7b28a1fee5b25
|
Add root Api object.
|
routeros_api/api.py
|
routeros_api/api.py
|
Python
| 0 |
@@ -0,0 +1,2769 @@
+import hashlib%0Aimport binascii%0Afrom routeros_api import api_communicator%0Afrom routeros_api import api_socket%0Afrom routeros_api import base_api%0A%0A%0Adef connect(host, username='admin', password='', port=8728):%0A socket = api_socket.get_socket(host, port)%0A base = base_api.Connection(socket)%0A communicator = api_communicator.ApiCommunicator(base)%0A login(communicator, username, password)%0A return RouterOsApi(communicator)%0A%0Adef login(communicator, login, password):%0A communicator.send_command('/', 'login')%0A response = communicator.receive_single_response()%0A token = binascii.unhexlify(response.attributes%5B'ret'%5D)%0A hasher = hashlib.md5()%0A hasher.update(b'%5Cx00')%0A hasher.update(password.encode())%0A hasher.update(token)%0A hashed = b'00' + hasher.hexdigest().encode('ascii')%0A communicator.call('/', 'login', %7B'name': login, 'response': hashed%7D)%0A%0Aclass RouterOsApi(object):%0A def __init__(self, communicator):%0A self.communicator = communicator%0A%0A def get_resource(self, path):%0A return RouterOsResource(self.communicator, path)%0A%0A def get_binary_resource(self, path):%0A return RouterOsResource(self.communicator, path, binary=True)%0A%0Aclass RouterOsResource(object):%0A def __init__(self, communicator, path, binary=False):%0A self.communicator = communicator%0A self.path = path%0A self.binary = binary%0A%0A def get(self, **kwargs):%0A return self.call('print', %7B%7D, kwargs)%0A%0A def get_async(self, **kwargs):%0A return self.call_async('print', %7B%7D, kwargs)%0A%0A def detailed_get(self, **kwargs):%0A return self.call('print', %7B'detail': ''%7D, kwargs)%0A%0A def detailed_get_async(self, **kwargs):%0A return self.call_async('print', %7B'detail': ''%7D, kwargs)%0A%0A def set(self, **kwargs):%0A return self.call('set', kwargs)%0A%0A def set_async(self, **kwargs):%0A return self.call('set', kwargs)%0A%0A def add(self, **kwargs):%0A return self.call('add', kwargs)%0A%0A def add_async(self, **kwargs):%0A return self.call_async('add', kwargs)%0A%0A def remove(self, **kwargs):%0A return self.call('remove', kwargs)%0A%0A def remove_async(self, **kwargs):%0A return self.call_async('remove', kwargs)%0A%0A def call(self, command, arguments=None, queries=None,%0A additional_queries=()):%0A return self.communicator.call(%0A self.path, command, arguments=arguments, queries=queries,%0A additional_queries=additional_queries, binary=self.binary)%0A%0A def call_async(self, command, arguments=None, queries=None,%0A additional_queries=()):%0A return self.communicator.call_async(%0A self.path, command, arguments=arguments, queries=queries,%0A additional_queries=additional_queries, binary=self.binary)%0A
|
|
52f715af4b1cf6dd964e71cafdf807d1133fe717
|
add a basic script that tests nvlist_in and nvlist_out functionality
|
tests/test_nvlist.py
|
tests/test_nvlist.py
|
Python
| 0.000001 |
@@ -0,0 +1,841 @@
+import json
import math
from libzfs_core.nvlist import *
from libzfs_core.nvlist import _lib

props_in = {
	"key1": "str",
	"key2": 10,
	"key3": {
		"skey1": True,
		"skey2": None,
		"skey3": [
			True,
			False,
			True
		]
	},
	"key4": [
		"ab",
		"bc"
	],
	"key5": [
		int(math.pow(2, 62)),
		1,
		2,
		3
	],
	"key6": [
		uint32_t(10),
		uint32_t(11)
	],
	"key7": [
		{
			"skey71": "a",
			"skey72": "b",
		},
		{
			"skey71": "c",
			"skey72": "d",
		},
		{
			"skey71": "e",
			"skey72": "f",
		}

	]
}

props_out = {}

with nvlist_in(props_in) as x:
	print "Dumping a C nvlist_t produced from a python dictionary:"
	_lib.dump_nvlist(x, 2)

	with nvlist_out(props_out) as y:
		_lib.nvlist_dup(x, y, 0)
	print "\n\n"
	print "Dumping a dictionary reconstructed from the nvlist_t:"
	print json.dumps(props_out, sort_keys=True, indent=4)
|
|
5348379759caa9576c3194ae0795e2fcc6ed3308
|
add unit tests
|
tests/test_region.py
|
tests/test_region.py
|
Python
| 0.000001 |
@@ -0,0 +1,2354 @@
+# -*- coding: utf-8 -*-
from cooler.region import *
import nose


def test_bool_ops():
    a, b = parse_region(('chr1', 5, 10)), parse_region(('chr1', 15, 20))
    assert comes_before(a, b) == True
    assert comes_after(a, b) == False
    assert contains(a, b) == False
    assert overlaps(a, b) == False

    a, b = parse_region(('chr1', 5, 10)), parse_region(('chr1', 10, 20))
    assert comes_before(a, b) == True
    assert comes_after(a, b) == False
    assert contains(a, b) == False
    assert overlaps(a, b) == False

    a, b = parse_region(('chr1', 5, 10)), parse_region(('chr1', 6, 10))
    assert comes_before(a, b) == True
    assert comes_after(a, b) == False
    assert contains(a, b) == False
    assert overlaps(a, b) == True

    a, b = parse_region(('chr1', 5, 10)), parse_region(('chr1', 5, 10))
    assert comes_before(a, b) == False
    assert comes_after(a, b) == False
    assert contains(a, b) == False
    assert overlaps(a, b) == True

    a, b = parse_region(('chr1', 5, 10)), parse_region(('chr1', 0, 6))
    assert comes_before(a, b) == False
    assert comes_after(a, b) == True
    assert contains(a, b) == False
    assert overlaps(a, b) == True

    a, b = parse_region(('chr1', 5, 10)), parse_region(('chr1', 0, 5))
    assert comes_before(a, b) == False
    assert comes_after(a, b) == True
    assert contains(a, b) == False
    assert overlaps(a, b) == False

    a, b = parse_region(('chr1', 5, 10)), parse_region(('chr1', 0, 15))
    assert comes_before(a, b) == False
    assert comes_after(a, b) == False
    assert contains(a, b) == False
    assert overlaps(a, b) == True


def test_set_ops():
    a, b = parse_region(('chr1', 5, 15)), parse_region(('chr1', 10, 20))
    assert intersection(a, b) == Region('chr1', 10, 15)

    a, b = parse_region(('chr1', 5, 15)), parse_region(('chr1', 10, 20))
    assert union(a, b) == Region('chr1', 5, 20)

    a, b = parse_region(('chr1', 5, 10)), parse_region(('chr1', 15, 20))
    assert hull(a, b) == Region('chr1', 5, 20)

    a, b = parse_region(('chr1', 5, 15)), parse_region(('chr1', 10, 20))
    assert diff(a, b) == Region('chr1', 5, 10)

    a, b = parse_region(('chr1', 5, 15)), parse_region(('chr1', 10, 20))
    x, y, z = partition(a, b)
    assert x == Region('chr1', 5, 10)
    assert y == Region('chr1', 10, 15)
    assert z == Region('chr1', 15, 20)
|
|
c9fc6d4f98ba102d94fa54eedae6a50d38459d71
|
add test_invalid_files to test_schema
|
tests/test_schema.py
|
tests/test_schema.py
|
Python
| 0.000001 |
@@ -0,0 +1,2280 @@
+import os
import jsonschema
import json
import pathlib
import copy


def get_example_json(filebase):
    rootdir = pathlib.Path(__file__).resolve().parent.parent
    jsonfilepath = str(rootdir / 'examples' / f'{filebase}.json')
    with open(jsonfilepath) as f:
        js = json.load(f)
    return js


def get_json_schema():
    this_path = os.path.dirname(os.path.abspath(__file__))
    schema_path = os.path.join(os.path.dirname(this_path), 'hescorehpxml', 'schemas', 'hescore_json.schema.json')
    with open(schema_path, 'r') as js:
        schema = json.loads(js.read())
    return schema


def get_error_messages(jsonfile, jsonschema):
    errors = []
    for error in sorted(jsonschema.iter_errors(jsonfile), key=str):
        errors.append(error.message)
    return errors


def test_schema_version_validation():
    schema = get_json_schema()
    error = jsonschema.Draft7Validator.check_schema(schema)
    assert error is None


def test_invalid_files():
    hpxml_filebase = 'townhouse_walls'
    schema = get_json_schema()
    js_schema = jsonschema.Draft7Validator(schema)
    js = get_example_json(hpxml_filebase)

    js1 = copy.deepcopy(js)
    del js1['building']['about']['town_house_walls']
    errors = get_error_messages(js1, js_schema)
    assert "'town_house_walls' is a required property" in errors

    js2 = copy.deepcopy(js)
    js2_about = copy.deepcopy(js['building']['about'])
    del js2['building']['about']
    js2['building']['about'] = []
    js2['building']['about'].append(js2_about)
    js2['building']['about'].append(js2_about)
    errors = get_error_messages(js2, js_schema)
    assert any(error.startswith("[{'assessment_date': '2014-12-02', 'shape': 'town_house'") and
               error.endswith("is not of type 'object'") for error in errors)

    js3 = copy.deepcopy(js)
    js3_zone = copy.deepcopy(js['building']['zone'])
    del js3['building']['zone']
    js3['building']['zone'] = []
    js3['building']['zone'].append(js3_zone)
    js3['building']['zone'].append(js3_zone)
    errors = get_error_messages(js3, js_schema)
    assert any(error.startswith("[{'zone_roof': [{'roof_name': 'roof1', 'roof_area': 1200.0") and
               error.endswith("is not of type 'object'") for error in errors)

    # TODO: Add more tests
|
|
5e4fd7fb37f9e16d27a7751221f6e3725509f2fc
|
Prepare to use unittests
|
tests/testapi.py
|
tests/testapi.py
|
Python
| 0 |
@@ -0,0 +1,1273 @@
+#!/usr/bin/python

from fortigateconf import FortiOSConf
import sys
import json
import pprint
import json
from argparse import Namespace
import logging
formatter = logging.Formatter(
    '%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
logger = logging.getLogger('fortinetconflib')
hdlr = logging.FileHandler('/var/tmp/testapi.log')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.DEBUG)

logger.debug('often makes a very good meal of %s', 'visiting tourists')

fgt = FortiOSConf()

def json2obj(data):
    return json.loads(data, object_hook=lambda d: Namespace(**d))


def main():
    # Login to the FGT ip
    fgt.debug('on')
    fgt.login('192.168.40.8','admin','')
    data = {
        # "action" : "add",
        "seq-num" :"8",
        "dst": "10.10.30.0 255.255.255.0",
        "device": "port2",
        "gateway": "192.168.40.254",
        }
    pp = pprint.PrettyPrinter(indent=4)
    d=json2obj(json.dumps(data))
    pp.pprint(fgt.get_name_path_dict( vdom="root"))
    # resp = fgt.schema('diagnose__tree__','debug', vdom="root")
    # pp.pprint(resp)
    resp = fgt.post('diagnose__tree__','debug', vdom="root", mkey="enable")

    pp.pprint(resp)

    fgt.logout()


if __name__ == '__main__':
    main()
|
|
e57a73ac2c1a22d97ce40a8954ecb44e3b92a53c
|
increase to 100%
|
lob/api_requestor.py
|
lob/api_requestor.py
|
import requests
import lob
import json
import resource
from lob import error
from version import VERSION
def _is_file_like(obj):
"""
Checks if an object is file-like enough to be sent to requests.
In particular, file, StringIO and cStringIO objects are file-like.
Refs http://stackoverflow.com/questions/3450857/python-determining-if-an-object-is-file-like
"""
return hasattr(obj, 'read') and hasattr(obj, 'seek')
class APIRequestor(object):
def __init__(self, key=None):
self.api_key = key or lob.api_key
def parse_response(self, resp):
payload = json.loads(resp.content)
if resp.status_code == 200:
return payload
elif resp.status_code == 401:
raise error.AuthenticationError(payload['errors'][0]['message'],
resp.content, resp.status_code, resp)
elif resp.status_code in [404, 422]:
raise error.InvalidRequestError(payload['errors'][0]['message'],
resp.content, resp.status_code, resp)
else:
#pragma: no cover
raise error.APIError(payload['errors'][0]['message'], resp.content, resp.status_code, resp) # pragma: no cover
def request(self, method, url, params=None):
headers = {
'User-Agent': 'Lob/v1 PythonBindings/%s' % VERSION
}
if hasattr(lob, 'api_version'):
headers['Lob-Version'] = lob.api_version
if method == 'get':
return self.parse_response(
requests.get(lob.api_base + url, auth=(self.api_key, ''), params=params, headers=headers)
)
elif method == 'delete':
return self.parse_response(
requests.delete(lob.api_base + url, auth=(self.api_key, ''), headers=headers)
)
elif method == 'post':
data = {}
files = params.pop('files', {})
explodedParams = {}
for k,v in params.iteritems():
if isinstance(v, dict) and not isinstance(v, resource.LobObject):
for k2,v2 in v.iteritems():
explodedParams[k + '[' + k2 + ']'] = v2
else:
explodedParams[k] = v
for k,v in explodedParams.iteritems():
if _is_file_like(v):
files[k] = v
else:
if isinstance(v, resource.LobObject):
data[k] = v.id
else:
data[k] = v
return self.parse_response(
requests.post(lob.api_base + url, auth=(self.api_key, ''), data=data, files=files, headers=headers)
)
|
Python
| 0.000013 |
@@ -1040,28 +1040,16 @@
else:
-%0A
#pragma
@@ -1166,27 +1166,8 @@
esp)
- # pragma: no cover
%0A%0A
|
cd653c3657aa14d3845a253d916e9f0d336910ce
|
add logger convenience class
|
loggerglue/logger.py
|
loggerglue/logger.py
|
Python
| 0 |
@@ -0,0 +1,2627 @@
+# -*- coding: utf-8 -*-
"""
An rfc5424/rfc5425 syslog server implementation
Copyright © 2011 Evax Software <[email protected]>
"""
import socket,os,sys
from datetime import datetime

from loggerglue.rfc5424 import DEFAULT_PRIVAL,SyslogEntry
from loggerglue.emitter import UNIXSyslogEmitter

class Logger(object):
    """
    Convenience class to log RFC5424 messages to the
    local syslog daemon.
    """

    def __init__(self, emitter=None, hostname=None, app_name=None, procid=None):
        """
        Create a new logger object.

        Keyword arguments:
        emitter -- Emitter object to send syslog messages, default to Unix socket /dev/log
        hostname -- Hostname to send with log messages, defaults to current hostname
        app_name -- Application name to send with log messages, defaults to application name
        procid -- Process ID to send with log messages, default to current process ID
        """
        if hostname is None:
            # Compute host name to submit to syslog
            hostname = socket.gethostname()

        if app_name is None:
            # Compute default app name from name of executable,
            # without extension.
            app_name = os.path.basename(sys.argv[0])
            (app_name, _, _) = app_name.partition(".")

        if procid is None:
            procid = os.getpid()

        if emitter is None:
            emitter = UNIXSyslogEmitter()

        self.hostname = hostname
        self.app_name = app_name
        self.procid = procid
        self.emitter = emitter

    def log(self, msg=None, msgid=None, structured_data=None, prival=DEFAULT_PRIVAL,
            timestamp=None):
        """
        Log a message.

        Example:
        >>> logger.log("test", prival=LOG_DEBUG|LOG_MAIL)

        Keyword arguments:
        msg -- Human readable message to log
        msgid -- Message identifier
        structured_data -- Structured data to attach to log message
        prival -- Priority and facility of message (defaults to INFO|USER)
        timestamp -- UTC time of log message (default to current time)
        """
        if timestamp is None:
            timestamp = datetime.utcnow()

        msg = SyslogEntry(
            prival=prival, timestamp=datetime.utcnow(),
            hostname=self.hostname, app_name=self.app_name, procid=self.procid, msgid=msgid,
            structured_data=structured_data,
            msg=msg
        )

        self.emitter.emit(msg)

    def close(self):
        """
        Close connection to logger.
        """
        self.emitter.close()
|
|
1ac75fafc9c67e0fc1f898f4653593730ed66326
|
Create uber.py
|
modules/uber.py
|
modules/uber.py
|
Python
| 0.000036 |
@@ -0,0 +1,50 @@
+def uber(self):
    self.send_chan("Prkl, toimii!")
|
|
7b8d7bf81b094f554f3d820b1e0df5d54917f4c0
|
Create getCITask.py
|
src/main/resources/xlr_xldeploy/getCITask.py
|
src/main/resources/xlr_xldeploy/getCITask.py
|
Python
| 0.000001 |
@@ -0,0 +1,1389 @@
+#
# Copyright 2017 XEBIALABS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#

from xlr_xldeploy.XLDeployClientUtil import XLDeployClientUtil

xld_client = XLDeployClientUtil.create_xldeploy_client(xldeployServer, username, password)

test = xld_client.check_ci_exist(ciID)

if throwOnFail and not test:
    raise Exception(ciID + " does not exist")

else:
    response = xld_client.get_ci(ciID,accept)
|
|
71fa375ac07ed23e7105f253fc45fff98890c1c4
|
document :- Bug #539644 - backward compatibility for file utility
|
addons/document/content_index.py
|
addons/document/content_index.py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import os
import tempfile
class NhException(Exception):
pass
from subprocess import Popen, PIPE
class indexer():
""" An indexer knows how to parse the content of some file.
Typically, one indexer should be instantiated per file
type.
Override this class to add more functionality. Note that
you should only override the Content or the File methods
that give an optimal result. """
def _getMimeTypes(self):
""" Return supported mimetypes """
return []
def _getExtensions(self):
return []
def _getDefMime(self,ext):
""" Return a mimetype for this document type, ideally the
closest to the extension ext. """
mts = self._getMimeTypes();
if len (mts):
return mts[0]
return None
def indexContent(self,content,filename=None, realfile = None):
""" Use either content or the real file, to index.
Some parsers will work better with the actual
content, others parse a file easier. Try the
optimal.
"""
res = ''
try:
if content != None:
return self._doIndexContent(content)
except NhException:
pass
if realfile != None:
try:
return self._doIndexFile(realfile)
except NhException:
pass
fp = open(realfile,'rb')
content2 = fp.read()
fp.close()
# The not-handled exception may be raised here
return self._doIndexContent(content2)
# last try, with a tmp file
if content:
try:
fname,ext = filename and os.path.splitext(filename) or ('','')
fd, rfname = tempfile.mkstemp(suffix=ext)
os.write(fd, content)
os.close(fd)
res = self._doIndexFile(rfname)
os.unlink(rfname)
return res
except NhException:
pass
raise NhException('No appropriate method to index file')
def _doIndexContent(self,content):
raise NhException("Content not handled here")
def _doIndexFile(self,fpath):
raise NhException("Content not handled here")
def mime_match(mime, mdict):
if mdict.has_key(mime):
return (mime, mdict[mime])
if '/' in mime:
mpat = mime.split('/')[0]+'/*'
if mdict.has_key(mpat):
return (mime, mdict[mpat])
return (None, None)
class contentIndex():
__logger = logging.getLogger('addons.document.content_index')
def __init__(self):
self.mimes = {}
self.exts = {}
def register(self, obj):
f = False
for mime in obj._getMimeTypes():
self.mimes[mime] = obj
f = True
for ext in obj._getExtensions():
self.exts[ext] = obj
f = True
if f:
self.__logger.debug('Register content indexer: %s', obj)
if not f:
raise Exception("Your indexer should at least suport a mimetype or extension")
def doIndex(self,content, filename=None, content_type=None, realfname = None, debug=False):
fobj = None
fname = None
mime = None
if content_type and self.mimes.has_key(content_type):
mime = content_type
fobj = self.mimes[content_type]
elif filename:
bname,ext = os.path.splitext(filename)
if self.exts.has_key(ext):
fobj = self.exts[ext]
mime = fobj._getDefMime(ext)
if content_type and not fobj:
mime,fobj = mime_match(content_type, self.mimes)
if not fobj:
try:
if realfname :
fname = realfname
else:
bname,ext = os.path.splitext(filename)
fd, fname = tempfile.mkstemp(suffix=ext)
os.write(fd, content)
os.close(fd)
#fp = Popen(['file','-b','--mime-type',fname], shell=False, stdout=PIPE).stdout
fp = Popen(['file','-b',fname], shell=False, stdout=PIPE).stdout
result = fp.read()
fp.close()
mime2 = result.strip()
self.__logger.debug('File gave us: %s', mime2)
# Note that the temporary file still exists now.
mime,fobj = mime_match(mime2, self.mimes)
if not mime:
mime = mime2
except Exception:
self.__logger.exception('Cannot determine mime type')
try:
if fobj:
res = (mime, fobj.indexContent(content,filename,fname or realfname) )
else:
self.__logger.debug("Have no object, return (%s, None)", mime)
res = (mime, None )
except Exception:
self.__logger.exception("Could not index file %s (%s)",
filename, fname or realfname)
res = None
# If we created a tmp file, unlink it now
if not realfname and fname:
try:
os.unlink(fname)
except Exception:
self.__logger.exception("Could not unlink %s", fname)
return res
cntIndex = contentIndex()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Python
| 0 |
@@ -5234,17 +5234,16 @@
-#
fp = Pop
@@ -5269,94 +5269,8 @@
mime
--type',fname%5D, shell=False, stdout=PIPE).stdout%0A fp = Popen(%5B'file','-b
',fn
@@ -5406,14 +5406,20 @@
lt.s
-trip()
+plit(';')%5B0%5D
%0A
|
64bb565ad1a7785fdd4a65f92ba26f73425c4e63
|
Add a hack so we are still able to answer to users requesting "tor-browser-bundle"
|
lib/gettor/requests.py
|
lib/gettor/requests.py
|
# Copyright (c) 2008 - 2011, Jacob Appelbaum <[email protected]>,
# Christian Fromme <[email protected]>
# This is Free Software. See LICENSE for license information.
import sys
import email
import re
import logging
import gettor.utils
import gettor.packages
import gettor.filters
class requestMail:
def __init__(self, config):
""" Read message from stdin, try to assign some values already.
"""
# Read email from stdin
self.rawMessage = sys.stdin.read()
self.parsedMessage = email.message_from_string(self.rawMessage)
self.config = config
self.request = {}
self.request['user'] = self.parsedMessage["Return-Path"]
# Normalize address before hashing
normalized_addr = gettor.utils.normalizeAddress(self.request['user'])
self.request['hashed_user'] = gettor.utils.getHash(normalized_addr)
self.request['ouraddr'] = self.getRealTo(self.parsedMessage["to"])
self.request['locale'] = self.getLocaleInTo(self.request['ouraddr'])
self.request['package'] = None
self.request['split'] = False
self.request['forward'] = None
self.request['valid'] = False # This will get set by gettor.filters
def getRealTo(self, toField):
"""If someone wrote to `[email protected]', the `From:' field
in our reply should reflect that. So, use the `To:' field from the
incoming mail, but filter out everything except the gettor@ address.
"""
regexGettor = '.*(<)?(gettor.*@.*torproject.org)+(?(1)>).*'
toField = gettor.filters.doToAddressHack(toField)
logging.debug("toField: %s" % toField)
match = re.match(regexGettor, toField)
if match:
return match.group(2)
else:
# Fall back to default From: address
return self.config.MAIL_FROM
def getLocaleInTo(self, address):
"""See whether the user sent his email to a 'plus' address, for
instance to gettor+fa@tpo. Plus addresses are the current
mechanism to set the reply language
"""
regexPlus = '.*(<)?(\w+\+(\w+)@\w+(?:\.\w+)+)(?(1)>)'
match = re.match(regexPlus, address)
if match:
locale = match.group(3)
logging.debug("User requested language %s" % locale)
return self.checkAndGetLocale(locale)
else:
logging.debug("Not a 'plus' address")
return self.config.DEFAULT_LOCALE
def parseMail(self):
"""Main mail parsing routine. Returns a RequestVal value class
"""
if self.parsedMessage.is_multipart():
for part in self.parsedMessage.walk():
if part.get_content_maintype() == "text":
# We found a text part, parse it
self.parseTextPart(part.get_payload(decode=1))
else:
# Not a multipart message, just parse along what we've got
self.parseTextPart(self.parsedMessage.get_payload(decode=1))
if self.request['package'] is None:
logging.debug("User didn't select any packages")
return self.request
def parseTextPart(self, text):
"""If we've found a text part in a multipart email or if we just want
to parse a non-multipart message, this is the routine to call with
the text body as its argument
"""
lines = gettor.utils.stripHTMLTags(text).split('\n')
for line in lines:
if self.request['package'] is None:
self.request['package'] = self.matchPackage(line)
if self.request['split'] is False:
self.request['split'] = self.matchSplit(line)
if self.request['forward'] is None:
self.request['forward'] = self.matchForwardCommand(line)
def matchPackage(self, line):
"""Look up which package the user is requesting.
"""
for p in self.config.PACKAGES.keys():
matchme = ".*" + p + ".*"
match = re.match(matchme, line, re.DOTALL)
if match:
logging.debug("User requested package %s" % p)
return p
return None
def matchSplit(self, line):
"""If we find 'split' somewhere we assume that the user wants a split
delivery
"""
match = re.match("\s*split.*", line, re.DOTALL)
if match:
logging.debug("User requested a split delivery")
return True
else:
return False
def matchForwardCommand(self, line):
"""Check if we have a command from the GetTor admin in this email.
Command lines always consists of the following syntax:
'Command: <password> <command part 1> <command part 2>'
For the forwarding command, part 1 is the email address of the
recipient, part 2 is the package name of the package that needs
to be forwarded.
The password is checked against the password found in the file
configured as cmdPassFile in the GetTor configuration.
"""
match = re.match(".*[Cc]ommand:\s+(.*)$", line, re.DOTALL)
if match:
logging.debug("Command received from %s" % self.request['user'])
cmd = match.group(1).split()
length = len(cmd)
assert length == 3, "Wrong command syntax"
auth = cmd[0]
# Package is parsed by the usual package parsing mechanism
package = cmd[1]
address = cmd[2]
verified = gettor.utils.verifyPassword(self.config, auth)
assert verified == True, \
"Unauthorized attempt to command from: %s" \
% self.request['user']
return address
else:
return None
def checkAndGetLocale(self, locale):
"""Look through our aliases list for languages and check if the user
requested an alias rather than an 'official' language name. If he
does, map back to that official name. Also, if the user didn't
request a language we support, fall back to default.
"""
for (lang, aliases) in self.config.SUPP_LANGS.items():
if lang == locale:
return locale
if aliases is not None:
if locale in aliases:
logging.debug("Request for %s via alias %s" % (lang, locale))
# Return the 'official' name
return lang
else:
logging.debug("Requested language %s not supported. Fallback: %s" \
% (self.replyLocale, self.config.DEFAULT_LOCALE))
self.replyLocale = self.config.DEFAULT_LOCALE
return self.config.DEFAULT_LOCALE
def getRawMessage(self):
return self.rawMessage
|
Python
| 0 |
@@ -3989,32 +3989,267 @@
ng.%0A %22%22%22%0A
+ # XXX HACK ALERT: This makes it possible for users to still request%0A # the windows bundle by its old name%0A packages_hacked = self.config.PACKAGES%0A packages_hacked%5B'tor-browser-bundle'%5D = ()%0A
for p in
|
116babc38e2e4023eb0b45eabc02050ed433e240
|
Include a helpful MOD analyser script
|
scripts/mod_info.py
|
scripts/mod_info.py
|
Python
| 0 |
@@ -0,0 +1,1262 @@
+# mod_info.py
#
# Display information about a Protracker module.
#
# Written & released by Keir Fraser <[email protected]>
#
# This is free and unencumbered software released into the public domain.
# See the file COPYING for more details, or visit <http://unlicense.org>.

import struct, sys

with open(sys.argv[1], "rb") as f:
    dat = f.read()
dlen = len(dat)

tname, = struct.unpack("20s", dat[:20])
print("Name: '%s'" % tname.decode('utf-8'))
dat = dat[20:]
samples_len = 0
for i in range(31):
    name, wordlen, finetune, volume, repstart, replen = struct.unpack(
        ">22sH2B2H", dat[:30])
    dat = dat[30:]
    if wordlen == 0:
        continue
    samples_len += wordlen*2
print("Sample Data: %u" % samples_len)

songlen, pad = struct.unpack("2B", dat[:2])
dat = dat[2:]
#assert pad == 127
assert songlen <= 128
print("Song Length: %u" % songlen)

patterns = list(struct.unpack("128B", dat[:128]))
dat = dat[128:]
patterns = patterns[:songlen]
nr_patterns = max(patterns)+1
print("Nr Patterns: %u (%u bytes)" % (nr_patterns, nr_patterns*1024))

mksig, = struct.unpack("4s", dat[:4])
dat = dat[4:]
assert mksig == b'M.K.'

totlen = 1084 + nr_patterns*1024 + samples_len
print("Total Bytes: %u (0x%x)" % (totlen, totlen))
assert totlen <= dlen
|
|
e9576468046fd53195f139f5751c9d45f26c51c4
|
handle NER exceptions.
|
aleph/analyze/polyglot_entity.py
|
aleph/analyze/polyglot_entity.py
|
from __future__ import absolute_import
import logging
from collections import defaultdict
from polyglot.text import Text
from aleph.core import db
from aleph.model import Reference, Entity, Collection
from aleph.model.entity_details import EntityIdentifier
from aleph.analyze.analyzer import Analyzer
log = logging.getLogger(__name__)
SCHEMAS = {
'I-PER': '/entity/person.json#',
'I-ORG': '/entity/organization.json#'
}
DEFAULT_SCHEMA = '/entity/entity.json#'
class PolyglotEntityAnalyzer(Analyzer):
origin = 'polyglot'
def prepare(self):
self.disabled = not self.document.source.generate_entities
self.entities = defaultdict(list)
def on_text(self, text):
if text is None or len(text) <= 100:
return
text = Text(text)
if len(self.meta.languages) == 1:
text.hint_language_code = self.meta.languages[0]
for entity in text.entities:
if entity.tag == 'I-LOC':
continue
parts = [t for t in entity if t.lower() != t.upper()]
if len(parts) < 2:
continue
entity_name = ' '.join(parts)
if len(entity_name) < 5 or len(entity_name) > 150:
continue
schema = SCHEMAS.get(entity.tag, DEFAULT_SCHEMA)
self.entities[entity_name].append(schema)
def load_collection(self):
if not hasattr(self, '_collection'):
self._collection = Collection.by_foreign_id('polyglot:ner', {
'label': 'Automatically Extracted Persons and Companies',
'public': True
})
return self._collection
def load_entity(self, name, schema):
q = db.session.query(EntityIdentifier)
q = q.order_by(EntityIdentifier.deleted_at.desc().nullsfirst())
q = q.filter(EntityIdentifier.scheme == self.origin)
q = q.filter(EntityIdentifier.identifier == name)
ident = q.first()
if ident is not None:
if ident.deleted_at is None:
return ident.entity_id
if ident.entity.deleted_at is None:
return None
data = {
'name': name,
'$schema': schema,
'state': Entity.STATE_PENDING,
'identifiers': [{
'scheme': self.origin,
'identifier': name
}],
'collections': [self.load_collection()]
}
entity = Entity.save(data)
return entity.id
def finalize(self):
output = []
for entity_name, schemas in self.entities.items():
schema = max(set(schemas), key=schemas.count)
output.append((entity_name, len(schemas), schema))
Reference.delete_document(self.document.id, origin=self.origin)
for name, weight, schema in output:
entity_id = self.load_entity(name, schema)
if entity_id is None:
continue
ref = Reference()
ref.document_id = self.document.id
ref.entity_id = entity_id
ref.origin = self.origin
ref.weight = weight
db.session.add(ref)
log.info('Polyglot extraced %s entities.', len(output))
|
Python
| 0 |
@@ -759,16 +759,33 @@
return%0A
+ try:%0A
@@ -806,24 +806,28 @@
xt)%0A
+
+
if len(self.
@@ -848,16 +848,20 @@
) == 1:%0A
+
@@ -913,32 +913,36 @@
ages%5B0%5D%0A
+
+
for entity in te
@@ -958,32 +958,36 @@
es:%0A
+
if entity.tag ==
@@ -996,16 +996,20 @@
I-LOC':%0A
+
@@ -1037,16 +1037,20 @@
+
parts =
@@ -1103,24 +1103,28 @@
+
if len(parts
@@ -1138,32 +1138,36 @@
+
+
continue%0A
@@ -1151,32 +1151,36 @@
continue%0A
+
enti
@@ -1213,24 +1213,28 @@
+
+
if len(entit
@@ -1280,32 +1280,36 @@
+
continue%0A
@@ -1293,32 +1293,36 @@
continue%0A
+
sche
@@ -1370,32 +1370,36 @@
MA)%0A
+
self.entities%5Ben
@@ -1423,16 +1423,94 @@
(schema)
+%0A except Exception as ex:%0A log.warning('NER failed: %25r', ex)
%0A%0A de
|
9e7acd4e7d80cffb0274e3a01aee517fb63d3db9
|
Create Josuel_Concordance.py
|
Josuel_Concordance.py
|
Josuel_Concordance.py
|
Python
| 0 |
@@ -0,0 +1,2281 @@
+# Author: Josuel Musambaghani

# library that breaks text into parts
import nltk
import string

with open('c:/Python27/fileIn.txt', 'r') as in_file:
    text = in_file.read()
    f = nltk.sent_tokenize(text)


# This code deals with the proble of parenthesis
for item in range(len(f)-1):
	if '(' in f[item] and ')' in f[item+1]:
		f[item] += ' ' + f[item+1]
		f.remove(f[item+1])

'''
# This code solve the problem of having punctuations appended to words
# when running. For example 'english:' and 'english' that might be consider
# as different because of the punctuation mark

punctuations = ['.', ':', ':', "'", ',', '...', '?', '!', '~']
g = []
for elt in f:
    for mark in punctuations:
        if mark in elt:
            z = elt.split(mark)
            new = z[0] + z[1]
            g.append(new)

print g

################################################################

for elt in f:
    for let in elt[len(elt)-2:]:
        if let in string.punctuation:
            elt = elt.replace(let, "")

for elt in f:
    for let in elt[:1]:
        if let in string.punctuation:
            elt = elt.replace(let, "")

print f

'''

# count and display results of counted words
myDict = {}
linenum = -1

for line in f:
    line = line.strip()
    line = line.lower()
    line = line.split()
    linenum += 1
    for word in line:
###################################################
# Trying to eliminate punctuations that are appended to words
        if word in string.punctuation:
            line.remove(word)

        for elt in word[len(word)-2:]:
            if "e.g." in word:
                continue
            elif elt in string.punctuation:
                word = word.replace(elt, "")

        for elt in word[:1]:
            if elt in string.punctuation:
                word = word.replace(elt, "")

###################################################
# the code continues as normal ...

        word = word.strip()
        word = word.lower()

        if not word in myDict:
            myDict[word] = []
        myDict[word].append(linenum)

print "%-15s %5s %s" %("Word", 'Count', "Line Numbers")

print "%-15s %5s %s" %("====", '=====', "============")

for key in sorted(myDict):
    print '%-15s %5d: %s' % (key, len(myDict[key]), myDict[key])
|
|
2032a823b2dad6f7cebb63ee276bcfb6ea02b7a0
|
improve notes
|
notes/code/lolviz.py
|
notes/code/lolviz.py
|
Python
| 0.000001 |
@@ -0,0 +1,1790 @@
+import graphviz

def lolviz(table):
    """
    Given a list of lists such as:

    [ [('a','3')], [], [('b',230), ('c',21)] ]

    return the dot/graphviz to display as a two-dimensional
    structure.
    """
    s = """
    digraph G {
        nodesep=.05;
        rankdir=LR;
        node [shape=record,width=.1,height=.1];
    """
    # Make outer list as vertical
    labels = []
    for i in range(len(table)):
        bucket = table[i]
        if len(bucket)==0: labels.append(str(i))
        else: labels.append("<f%d> %d" % (i,i))

    s += '    mainlist [color="#444443", fontsize="9", fontcolor="#444443", fontname="Helvetica", style=filled, fillcolor="#D9E6F5", label = "'+'|'.join(labels)+'"];\n'

    # define inner lists
    for i in range(len(table)):
        bucket = table[i]
        if not bucket or len(bucket)==0: continue
        elements = []
        for j, el in enumerate(bucket):
            if type(el)==tuple and len(el)==2: els = "%s→%s" % el
            else: els = repr(el)
            elements.append('<table BORDER="0" CELLBORDER="1" CELLSPACING="0"><tr><td cellspacing="0" bgcolor="#FBFEB0" border="1" sides="b" valign="top"><font color="#444443" point-size="9">%d</font></td></tr><tr><td bgcolor="#FBFEB0" border="0" align="center">%s</td></tr></table>' % (j, els))
        s += 'node%d [color="#444443", fontname="Helvetica", margin="0.01", space="0.0", shape=record label=<{%s}>];\n' % (i, '|'.join(elements))

    # Do edges
    for i in range(len(table)):
        bucket = table[i]
        if not bucket or len(bucket)==0: continue
        s += 'mainlist:f%d -> node%d [arrowsize=.5]\n' % (i,i)
    s += "}\n"
    print s
    return s

x = [ [('a','3')], [], [('b',230), ('c',21)] ]
dot = lolviz(x)
g = graphviz.Source(dot)
g.render(view=True)
|
|
70b6fde787018daf5b87f485e60c9a26fa542f2e
|
add basic affine 3D transforms
|
lab_3/affine_transform.py
|
lab_3/affine_transform.py
|
Python
| 0.000008 |
@@ -0,0 +1,1469 @@
+from util.matrix import Matrix
from math import cos, sin


def translation(x, y, z):
    return Matrix([
        [1, 0, 0, x],
        [0, 1, 0, y],
        [0, 0, 1, z],
        [0, 0, 0, 1]
    ])

# den = (phi ** 2 + psi ** 2) ** .5
# phi /= den
# psi /= den
# return Matrix([
#     [phi, -psi, 0],
#     [psi, phi, 0],
#     [0, 0, 1]
# ])


def rotation_x(phi):
    c = cos(phi)
    s = sin(phi)
    return Matrix([
        [1, 0, 0, 0],
        [0, c, -s, 0],
        [0, s, c, 0],
        [0, 0, 0, 1]
    ])


def rotation_y(phi):
    c = cos(phi)
    s = sin(phi)
    return Matrix([
        [c, 0, s, 0],
        [0, 1, 0, 0],
        [-s, 0, c, 0],
        [0, 0, 0, 1]
    ])


def rotation_z(phi):
    c = cos(phi)
    s = sin(phi)
    return Matrix([
        [c, -s, 0, 0],
        [s, c, 0, 0],
        [0, 0, 1, 0],
        [0, 0, 0, 1]
    ])


def scaling(kx, ky=None, kz=None):
    if ky is None and kz is None:
        ky = kz = kx
    return Matrix([
        [kx, 0, 0, 0],
        [0, ky, 0, 0],
        [0, 0, kz, 0],
        [0, 0, 0, 1]
    ])


mirroring_x = Matrix([
    [1, 0, 0, 0],
    [0, -1, 0, 0],
    [0, 0, -1, 0],
    [0, 0, 0, 1]
])

mirroring_y = Matrix([
    [-1, 0, 0, 0],
    [0, 1, 0, 0],
    [0, 0, -1, 0],
    [0, 0, 0, 1]
])

mirroring_z = Matrix([
    [-1, 0, 0, 0],
    [0, -1, 0, 0],
    [0, 0, 1, 0],
    [0, 0, 0, 1]
])
|
|
786f75be946427024fa96ae8dcd06d8d1ecd49cc
|
Add the init method to the node model.
|
model/node.py
|
model/node.py
|
Python
| 0 |
@@ -0,0 +1,147 @@
+class NodeModel(Query):
    def __init__(self, db):
        self.db = db
        self.table_name = "node"
        super(NodeModel, self).__init__()
|
|
07467664b699612e10b51bbeafdce79a9d1e0127
|
Write unit test for utility functions
|
test/test_util.py
|
test/test_util.py
|
Python
| 0.000002 |
@@ -0,0 +1,1844 @@
+from __future__ import unicode_literals

try:
    import io
    StringIO = io.StringIO
except ImportError:
    import StringIO
    StringIO = StringIO.StringIO
import os
import shutil
import sys
import tempfile
import unittest

import cudnnenv


class TestSafeTempDir(unittest.TestCase):

    def test_safe_temp_dir(self):
        with cudnnenv.safe_temp_dir() as path:
            self.assertTrue(os.path.exists(path))
        self.assertFalse(os.path.exists(path))

    def test_safe_temp_dir_error(self):
        try:
            with cudnnenv.safe_temp_dir() as path:
                raise Exception
        except Exception:
            pass
        self.assertFalse(os.path.exists(path))


class TestSafeDir(unittest.TestCase):

    def setUp(self):
        self.path = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.path, ignore_errors=True)

    def test_safe_dir(self):
        path = os.path.join(self.path, 'd')
        with cudnnenv.safe_dir(path) as p:
            self.assertTrue(os.path.exists(p))
        self.assertTrue(os.path.exists(path))

    def test_safe_dir_error(self):
        path = os.path.join(self.path, 'd')
        try:
            with cudnnenv.safe_dir(path) as p:
                raise Exception
        except Exception:
            pass
        self.assertFalse(os.path.exists(p))
        self.assertFalse(os.path.exists(path))


class TestYesNo(unittest.TestCase):

    def tearDown(self):
        sys.stdin = sys.__stdin__

    def test_yes(self):
        sys.stdin = StringIO('y\n')
        self.assertTrue(cudnnenv.yes_no_query('q'))

    def test_no(self):
        sys.stdin = StringIO('n\n')
        self.assertFalse(cudnnenv.yes_no_query('q'))

    def test_invalid(self):
        sys.stdin = StringIO('a\nb\nc\nd\ny\nn\n')
        self.assertTrue(cudnnenv.yes_no_query('q'))
|
|
448f18769d7c701d9dd03ff65489656380513d07
|
Add test init.
|
tests/__init__.py
|
tests/__init__.py
|
Python
| 0 |
@@ -0,0 +1,949 @@
+from flexmock import flexmock
from flask.ext.storage import MockStorage
from flask_uploads import init

created_objects = []
added_objects = []
deleted_objects = []
committed_objects = []


class MockModel(object):
    def __init__(self, **kw):
        created_objects.append(self)
        for key, val in kw.iteritems():
            setattr(self, key, val)

db_mock = flexmock(
    Column=lambda *a, **kw: ('column', a, kw),
    Integer=('integer', [], {}),
    Unicode=lambda *a, **kw: ('unicode', a, kw),
    Model=MockModel,
    session=flexmock(
        add=added_objects.append,
        commit=lambda: committed_objects.extend(
            added_objects + deleted_objects
        ),
        delete=deleted_objects.append,
    ),
)


class TestCase(object):
    def setup_method(self, method, resizer=None):
        init(db_mock, MockStorage, resizer)
        self.db = db_mock
        self.Storage = MockStorage
        self.resizer = resizer
|
|
256648ad4effd9811d7c35ed6ef45de67f108926
|
Add pytest option for specifying the typing module to use
|
tests/conftest.py
|
tests/conftest.py
|
Python
| 0 |
@@ -0,0 +1,325 @@
+import sys


def pytest_addoption(parser):
    parser.addoption('--typing', action='store', default='typing')


def pytest_configure(config):
    if config.option.typing == 'no':
        sys.modules['typing'] = None
    elif config.option.typing != 'typing':
        sys.modules['typing'] = __import__(config.option.typing)
|
|
08f6d31feb493b24792eaabfa11d08faea68c62b
|
add textample plug
|
plugins/textample/textample.py
|
plugins/textample/textample.py
|
Python
| 0 |
@@ -0,0 +1,1195 @@
+# coding=utf-8

import gzip
import os
import random
import re


def search(regex, base_dir, file_contains=''):
    reg = re.compile(regex, re.IGNORECASE)
    for root, _, files in os.walk(base_dir):
        for file in files:
            if file.endswith('.gz'):
                file_path = os.path.join(root, file)
                if file_contains not in file_path:
                    continue
                with gzip.open(file_path) as f:
                    for line in f:
                        line = line.decode('utf-8')
                        if reg.search(line):
                            yield (file_path[len(base_dir) + 1:-3], ' '.join(line.split()))


[email protected]
[email protected]('example', 'ex')
def example(argv):
    """Regex search for sentences. Usage: example <regex> [file]"""
    if len(argv) < 2:
        return

    base = os.path.join(os.path.dirname(__file__), 'texts')
    if not os.path.isdir(base):
        return 'Directory %s does not exist' % base

    se = search(argv[1], base, file_contains=argv[2] if len(argv) > 2 else '')
    try:
        return '%s: %s' % random.choice(list(se))
    except IndexError as e:
        return 'No matching sentences found'
|
|
6638431cbfef10b70b338b62a116661ea66c42b0
|
Remove unused import
|
nose2/main.py
|
nose2/main.py
|
import logging
import os
import sys
from nose2.compat import unittest
from nose2 import events, loader, runner, session, util
log = logging.getLogger(__name__)
__unittest = True
class PluggableTestProgram(unittest.TestProgram):
sessionClass = session.Session
loaderClass = loader.PluggableTestLoader
runnerClass = runner.PluggableTestRunner
defaultPlugins = ['nose2.plugins.loader.discovery',
'nose2.plugins.loader.testcases',
'nose2.plugins.loader.functions',
'nose2.plugins.loader.generators',
'nose2.plugins.loader.parameters',
'nose2.plugins.result',
'nose2.plugins.collect',
'nose2.plugins.logcapture',
# etc
]
# XXX override __init__ to warn that testLoader and testRunner are ignored?
def parseArgs(self, argv):
log.debug("parse argv %s", argv)
self.session = self.sessionClass()
self.argparse = self.session.argparse # for convenience
# XXX force these? or can it be avoided?
self.testLoader = self.loaderClass(self.session)
# Parse initial arguments like config file paths, verbosity
self.setInitialArguments()
# FIXME -h here makes processing stop.
cfg_args, argv = self.argparse.parse_known_args(argv[1:])
self.handleCfgArgs(cfg_args)
# Parse arguments for plugins (if any) and test names
self.argparse.add_argument('testNames', nargs='*')
args, argv = self.argparse.parse_known_args(argv)
if argv:
self.argparse.error("Unrecognized arguments: %s" % ' '.join(argv))
self.handleArgs(args)
self.createTests()
def setInitialArguments(self):
self.argparse.add_argument(
'-s', '--start-dir', default='.',
help="Directory to start discovery ('.' default)")
self.argparse.add_argument(
'-t', '--top-level-directory', '--project-directory',
help='Top level directory of project (defaults to start dir)')
self.argparse.add_argument('--config', '-c', nargs='?', action='append',
default=['unittest.cfg', 'nose2.cfg'])
self.argparse.add_argument('--no-user-config', action='store_const',
dest='user_config', const=False, default=True)
self.argparse.add_argument('--no-plugins', action='store_const',
dest='load_plugins', const=False, default=True)
self.argparse.add_argument('--verbose', '-v', action='count', default=0)
self.argparse.add_argument('--quiet', action='store_const',
dest='verbose', const=0)
def handleCfgArgs(self, cfg_args):
if cfg_args.verbose:
self.session.verbosity += cfg_args.verbose
self.session.startDir = cfg_args.start_dir
if cfg_args.top_level_directory:
self.session.topLevelDir = cfg_args.top_level_directory
self.session.loadConfigFiles(*self.findConfigFiles(cfg_args))
self.session.prepareSysPath()
if cfg_args.load_plugins:
self.loadPlugins()
def findConfigFiles(self, cfg_args):
filenames = cfg_args.config[:]
proj_opts = ('unittest.cfg', 'nose2.cfg')
for fn in proj_opts:
if cfg_args.top_level_directory:
fn = os.path.abspath(
os.path.join(cfg_args.top_level_directory, fn))
filenames.append(fn)
if cfg_args.user_config:
user_opts = ('~/.unittest.cfg', '~/.nose2.cfg')
for fn in user_opts:
filenames.append(os.path.expanduser(fn))
return filenames
def handleArgs(self, args):
# FIXME pass arguments to session & plugins
self.testNames = args.testNames
def loadPlugins(self):
# FIXME also pass in plugins set via __init__ args
self.session.loadPlugins(self.defaultPlugins)
def createTests(self):
# XXX belongs in init?
log.debug("Create tests from %s/%s", self.testNames, self.module)
if self.module and '__unittest' in dir(self.module):
self.module = None
self.test = self.testLoader.loadTestsFromNames(
self.testNames, self.module)
def runTests(self):
# fire plugin hook
runner = self._makeRunner()
self.result = runner.run(self.test)
if self.exit:
sys.exit(not self.result.wasSuccessful())
def _makeRunner(self):
runner = self.runnerClass(self.session)
event = events.RunnerCreatedEvent(runner)
self.session.hooks.runnerCreated(event)
return event.runner
main_ = PluggableTestProgram
|
Python
| 0.000001 |
@@ -117,14 +117,8 @@
sion
-, util
%0A%0A%0Al
|
da3248f782d83c46b698c31736b29a42d380511c
|
Add the playground
|
micro/_playground.py
|
micro/_playground.py
|
Python
| 0.999869 |
@@ -0,0 +1,738 @@
+CODE = '''
out str + 2 3
'''

if __name__ == '__main__':
    import lexer
    import preparser
    import parser
    import builtin_functions
    import sys
    import evaluate

    specific_lexer = lexer.Lexer()
    specific_preparser = preparser.Preparser(specific_lexer)
    preast = specific_preparser.preparse(CODE)
    specific_parser = parser.Parser()
    ast = specific_parser.parse(preast, builtin_functions.BUILTIN_FUNCTIONS)
    errors = specific_lexer.get_errors() + specific_preparser.get_errors() + specific_parser.get_errors()
    for some_error in errors:
        some_error.detect_position(CODE)
        print(some_error)
    if errors:
        sys.exit()

    evaluate.evaluate(ast, builtin_functions.BUILTIN_FUNCTIONS)
|
|
682b064f29c7a6cfea0c9866da03703822e70cb3
|
Add machinery to slurp dhcpd.leases journal into usable format.
|
propernoun/leases.py
|
propernoun/leases.py
|
Python
| 0 |
@@ -0,0 +1,505 @@
+from . import parser
from . import watch

def gen_leases(path):
    """
    Keep track of currently valid leases for ISC dhcpd.

    Yields dictionaries that map ``ip`` to information about the
    lease. Will block until new information is available.
    """
    g = watch.watch_dhcp_leases(path)
    for _ in g:
        with file(path) as f:
            s = f.read()
        leases = {}
        for l in parser.parse(s):
            assert 'ip' in l
            leases[l['ip']] = l
        yield leases
|
|
ba6c50d0b2fd973c34f2df3779d78df11f671598
|
Create mongo_import_keywords.py
|
mongo_import_keywords.py
|
mongo_import_keywords.py
|
Python
| 0.00004 |
@@ -0,0 +1,2116 @@
+"""
Load mongo database with keywords for annie annotation.
The keyword_array pickle is packaged with the GRITS classifier.
"""
import sys
import re
import pickle
from pymongo import MongoClient

def load_keyword_array(file_path):
    with open(file_path) as f:
        keyword_array = pickle.load(f)
    return keyword_array

def insert_set(names_set, collection):
    """Insert a list of names into a collection"""

    for name in names_set:
        collection.insert({'_id': name})


if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--mongo_url", default='localhost'
    )
    parser.add_argument(
        "--db_name", default='annotation'
    )
    args = parser.parse_args()
    client = MongoClient(args.mongo_url)
    db = client[args.db_name]

    category_labels = {
        'doid/diseases': 'diseases',
        'eha/disease': 'diseases',
        'pm/disease': 'diseases',
        'hm/disease': 'diseases',
        'biocaster/diseases': 'diseases',
        'eha/symptom': 'symptoms',
        'biocaster/symptoms': 'symptoms',
        'doid/has_symptom': 'symptoms',
        'pm/symptom': 'symptoms',
        'symp/symptoms': 'symptoms',
        'wordnet/hosts': 'hosts',
        'eha/vector': 'hosts',
        'wordnet/pathogens': 'pathogens',
        'biocaster/pathogens': 'pathogens',
        'pm/mode of transmission': 'modes',
        'doid/transmitted_by': 'modes',
        'eha/mode of transmission': 'modes'
    }

    collection_labels = set(category_labels.values())
    for collection in collection_labels:
        db[collection].drop()

    keyword_array = load_keyword_array('current_classifier/keyword_array.p')

    for keyword in keyword_array:
        if keyword['category'] in category_labels:
            collection = category_labels[keyword['category']]

            db[collection].insert(
                { '_id': keyword['keyword'],
                  'source': keyword['category'],
                  'linked_keywords': keyword['linked_keywords'],
                  'case_sensitive': keyword['case_sensitive']} )
|
|
f9917d04f1147fd9bc147bf1a7b5b4797a37207c
|
fix to a buggy community searching code in processIncomingMsg (thanks to Paul Warner for pointing out)
|
pysnmp/proto/secmod/rfc2576.py
|
pysnmp/proto/secmod/rfc2576.py
|
# SNMP v1 & v2c security models implementation
from pyasn1.codec.ber import encoder
from pysnmp.proto.secmod import base
from pysnmp.smi.error import NoSuchInstanceError
from pysnmp.proto import error
from pysnmp import debug
class SnmpV1SecurityModel(base.AbstractSecurityModel):
securityModelID = 1
# According to rfc2576, community name <-> contextEngineId/contextName
# mapping is up to MP module for notifications but belongs to secmod
# responsibility for other PDU types. Since I do not yet understand
# the reason for this de-coupling, I've moved this code from MP-scope
# in here.
def generateRequestMsg(
self,
snmpEngine,
messageProcessingModel,
globalData,
maxMessageSize,
securityModel,
securityEngineId,
securityName,
securityLevel,
scopedPDU
):
msg, = globalData
contextEngineId, contextName, pdu = scopedPDU
# rfc2576: 5.2.3
( snmpCommunityName,
snmpCommunitySecurityName,
snmpCommunityContextEngineId,
snmpCommunityContextName ) = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols(
'SNMP-COMMUNITY-MIB',
'snmpCommunityName',
'snmpCommunitySecurityName',
'snmpCommunityContextEngineID',
'snmpCommunityContextName'
)
mibNodeIdx = snmpCommunitySecurityName
while 1:
try:
mibNodeIdx = snmpCommunitySecurityName.getNextNode(
mibNodeIdx.name
)
except NoSuchInstanceError:
break
if mibNodeIdx.syntax != securityName:
continue
instId = mibNodeIdx.name[len(snmpCommunitySecurityName.name):]
mibNode = snmpCommunityContextEngineId.getNode(
snmpCommunityContextEngineId.name + instId
)
if mibNode.syntax != contextEngineId:
continue
mibNode = snmpCommunityContextName.getNode(
snmpCommunityContextName.name + instId
)
if mibNode.syntax != contextName:
continue
# XXX TODO: snmpCommunityTransportTag
mibNode = snmpCommunityName.getNode(
snmpCommunityName.name + instId
)
securityParameters = mibNode.syntax
debug.logger & debug.flagSM and debug.logger('generateRequestMsg: found community %s for securityName %s contextEngineId %s contextName %s' % (securityParameters, securityName, contextEngineId, contextName))
msg.setComponentByPosition(1, securityParameters)
msg.setComponentByPosition(2)
msg.getComponentByPosition(2).setComponentByType(pdu.tagSet, pdu)
wholeMsg = encoder.encode(msg)
return ( securityParameters, wholeMsg )
raise error.StatusInformation(
errorIndication = 'unknownCommunityName'
)
def generateResponseMsg(
self,
snmpEngine,
messageProcessingModel,
globalData,
maxMessageSize,
securityModel,
securityEngineID,
securityName,
securityLevel,
scopedPDU,
securityStateReference
):
# rfc2576: 5.2.2
msg, = globalData
contextEngineId, contextName, pdu = scopedPDU
cachedSecurityData = self._cachePop(securityStateReference)
communityName = cachedSecurityData['communityName']
debug.logger & debug.flagSM and debug.logger('generateResponseMsg: recovered community %s by securityStateReference %s' % (communityName, securityStateReference))
msg.setComponentByPosition(1, communityName)
msg.setComponentByPosition(2)
msg.getComponentByPosition(2).setComponentByType(pdu.tagSet, pdu)
wholeMsg = encoder.encode(msg)
return ( communityName, wholeMsg )
def processIncomingMsg(
self,
snmpEngine,
messageProcessingModel,
maxMessageSize,
securityParameters,
securityModel,
securityLevel,
wholeMsg,
msg
):
# rfc2576: 5.2.1
( communityName, srcTransport, destTransport ) = securityParameters
( snmpCommunityName,
snmpCommunitySecurityName,
snmpCommunityContextEngineId,
snmpCommunityContextName
) = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols(
'SNMP-COMMUNITY-MIB',
'snmpCommunityName',
'snmpCommunitySecurityName',
'snmpCommunityContextEngineID',
'snmpCommunityContextName'
)
mibNodeIdx = snmpCommunityName
while 1:
try:
mibNodeIdx = snmpCommunityName.getNextNode(
mibNodeIdx.name
)
except NoSuchInstanceError:
break
if mibNodeIdx.syntax != communityName:
continue
break
else:
snmpInBadCommunityNames, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMP-COMMUNITY-MIB', 'snmpInBadCommunityNames')
snmpInBadCommunityNames.syntax = snmpInBadCommunityNames.syntax+1
raise error.StatusInformation(
errorIndication = 'unknownCommunityName'
)
# XXX TODO: snmpCommunityTransportTag
instId = mibNodeIdx.name[len(snmpCommunityName.name):]
communityName = snmpCommunityName.getNode(
snmpCommunityName.name + instId
)
securityName = snmpCommunitySecurityName.getNode(
snmpCommunitySecurityName.name + instId
)
contextEngineId = snmpCommunityContextEngineId.getNode(
snmpCommunityContextEngineId.name + instId
)
contextName = snmpCommunityContextName.getNode(
snmpCommunityContextName.name + instId
)
snmpEngineID, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMP-FRAMEWORK-MIB', 'snmpEngineID')
debug.logger & debug.flagSM and debug.logger('processIncomingMsg: looked up securityName %s contextEngineId %s contextName %s by communityName %s' % (securityName, contextEngineId, contextName, communityName))
stateReference = self._cachePush(
communityName=communityName.syntax
)
securityEngineID = snmpEngineID.syntax
securityName = securityName.syntax
scopedPDU = (
contextEngineId.syntax, contextName.syntax,
msg.getComponentByPosition(2).getComponent()
)
maxSizeResponseScopedPDU = maxMessageSize - 128
securityStateReference = stateReference
debug.logger & debug.flagSM and debug.logger('processIncomingMsg: generated maxSizeResponseScopedPDU %s securityStateReference %s' % (maxSizeResponseScopedPDU, securityStateReference))
return ( securityEngineID,
securityName,
scopedPDU,
maxSizeResponseScopedPDU,
securityStateReference )
class SnmpV2cSecurityModel(SnmpV1SecurityModel):
securityModelID = 2
# XXX
# contextEngineId/contextName goes to globalData
|
Python
| 0 |
@@ -5051,134 +5051,8 @@
- break%0A if mibNodeIdx.syntax != communityName:%0A continue%0A break%0A else:%0A
@@ -5159,34 +5159,26 @@
bols('__SNMP
--COMMUNITY
+v2
-MIB', 'snmp
@@ -5199,16 +5199,20 @@
Names')%0A
+
@@ -5285,32 +5285,36 @@
x+1%0A
+
+
raise error.Stat
@@ -5320,32 +5320,36 @@
tusInformation(%0A
+
@@ -5397,33 +5397,110 @@
-)
+ )%0A if mibNodeIdx.syntax == communityName:%0A break
%0A %0A
|
a21ed2d12b763d93722b6c8e9f6d6ff39d15938c
|
add utility to fetch satellites and corresponding TLEs
|
python-files/get-satellites.py
|
python-files/get-satellites.py
|
Python
| 0 |
@@ -0,0 +1,1522 @@
+#!/usr/bin/env python3

"""
Utility to get the station information from a SatNOGS Network server.

Collects the paginated objects into a single JSON list and stores in a file.
"""

import json
import sqlite3
import requests

import orbit


# default expire time is 24 hours
orbit.tle.requests_cache.configure(expire_after=60*60*6)


URL = 'https://db.satnogs.org/api/satellites'
SATELLITES_JSON = 'satellites.json'
TLE_DB = 'tle.db'


# fetch known satellites
r = requests.get(URL)
satellites = r.json()

with open(SATELLITES_JSON, 'w') as fp:
    json.dump(satellites, fp)


conn = sqlite3.connect('file:' + TLE_DB, uri=True,
                       detect_types=sqlite3.PARSE_DECLTYPES)
cur = conn.cursor()
cur.execute('''CREATE TABLE IF NOT EXISTS tle
            (norad integer,
             epoch timestamp,
             line0 text,
             line1 text,
             line2 text,
             unique(norad, epoch)
            );''')

for sat in satellites:
    norad = sat['norad_cat_id']
    print(norad, end='')
    try:
        tle = orbit.satellite(norad)
    except KeyError:
        print(' ** not at CelesTrak')
        continue

    try:
        cur.execute(
            'INSERT INTO tle VALUES (?,?,?,?,?);',
            (norad, tle.epoch(), tle.tle_raw[0], tle.tle_raw[1], tle.tle_raw[2]))
    # 'INSERT OR IGNORE INTO ...' will suppress the exception
    except sqlite3.IntegrityError:
        pass
    else:
        print(' TLE updated', end='')
    finally:
        print()

conn.commit()
conn.close()
|
|
550873226ec0879a86fea2527b56535a329981b1
|
Add upcoming_match.py
|
upcoming_match.py
|
upcoming_match.py
|
Python
| 0.000001 |
@@ -0,0 +1,455 @@
+#! /usr/bin/env python%0A#%0A# Tests sending an upcoming_match notification via adb to The Blue Alliance%0A# Android app.%0A%0Aimport test_notification%0A%0A%0Ajson_data = %7B%22match_key%22: %222007cmp_sf1m3%22,%0A %22event_name%22: %22Championship - Einstein Field%22,%0A %22team_keys%22: %5B%22frc173%22,%22frc1319%22,%22frc1902%22,%22frc177%22,%22frc987%22,%22frc190%22%5D,%0A %22scheduled_time%22:12345,%0A %22predicted_time%22:122345%7D%0A%0Aif __name__ == '__main__':%0A test_notification.upcoming_match_command(json_data)%0A
|
|
1d8cbf94f127571358aee97677a09f7cea3bf3a7
|
Add helper functions to convert to/from bytes/unicode
|
p23serialize/util.py
|
p23serialize/util.py
|
Python
| 0.000001 |
@@ -0,0 +1,548 @@
+from . import str_mode%0A%0Aif str_mode == 'bytes':%0A unicode_type = unicode%0Aelse: # str_mode == 'unicode'%0A unicode_type = str%0A%0Adef recursive_unicode(obj):%0A if isinstance(obj, bytes):%0A return obj.decode('latin1')%0A elif isinstance(obj, list):%0A return %5Brecursive_unicode(_) for _ in obj%5D%0A else:%0A return obj%0A%0Adef recursive_bytes(obj):%0A if isinstance(obj, unicode_type):%0A return obj.encode('latin1')%0A elif isinstance(obj, list):%0A return %5Brecursive_bytes(_) for _ in obj%5D%0A else:%0A return obj%0A
|
|
01f21a16e4bcecccf51a565b51222ab18b79adb4
|
Add tests for shell utils.
|
st2common/tests/unit/test_util_shell.py
|
st2common/tests/unit/test_util_shell.py
|
Python
| 0 |
@@ -0,0 +1,2606 @@
+# Licensed to the StackStorm, Inc ('StackStorm') under one or more%0A# contributor license agreements. See the NOTICE file distributed with%0A# this work for additional information regarding copyright ownership.%0A# The ASF licenses this file to You under the Apache License, Version 2.0%0A# (the %22License%22); you may not use this file except in compliance with%0A# the License. You may obtain a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS,%0A# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0A# See the License for the specific language governing permissions and%0A# limitations under the License.%0A%0Aimport unittest2%0A%0Afrom st2common.util.shell import quote_unix%0Afrom st2common.util.shell import quote_windows%0A%0A%0Aclass ShellUtilsTestCase(unittest2.TestCase):%0A def test_quote_unix(self):%0A arguments = %5B%0A 'foo',%0A 'foo bar',%0A 'foo1 bar1',%0A '%22foo%22',%0A '%22foo%22 %22bar%22',%0A %22'foo bar'%22%0A %5D%0A expected_values = %5B%0A %22%22%22%0A foo%0A %22%22%22,%0A%0A %22%22%22%0A 'foo bar'%0A %22%22%22,%0A%0A %22%22%22%0A 'foo1 bar1'%0A %22%22%22,%0A%0A %22%22%22%0A '%22foo%22'%0A %22%22%22,%0A%0A %22%22%22%0A '%22foo%22 %22bar%22'%0A %22%22%22,%0A%0A %22%22%22%0A ''%22'%22'foo bar'%22'%22''%0A %22%22%22%0A %5D%0A%0A for argument, expected_value in zip(arguments, expected_values):%0A actual_value = quote_unix(value=argument)%0A expected_value = expected_value.lstrip()%0A self.assertEqual(actual_value, expected_value.strip())%0A%0A def test_quote_windows(self):%0A arguments = %5B%0A 'foo',%0A 'foo bar',%0A 'foo1 bar1',%0A '%22foo%22',%0A '%22foo%22 %22bar%22',%0A %22'foo bar'%22%0A %5D%0A expected_values = %5B%0A %22%22%22%0A foo%0A %22%22%22,%0A%0A %22%22%22%0A %22foo bar%22%0A %22%22%22,%0A%0A %22%22%22%0A %22foo1 bar1%22%0A %22%22%22,%0A%0A %22%22%22%0A %5C%5C%22foo%5C%5C%22%0A %22%22%22,%0A%0A %22%22%22%0A %22%5C%5C%22foo%5C%5C%22 %5C%5C%22bar%5C%5C%22%22%0A %22%22%22,%0A%0A %22%22%22%0A %22'foo bar'%22%0A %22%22%22%0A %5D%0A%0A for argument, expected_value in zip(arguments, expected_values):%0A actual_value = quote_windows(value=argument)%0A expected_value = expected_value.lstrip()%0A self.assertEqual(actual_value, expected_value.strip())%0A
|
|
f56a902f2e7ca45bb4bf1dfa7dacefd3fefff524
|
Create config.sample
|
zhwikt-broken-file-links/config.sample.py
|
zhwikt-broken-file-links/config.sample.py
|
Python
| 0 |
@@ -0,0 +1,70 @@
+# -*- coding: utf-8 -*-%0Acfg = %7B%0A%09%22category%22: %22Category:%E5%90%AB%E6%9C%89%E5%8F%97%E6%8D%9F%E6%96%87%E4%BB%B6%E9%93%BE%E6%8E%A5%E7%9A%84%E9%A1%B5%E9%9D%A2%22%0A%7D%0A
|
|
f55051cdfd6c358d9cb131c5fc7ae1e5e330d93e
|
return zero as timestamp when it is not present in riak cache (cherry picked from commit d4c290959524cb2bac2109f6867a0500ec8e7d9e)
|
mapproxy/cache/riak.py
|
mapproxy/cache/riak.py
|
# This file is part of the MapProxy project.
# Copyright (C) 2013 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement, absolute_import
import time
import threading
import hashlib
from io import BytesIO
from mapproxy.image import ImageSource
from mapproxy.cache.tile import Tile
from mapproxy.cache.base import (
TileCacheBase, FileBasedLocking,
tile_buffer, CacheBackendError,)
try:
import riak
except ImportError:
riak = None
import logging
log = logging.getLogger(__name__)
class UnexpectedResponse(CacheBackendError):
pass
class RiakCache(TileCacheBase, FileBasedLocking):
def __init__(self, nodes, protocol, bucket, tile_grid, lock_dir, use_secondary_index=False):
if riak is None:
raise ImportError("Riak backend requires 'riak' package.")
self.nodes = nodes
self.protocol = protocol
self.lock_cache_id = 'riak-' + hashlib.md5(nodes[0]['host'] + bucket).hexdigest()
self.lock_dir = lock_dir
self.lock_timeout = 60
self.request_timeout = self.lock_timeout * 1000 # riak timeout is in miliseconds
self.bucket_name = bucket
self.tile_grid = tile_grid
self.use_secondary_index = use_secondary_index
self._db_conn_cache = threading.local()
@property
def connection(self):
if not getattr(self._db_conn_cache, 'connection', None):
self._db_conn_cache.connection = riak.RiakClient(protocol=self.protocol, nodes=self.nodes)
return self._db_conn_cache.connection
@property
def bucket(self):
return self.connection.bucket(self.bucket_name)
def _get_object(self, coord):
(x, y, z) = coord
key = '%(z)d_%(x)d_%(y)d' % locals()
obj = False
try:
obj = self.bucket.get(key, r=1, timeout=self.request_timeout)
except Exception as e:
log.warn('error while requesting %s: %s', key, e)
if not obj:
obj = self.bucket.new(key=key, data=None, content_type='application/octet-stream')
return obj
def _get_timestamp(self, obj):
metadata = obj.usermeta
timestamp = metadata.get('timestamp')
if timestamp == None:
timestamp = float(time.time())
obj.usermeta = {'timestamp':str(timestamp)}
return float(timestamp)
def is_cached(self, tile):
if tile.coord is None or tile.source:
return True
res = self._get_object(tile.coord)
if not res.exists:
return False
tile.timestamp = self._get_timestamp(res)
tile.size = len(res.encoded_data)
return True
def _store_bulk(self, tiles):
for tile in tiles:
res = self._get_object(tile.coord)
with tile_buffer(tile) as buf:
data = buf.read()
res.encoded_data = data
res.usermeta = {
'timestamp': str(tile.timestamp),
'size': str(tile.size),
}
if self.use_secondary_index:
x, y, z = tile.coord
res.add_index('tile_coord_bin', '%02d-%07d-%07d' % (z, x, y))
res.store(return_body=False)
return True
def store_tile(self, tile):
if tile.stored:
return True
return self._store_bulk([tile])
def store_tiles(self, tiles):
tiles = [t for t in tiles if not t.stored]
return self._store_bulk(tiles)
def load_tile_metadata(self, tile):
if tile.timestamp:
return
# is_cached loads metadata
self.is_cached(tile)
def load_tile(self, tile, with_metadata=False):
if not tile.is_missing():
return True
res = self._get_object(tile.coord)
if res.exists:
tile_data = BytesIO(res.encoded_data)
tile.source = ImageSource(tile_data)
if with_metadata:
tile.timestamp = self._get_timestamp(res)
tile.size = len(res.encoded_data)
return True
return False
def remove_tile(self, tile):
if tile.coord is None:
return True
res = self._get_object(tile.coord)
if not res.exists:
# already removed
return True
res.delete()
return True
def _fill_metadata_from_obj(self, obj, tile):
tile_md = obj.usermeta
timestamp = tile_md.get('timestamp')
if timestamp:
tile.timestamp = float(timestamp)
def _key_iterator(self, level):
"""
Generator for all tile keys in `level`.
"""
# index() returns a list of all keys so we check for tiles in
# batches of `chunk_size`*`chunk_size`.
grid_size = self.tile_grid.grid_sizes[level]
chunk_size = 256
for x in range(grid_size[0]/chunk_size):
start_x = x * chunk_size
end_x = start_x + chunk_size - 1
for y in range(grid_size[1]/chunk_size):
start_y = y * chunk_size
end_y = start_y + chunk_size - 1
query = self.bucket.get_index('tile_coord_bin',
'%02d-%07d-%07d' % (level, start_x, start_y),
'%02d-%07d-%07d' % (level, end_x, end_y))
for link in query.run():
yield link.get_key()
def remove_tiles_for_level(self, level, before_timestamp=None):
bucket = self.bucket
client = self.connection
for key in self._key_iterator(level):
if before_timestamp:
obj = self.bucket.get(key, r=1)
dummy_tile = Tile((0, 0, 0))
self._fill_metadata_from_obj(obj, dummy_tile)
if dummy_tile.timestamp < before_timestamp:
obj.delete()
else:
riak.RiakObject(client, bucket, key).delete()
|
Python
| 0 |
@@ -2755,17 +2755,17 @@
mestamp
-=
+!
= None:%0A
@@ -2776,27 +2776,22 @@
-timestamp =
+return
float(t
@@ -2797,15 +2797,13 @@
time
-.time()
+stamp
)%0A
@@ -2800,32 +2800,37 @@
estamp)%0A
+%0A
obj.usermeta
@@ -2849,56 +2849,32 @@
mp':
-str(timestamp)%7D%0A%0A return float(timestamp)
+ '0'%7D%0A return 0.0
%0A%0A
|
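Decoded from the percent-encoded hunks above, this fix rewrites _get_timestamp so a cache entry without a stored timestamp reports 0.0 (and records '0') instead of fabricating the current time. A best-effort reconstruction of the post-commit method, assuming the surrounding RiakCache class from the old contents:

    def _get_timestamp(self, obj):
        metadata = obj.usermeta
        timestamp = metadata.get('timestamp')
        if timestamp != None:
            return float(timestamp)

        # no timestamp in the riak cache: record '0' and report zero
        obj.usermeta = {'timestamp': '0'}
        return 0.0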
18d129613c5a576b770a812f18ff05873925fb2c
|
refactor to a shorter version.
|
restclients/digitlib/curric.py
|
restclients/digitlib/curric.py
|
"""
This is the interface for interacting with the UW Libraries Web Service.
"""
import logging
from restclients.digitlib import get_resource
url_prefix = "/php/currics/service.php?code="
sln_prefix = "&sln="
quarter_prefix = "&quarter="
year_prefix = "&year="
logger = logging.getLogger(__name__)
def get_subject_guide(course_code, sln, quarter, year):
"""
:param sln: positive integer
:param year: four digit number
Return the string representing the url of
the Library subject guide page
"""
url = "%s%s%s%s%s%s%s%s" % (url_prefix,
course_code.replace(" ", "%20"),
sln_prefix, sln,
quarter_prefix, quarter,
year_prefix, year)
return _extract_url(get_resource(url))
def _extract_url(data_in_resp):
"""
:param data_in_resp: dict
Return the string representing the url
"""
if data_in_resp is not None:
if data_in_resp.get("Location") is not None:
return data_in_resp.get("Location")
if data_in_resp.get("location") is not None:
return data_in_resp.get("location")
logger.warn("Invalid library curric response: %s" % data_in_resp)
return None
|
Python
| 0.000174 |
@@ -993,33 +993,16 @@
if
-data_in_resp.get(
%22Locatio
@@ -999,37 +999,40 @@
f %22Location%22
-)
i
-s not None
+n data_in_resp
:%0A
@@ -1052,21 +1052,17 @@
_in_resp
-.get(
+%5B
%22Locatio
@@ -1059,25 +1059,25 @@
p%5B%22Location%22
-)
+%5D
%0A if
@@ -1076,33 +1076,16 @@
if
-data_in_resp.get(
%22locatio
@@ -1090,21 +1090,24 @@
ion%22
-)
i
-s not None
+n data_in_resp
:%0A
@@ -1139,13 +1139,9 @@
resp
-.get(
+%5B
%22loc
@@ -1146,17 +1146,17 @@
ocation%22
-)
+%5D
%0A log
|
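Decoded, the refactor replaces each .get(...) is not None test in _extract_url with a membership check and direct indexing. A best-effort reconstruction of the resulting function, not verified against the repository:

def _extract_url(data_in_resp):
    """
    :param data_in_resp: dict
    Return the string representing the url
    """
    if data_in_resp is not None:
        if "Location" in data_in_resp:
            return data_in_resp["Location"]
        if "location" in data_in_resp:
            return data_in_resp["location"]
    logger.warn("Invalid library curric response: %s" % data_in_resp)
    return None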
f22f833efb45bdfe0458d045cfd300721185dc84
|
Revert "bug fix"
|
sabToSickBeardwithConverter.py
|
sabToSickBeardwithConverter.py
|
import os
import sys
import autoProcessTV
from readSettings import ReadSettings
from mkvtomp4 import MkvtoMp4
from extensions import valid_input_extensions
settings = ReadSettings(os.path.dirname(sys.argv[0]), "autoProcess.ini")
path = str(sys.argv[1])
for r, d, f in os.walk(path):
for files in f:
if os.path.splitext(files)[1][1:] in valid_input_extensions:
file = os.path.join(r, files)
convert = MkvtoMp4(file, FFMPEG_PATH=settings.ffmpeg, FFPROBE_PATH=settings.ffprobe, delete=settings.delete, output_extension=settings.output_extension, relocate_moov=settings.relocate_moov, iOS=settings.iOS)
"""Contents of sabToSickbeard.py"""
if len(sys.argv) < 2:
print "No folder supplied - is this being called from SABnzbd?"
sys.exit()
elif len(sys.argv) >= 3:
autoProcessTV.processEpisode(sys.argv[1], sys.argv[2])
else:
autoProcessTV.processEpisode(sys.argv[1])
|
Python
| 0 |
@@ -440,20 +440,20 @@
kvtoMp4(
-file
+path
, FFMPEG
|
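The revert decodes to handing the walked folder back to the converter, undoing the per-file change that passed each matching file:

            convert = MkvtoMp4(path, FFMPEG_PATH=settings.ffmpeg, FFPROBE_PATH=settings.ffprobe, delete=settings.delete, output_extension=settings.output_extension, relocate_moov=settings.relocate_moov, iOS=settings.iOS)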
d3a9824ea2f7675e9e0008b5d914f02e63e19d85
|
Add new package. (#22639)
|
var/spack/repos/builtin/packages/liblbfgs/package.py
|
var/spack/repos/builtin/packages/liblbfgs/package.py
|
Python
| 0 |
@@ -0,0 +1,1217 @@
+# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other%0A# Spack Project Developers. See the top-level COPYRIGHT file for details.%0A#%0A# SPDX-License-Identifier: (Apache-2.0 OR MIT)%0A%0Afrom spack import *%0A%0A%0Aclass Liblbfgs(AutotoolsPackage):%0A %22%22%22libLBFGS is a C port of the implementation of Limited-memory%0A Broyden-Fletcher-Goldfarb-Shanno (L-BFGS) method written by Jorge Nocedal.%0A%0A The L-BFGS method solves the unconstrainted minimization problem:%0A minimize F(x), x = (x1, x2, ..., xN),%0A only if the objective function F(x) and its gradient G(x) are computable.%22%22%22%0A%0A homepage = %22http://www.chokkan.org/software/liblbfgs/%22%0A url = %22https://github.com/downloads/chokkan/liblbfgs/liblbfgs-1.10.tar.gz%22%0A git = %22https://github.com/chokkan/liblbfgs.git%22%0A%0A maintainers = %5B'RemiLacroix-IDRIS'%5D%0A%0A version('master', branch='master')%0A version('1.10', sha256='4158ab7402b573e5c69d5f6b03c973047a91e16ca5737d3347e3af9c906868cf')%0A%0A depends_on('autoconf', type='build', when='@master')%0A depends_on('automake', type='build', when='@master')%0A depends_on('libtool', type='build', when='@master')%0A depends_on('m4', type='build', when='@master')%0A
|
|
a568663ebcf8b45a801df2cf2185dd3e7c969a79
|
Fix fragile command description
|
vint/linting/policy/prohibit_command_rely_on_user.py
|
vint/linting/policy/prohibit_command_rely_on_user.py
|
import re
from vint.ast.node_type import NodeType
from vint.linting.level import Level
from vint.linting.policy.abstract_policy import AbstractPolicy
from vint.linting.policy.reference.googlevimscriptstyleguide import get_reference_source
from vint.linting.policy_loader import register_policy
PROHIBITED_COMMAND_PATTERN = re.compile(r'norm(al)?\s|'
r's(u(bstitute)?)?/')
@register_policy
class ProhibitCommandRelyOnUser(AbstractPolicy):
def __init__(self):
super(ProhibitCommandRelyOnUser, self).__init__()
self.description = 'Prefer single quoted strings'
self.reference = get_reference_source('FRAGILE')
self.level = Level.WARNING
def listen_node_types(self):
return [NodeType.EXCMD]
def is_valid(self, node, lint_context):
""" Whether the specified node is valid.
This policy prohibit following commands:
- normal without !
- substitute
"""
command = node['str']
is_command_not_prohibited = PROHIBITED_COMMAND_PATTERN.search(command) is None
return is_command_not_prohibited
|
Python
| 0.004767 |
@@ -589,32 +589,45 @@
= '
-Prefer single quoted str
+Avoid commands that rely on user sett
ings
|
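Decoded, the description copied from another policy is replaced with one that matches what this policy actually checks:

        self.description = 'Avoid commands that rely on user settings'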
b55277497559fad19f790ba8821f02ff2ce20c91
|
add a minimal smoke test of multi-run
|
bluesky/tests/test_multi_runs.py
|
bluesky/tests/test_multi_runs.py
|
Python
| 0 |
@@ -0,0 +1,1130 @@
+from bluesky import preprocessors as bpp%0Afrom bluesky import plans as bp%0Afrom bluesky import plan_stubs as bps%0Afrom bluesky.preprocessors import define_run_wrapper as drw%0Afrom ophyd.sim import motor, det%0Afrom bluesky.tests.utils import DocCollector%0A%0A%0Adef test_multirun_smoke(RE, hw):%0A dc = DocCollector()%0A RE.subscribe(dc.insert)%0A%0A def interlaced_plan(dets, motor):%0A to_read = (motor, *dets)%0A run_ids = list(%22abc%22)%0A for rid in run_ids:%0A yield from drw(bps.open_run(md=%7Brid: rid%7D), run_id=rid)%0A%0A for j in range(5):%0A for i, rid in enumerate(run_ids):%0A yield from bps.mov(motor, j + 0.1 * i)%0A yield from drw(bps.trigger_and_read(to_read), run_id=rid)%0A%0A for rid in run_ids:%0A yield from drw(bps.close_run(), run_id=rid)%0A%0A RE(interlaced_plan(%5Bhw.det%5D, hw.motor))%0A%0A assert len(dc.start) == 3%0A for start in dc.start:%0A desc, = dc.descriptor%5Bstart%5B%22uid%22%5D%5D%0A assert len(dc.event%5Bdesc%5B%22uid%22%5D%5D) == 5%0A%0A for stop in dc.stop.values():%0A for start in dc.start:%0A assert start%5B%22time%22%5D %3C stop%5B%22time%22%5D%0A
|
|
f31b11b2cf1f6924c4373fbfaf4b911102272876
|
add base serializer
|
cla_backend/apps/complaints/serializers.py
|
cla_backend/apps/complaints/serializers.py
|
Python
| 0 |
@@ -0,0 +1,383 @@
+# -*- coding: utf-8 -*-%0Afrom rest_framework import serializers%0A%0Afrom .models import Category, Complaint%0A%0A%0Aclass CategorySerializerBase(serializers.ModelSerializer):%0A class Meta:%0A model = Category%0A fields = ('id', 'name')%0A%0A%0Aclass ComplaintSerializerBase(serializers.ModelSerializer):%0A category = CategorySerializerBase()%0A%0A class Meta:%0A model = Complaint%0A
|
|
9d348cba1c800a4de9a0078ded1e03540256f8a6
|
Add backwards-compatible registration.urls, but have it warn pending deprecation.
|
registration/urls.py
|
registration/urls.py
|
Python
| 0 |
@@ -0,0 +1,233 @@
+import warnings%0A%0Awarnings.warn(%22Using include('registration.urls') is deprecated; use include('registration.backends.default.urls') instead%22,%0A PendingDeprecationWarning)%0A%0Afrom registration.backends.default.urls import *%0A
|
|
d028db776b92c4d968434a64b2c5d7e02867b32e
|
Create db_init.py
|
db_init.py
|
db_init.py
|
Python
| 0.000009 |
@@ -0,0 +1,640 @@
+from sqlalchemy import create_engine, Column, Integer, String, Sequence, update%0Afrom sqlalchemy.ext.declarative import declarative_base%0Afrom sqlalchemy.orm import sessionmaker%0A%0Aengine = create_engine('sqlite:///passwords.db')%0ABase = declarative_base()%0ASession = sessionmaker(bind=engine)%0Asession = Session()%0A%0Aclass Locker(Base):%0A%09__tablename__ = 'locker'%0A%0A%09id = Column(Integer, Sequence('website_id_seq'), primary_key=True)%0A%09url = Column(String(60))%0A%09user = Column(String(60))%0A%09password = Column(String(60))%0A%0A%09def __repr__(self):%0A%09%09return %22%3CWebsite(url=%7B%7D, user=%7B%7D, password=%7B%7D%3E%22.format(url,user,password)%0A%0ABase.metadata.create_all(engine)%0A
|
|
e7fa141bc8fade9c6a34c0bbe95df9a77eb95e0e
|
Update __init__.py
|
tendrl/commons/objects/disk/__init__.py
|
tendrl/commons/objects/disk/__init__.py
|
from tendrl.commons.etcdobj import EtcdObj
from tendrl.commons import objects
class Disk(objects.BaseObject):
def __init__(self, disk_id=None, device_name=None, disk_kernel_name=None,
parent_id=None, parent_name=None, disk_type=None, fsuuid=None,
mount_point=None, model=None, vendor=None, used=None,
serial_no=None, rmversion=None, fstype=None, ssd=None,
size=None, device_number=None, driver=None, group=None,
device=None, bios_id=None, state=None, driver_status=None,
label=None, req_queue_size=None,
mode=None, owner=None, min_io_size=None,
major_to_minor_no=None, device_files=None, sysfs_busid=None,
alignment=None, read_only=None, read_ahead=None,
removable_device=None, scheduler_name=None, sysfs_id=None,
sysfs_device_link=None, geo_bios_edd=None,
geo_bios_legacy=None, geo_logical=None, phy_sector_size=None,
discard_granularity=None, discard_align_offset=None,
discard_max_bytes=None, discard_zeros_data=None,
optimal_io_size=None, log_sector_size=None, drive_status=None,
driver_modules=None, *args, **kwargs):
super(Disk, self).__init__(*args, **kwargs)
self.value = 'nodes/%s/Disks/%s'
self.disk_id = disk_id
self.device_name = device_name
self.disk_kernel_name = disk_kernel_name
self.parent_id = parent_id
self.parent_name = parent_name
self.disk_type = disk_type
self.fsuuid = fsuuid
self.mount_point = mount_point
self.model = model
self.vendor = vendor
self.used = used
self.serial_no = serial_no
self.rmversion = rmversion
self.fstype = fstype
self.ssd = ssd
self.size = size
self.device_number = device_number
self.driver = driver
self.drive_status = drive_status
self.group = group
self.device = device
self.bios_id = bios_id
self.state = state
self.driver_status = driver_status
self.label = label
self.req_queue_size = req_queue_size
self.mode = mode
self.owner = owner
self.min_io_size = min_io_size
self.major_to_minor_no = major_to_minor_no
self.device_files = device_files
self.sysfs_busid = sysfs_busid
self.alignment = alignment
self.read_only = read_only
self.read_ahead = read_ahead
self.removable_device = removable_device
self.scheduler_name = scheduler_name
self.sysfs_id = sysfs_id
self.sysfs_device_link = sysfs_device_link
self.geo_bios_edd = geo_bios_edd
self.geo_bios_legacy = geo_bios_legacy
self.geo_logical = geo_logical
self.phy_sector_size = phy_sector_size
self.discard_granularity = discard_granularity
self.discard_align_offset = discard_align_offset
self.discard_max_bytes = discard_max_bytes
self.discard_zeros_data = discard_zeros_data
self.optimal_io_size = optimal_io_size
self.log_sector_size = log_sector_size
self.driver_modules = driver_modules
self._etcd_cls = _DiskEtcd
class _DiskEtcd(EtcdObj):
"""A table of the service, lazily updated
"""
__name__ = 'nodes/%s/Disks/%s'
_tendrl_cls = Disk
def render(self):
self.__name__ = self.__name__ % (
NS.node_context.node_id, self.disk_id
)
return super(_DiskEtcd, self).render()
|
Python
| 0.000072 |
@@ -1375,32 +1375,36 @@
'nodes/%25s/Disks/
+all/
%25s'%0A self
@@ -3442,16 +3442,20 @@
s/Disks/
+all/
%25s'%0A
|
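Decoded, both etcd key templates gain an all/ path segment:

        self.value = 'nodes/%s/Disks/all/%s'   # in Disk.__init__

    __name__ = 'nodes/%s/Disks/all/%s'          # in _DiskEtcd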
b40512e834e88f24c20885cddb220188fce11339
|
Add verbose names to UserProfile fields.
|
accounts/migrations/0004_auto_20150227_2347.py
|
accounts/migrations/0004_auto_20150227_2347.py
|
Python
| 0.000001 |
@@ -0,0 +1,798 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import models, migrations%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('accounts', '0003_auto_20150227_2158'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='userprofile',%0A name='email_on_comment_answer',%0A field=models.BooleanField(default=False, verbose_name=b'Email-Benachrichtigung bei Antwort auf meine Kommentare'),%0A preserve_default=True,%0A ),%0A migrations.AlterField(%0A model_name='userprofile',%0A name='email_on_message',%0A field=models.BooleanField(default=False, verbose_name=b'Email-Benachrichtigung bei Nachrichten'),%0A preserve_default=True,%0A ),%0A %5D%0A
|
|
18baab37c3f1924b104f4ef86224c1b197ef1dad
|
add problem 054
|
problem_054.py
|
problem_054.py
|
Python
| 0.000847 |
@@ -0,0 +1,2200 @@
+#!/usr/bin/env python%0A#-*-coding:utf-8-*-%0A%0A'''%0A'''%0A%0Aimport timeit%0A%0A%0Aclass Poker:%0A def __init__(self, cards):%0A self.numbers = %7B%7D%0A self.suits = %7B%7D%0A for card in cards:%0A n = self._to_number(card%5B0%5D)%0A s = card%5B1%5D%0A self.numbers%5Bn%5D = self.numbers.get(n, 0)+1%0A self.suits%5Bs%5D = self.suits.get(s, 0)+1%0A%0A def hand(self):%0A n_max, n_min, n_len = max(self.numbers), min(self.numbers), len(self.numbers)%0A sames = max(self.numbers.values())%0A s_len = len(self.suits)%0A n_diff = n_max-n_min%0A if n_len == 5:%0A if n_diff %3E 4:%0A if s_len == 1: return 5 # flush%0A else: return 0 # high card%0A elif s_len %3E 1: return 4 # straight%0A elif n_min == 10: return 9 # royal straight flush%0A else: return 8 # straight flush%0A elif n_len == 4: return 1 # one pair%0A elif n_len == 3:%0A if sames == 3: return 3 # three cards%0A else: return 2 # two pair%0A elif n_len == 2:%0A if sames == 4: return 7 # four cards%0A else: return 6 # full house%0A%0A def rank(self):%0A s = ''%0A for k,v in sorted(self.numbers.items(), key=lambda (k, v): (v, k), reverse=True):%0A s += %22%7B0:0%3E2%7D%22.format(str(k))*v%0A return s%0A%0A def _to_number(self, s):%0A s = str(s).replace('T', '10').replace('J', '11')%5C%0A .replace('Q', '12').replace('K', '13').replace('A', '14')%0A return int(s)%0A%0A%0Adef calc():%0A wins = %5B0%5D*3%0A for line in open('data/problem_054.txt', 'r').readlines():%0A cards = line.split(' ')%0A p1 = Poker(%5Bcard.rstrip() for card in cards%5B:5%5D%5D)%0A p2 = Poker(%5Bcard.rstrip() for card in cards%5B5:%5D%5D)%0A if p1.hand() %3E p2.hand(): wins%5B0%5D += 1%0A elif p1.hand() %3C p2.hand(): wins%5B2%5D += 1%0A else:%0A if p1.rank() %3E p2.rank(): wins%5B0%5D += 1%0A elif p1.rank() %3C p2.rank(): wins%5B2%5D += 1%0A else: wins%5B1%5D += 1%0A return wins%0A%0A%0Aif __name__ == '__main__':%0A print calc()%0A # print timeit.Timer('problem_030.calc(5)', 'import problem_030').timeit(1)%0A
|
|
e21d6d88f49dbdeb2dfb96e68f174ba587eaa27a
|
Add pre-deploy version match
|
pre-deploy.py
|
pre-deploy.py
|
Python
| 0 |
@@ -0,0 +1,1140 @@
+#!/usr/bin/env python%0A# -*- coding: utf-8 -*-%0A%22%22%22%0Apre-deploy.py%0A%0ACreated by Stephan H%C3%BCgel on 2017-06-06%0A%0AA simple check to ensure that the tag version and the library version coincide%0AIntended to be called before a Wheel is written using %22upload%22%0A%22%22%22%0A%0Aimport os%0Aimport sys%0Aimport subprocess%0Aimport re%0Aimport io%0A%0Adef read(*names, **kwargs):%0A with io.open(%0A os.path.join(os.path.dirname(__file__), *names),%0A encoding=kwargs.get(%22encoding%22, %22utf8%22)%0A ) as fp:%0A return fp.read()%0A%0Adef find_version(*file_paths):%0A version_file = read(*file_paths)%0A version_match = re.search(%0A r%22%5E__version__ = %5B'%5C%22%5D(%5B%5E'%5C%22%5D*)%5B'%5C%22%5D%22,%0A version_file,%0A re.M)%0A if version_match:%0A return version_match.group(1)%0A raise RuntimeError(%22Unable to find version string.%22)%0A%0Adef check():%0A git_version = subprocess.check_output(%0A %5B%22git%22, %22describe%22, %22--abbrev=0%22, %22--tags%22%5D%0A ).strip()%0A library_version = unicode(%22v%22 + find_version(%22pyzotero/zotero.py%22)).strip()%0A return library_version == git_version%0A%0Aif __name__ == '__main__':%0A if check():%0A sys.exit(1)%0A else:%0A sys.exit(0)%0A
|
|
bc1c65315fe22146b2d9a0955acc6e286b069657
|
Add problem 48
|
problem_48.py
|
problem_48.py
|
Python
| 0.02481 |
@@ -0,0 +1,335 @@
+'''%0AProblem 48%0A%0A@author: Kevin Ji%0A'''%0A%0Adef self_power_with_mod(number, mod):%0A product = 1%0A%0A for _ in range(number):%0A product *= number%0A product %25= mod%0A%0A return product%0A%0A%0AMOD = 10000000000%0Anumber = 0%0A%0Afor power in range(1, 1000 + 1):%0A number += self_power_with_mod(power, MOD)%0A number %25= MOD%0A%0Aprint(number)%0A
|
|
008625fef55f8f58ab80b883d34ae5d40e55c721
|
Add initial test for binheap
|
test_binheap.py
|
test_binheap.py
|
Python
| 0.000001 |
@@ -0,0 +1,164 @@
+import pytest%0Afrom binheap import Binheap%0A%0A%0Adef test_init_bh():%0A b = Binheap()%0A assert b.binlist is %5B%5D%0A c = Binheap(%5B1, 2%5D)%0A assert c.binlist == %5B1, 2%5D%0A
|
|
4de60043a290a5590dc686f611228a9ddd94a980
|
fix benchmark.py
|
mpmath/tests/benchmark.py
|
mpmath/tests/benchmark.py
|
# Note: the code in this file is a big pile of ugly
from mpmath import *
from mpmath.lib import *
from decimal import getcontext, Decimal
import dmath
from random import seed, random, randint
def getrandom(type):
if type == 'mpf':
return mpf(random() * 2.0**randint(-10, 10)) ** 0.5
if type == 'mpfval':
return (mpf(random() * 2.0**randint(-10, 10)) ** 0.5).val
if type == 'Decimal':
return Decimal(repr(random() * 2.0**randint(-10, 10))).sqrt()
raise TypeError
def rndnums(type, N):
seed(1234)
xs = [getrandom(type) for i in xrange(N)]
ys = xs[::-1]
xys = zip(xs, ys)
return xs, ys, xys
def setprec(type, prec):
if type == 'Decimal':
getcontext().prec = prec
else:
mpf.dps = prec
# change prec value to bits for mpfval use
prec = mpf.prec
return prec
testcode = \
"""
def testit(prec, N):
from time import clock
RF = round_half_even
prec = setprec('TYPE', prec)
xs, ys, xys = rndnums('TYPE', N)
t = 1e100
for i in range(3):
t1 = clock()
for x, y in xys:
OP; OP; OP; OP; OP; OP; OP; OP; OP; OP;
t2 = clock()
t = min(t, (t2-t1)/10)
return t
"""
tests = []
atests = [
('Convert to integer (int(x))', 'int(x)', 'int(x)', 'to_int(x)'),
('Convert to string (str(x))', 'str(x)', 'str(x)', 'to_str(x, int(prec/3.321))'),
('Convert to float (float(x))', 'float(x)', 'float(x)', 'to_float(x)'),
('Equality (x==y)', 'x==y', 'x==y', 'feq(x, y)'),
('Comparison (x<y)', 'x<y', 'x<y', 'fcmp(x, y) < 0'),
('Addition (x+y)', 'x+y', 'x+y', 'fadd(x, y, prec, RF)'),
('Subtraction (x-y)', 'x+y', 'x+y', 'fsub(x, y, prec, RF)'),
('Multiplication (x*y)', 'x*y', 'x*y', 'fmul(x, y, prec, RF)'),
('Division (x/y)', 'x/y', 'x/y', 'fdiv(x, y, prec, RF)'),
('Square root (x^0.5)', 'x.sqrt()', 'sqrt(x)', 'fsqrt(x, prec, RF)'),
('Integer power (x^42)', 'x**42', 'x**42', 'fpow(x, 42, prec, RF)'),
# ('Exponential function (exp(x))', 'dmath.exp(x)', 'exp(x)', 'fexp(x, prec, RF)'),
# ('Natural logarithm (log(x))', 'dmath.log(x+1)', 'log(x)', 'flog(x, prec, RF)'),
# ('Sine (sin(x))', 'dmath.sin(x)', 'sin(x)', 'fsin(x, prec, RF)'),
# ('Tangent (tan(x))', 'dmath.tan(x)', 'tan(x)', 'ftan(x, prec, RF)'),
# ('Inverse tangent(atan(x))', 'dmath.atan(x)', 'atan(x)', 'fatan(x, prec, RF)'),
# ('Hyperbolic cosine (cosh(x))', 'dmath.cosh(x)', 'cosh(x)', 'fcosh(x, prec, RF)')
]
slow = ["exp", "log", "sin", "tan", "cos"]
for op in atests:
cases = [op[0]]
if op[1]:
exec testcode.replace("OP", op[1]).replace("TYPE", "Decimal")
cases += [testit]
else:
cases += [None]
exec testcode.replace("OP", op[2]).replace("TYPE", "mpf")
cases += [testit]
exec testcode.replace("OP", op[3]).replace("TYPE", "mpfval")
cases += [testit]
tests.append(cases)
def rd(x):
if x > 100000: return int(x // 10000) * 10000
if x > 10000: return int(x // 1000) * 1000
if x > 1000: return int(x // 100) * 100
if x > 100: return int(x // 10) * 10
return int(x)
def runtests():
results = []
for test in tests:
name, dectest, mpftest, mpfvaltest = test
if any(s in name for s in slow):
N = 1
precs = [15, 30, 100, 300]
else:
N = 10
precs = [15, 30, 100, 300, 1000]
header_name = "*" + name + "*"
rows = []
for prec in precs:
print name, prec
if dectest is None:
t1 = 1e1000-1e1000
else:
t1 = dectest(prec, N)
t2 = mpftest(prec, N)
t3 = mpfvaltest(prec, N)
s = []
s += ["%i" % prec]
s += [str(rd(N/t1))]
s += [str(rd(N/t2)) + (" (%.1fx)" % (t1/t2))]
s += [str(rd(N/t3)) + (" (%.1fx)" % (t1/t3))]
rows.append(s)
results.append((header_name, rows))
return results
import gc
gc.disable()
results1 = runtests()
import psyco
psyco.full()
results2 = runtests()
import sys
sys.stdout = open("results.txt", "w")
for r1, r2 in zip(results1, results2):
name = r1[0]
print name
print "|| *digits* || *Decimal* || *mpf* || *raw mpf* || *Decimal+psyco* || *mpf+psyco* || *raw mpf+psyco* ||"
for a, b in zip(r1[1], r2[1]):
cols = a + b[1:]
print "|| " + (" || ".join(cols)) + " ||"
print
|
Python
| 0.000015 |
@@ -2008,16 +2008,17 @@
', 'fpow
+i
(x, 42,
@@ -2528,16 +2528,25 @@
slow = %5B
+%22power%22,
%22exp%22, %22
|
a400a24a20dad682e90ad6e52fc8f17a6cac541a
|
Support for methods on User model when checking permissions
|
oscar/views/decorators.py
|
oscar/views/decorators.py
|
from functools import wraps
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.auth.decorators import user_passes_test
from django.shortcuts import render
from django.contrib import messages
from django.contrib.auth.views import redirect_to_login
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from oscar.core.compat import urlparse
def staff_member_required(view_func, login_url=None):
"""
Ensure that the user is a logged-in staff member.
* If not authenticated, redirect to a specified login URL.
* If not staff, show a 403 page
This decorator is based on the decorator with the same name from
django.contrib.admin.views.decorators. This one is superior as it allows a
redirect URL to be specified.
"""
if login_url is None:
login_url = reverse_lazy('customer:login')
@wraps(view_func)
def _checklogin(request, *args, **kwargs):
if request.user.is_active and request.user.is_staff:
return view_func(request, *args, **kwargs)
# If user is not logged in, redirect to login page
if not request.user.is_authenticated():
# If the login url is the same scheme and net location then just
# use the path as the "next" url.
path = request.build_absolute_uri()
login_scheme, login_netloc = urlparse.urlparse(login_url)[:2]
current_scheme, current_netloc = urlparse.urlparse(path)[:2]
if ((not login_scheme or login_scheme == current_scheme) and
(not login_netloc or login_netloc == current_netloc)):
path = request.get_full_path()
messages.warning(request, _("You must log in to access this page"))
return redirect_to_login(path, login_url, REDIRECT_FIELD_NAME)
else:
# User does not have permission to view this page
raise PermissionDenied
return _checklogin
def check_permissions(user, permissions):
"""
Permissions can be a list or a tuple of lists. If it is a tuple,
every permission list will be evaluated and the outcome will be checked
for truthiness.
Each item of the list(s) must be either a valid Django permission name
(model.codename) or an attribute on the User model
(e.g. 'is_active', 'is_superuser').
Example usage:
- permissions_required(['is_staff', ])
would replace staff_member_required
- permissions_required(['is_anonymous', ])
would replace login_forbidden
- permissions_required((['is_staff',], ['partner.dashboard_access']))
allows both staff users and users with the above permission
"""
def _check_one_permission_list(perms):
regular_permissions = [perm for perm in perms if '.' in perm]
conditions = [perm for perm in perms if '.' not in perm]
if conditions and ['is_active', 'is_anonymous'] not in conditions:
# always check for is_active where appropriate
conditions.append('is_active')
passes_conditions = all([getattr(user, perm) for perm in conditions])
return passes_conditions and user.has_perms(regular_permissions)
if permissions is None:
return True
elif isinstance(permissions, list):
return _check_one_permission_list(permissions)
else:
return any(_check_one_permission_list(perm) for perm in permissions)
def permissions_required(permissions, login_url=None):
"""
Decorator that checks if a user has the given permissions.
Accepts a list or tuple of lists of permissions (see check_permissions
documentation).
If the user is not logged in and the test fails, she is redirected to a
login page. If the user is logged in, she gets a HTTP 403 Permission Denied
message, analogous to Django's permission_required decorator.
"""
if login_url is None:
login_url = reverse_lazy('customer:login')
def _check_permissions(user):
outcome = check_permissions(user, permissions)
if not outcome and user.is_authenticated():
raise PermissionDenied
else:
return outcome
return user_passes_test(_check_permissions, login_url=login_url)
def login_forbidden(view_func, template_name='login_forbidden.html',
status=403):
"""
Only allow anonymous users to access this view.
"""
@wraps(view_func)
def _checklogin(request, *args, **kwargs):
if not request.user.is_authenticated():
return view_func(request, *args, **kwargs)
return render(request, template_name, status=status)
return _checklogin
|
Python
| 0.000009 |
@@ -2366,19 +2366,27 @@
or a
-n attribute
+ property or method
on
@@ -2966,39 +2966,112 @@
-if conditions and %5B'is_active',
+# always check for is_active if not checking for is_anonymous%0A if (conditions and%0A
'is
@@ -3081,17 +3081,16 @@
onymous'
-%5D
not in
@@ -3099,17 +3099,20 @@
nditions
-:
+ and
%0A
@@ -3120,54 +3120,43 @@
-# always check for is_active where appropriate
+ 'is_active' not in conditions):
%0A
@@ -3207,32 +3207,21 @@
-passes_conditions = all(
+attributes =
%5Bget
@@ -3260,16 +3260,196 @@
ditions%5D
+%0A # evaluates methods, explicitly casts properties to booleans%0A passes_conditions = all(%5B%0A attr() if callable(attr) else bool(attr) for attr in attributes%5D
)%0A
@@ -3523,16 +3523,20 @@
%0A if
+not
permissi
@@ -3538,24 +3538,16 @@
missions
- is None
:%0A
|
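Decoded, the change lets permission entries name properties or methods on the User model, not just plain attributes: getattr results are collected first, then callables are invoked and everything else is coerced to bool. The docstring gains "or a property or method", the is_active logic is tightened, and the final hunks relax the guard from "if permissions is None:" to "if not permissions:". A best-effort reconstruction of the rewritten helper:

    def _check_one_permission_list(perms):
        regular_permissions = [perm for perm in perms if '.' in perm]
        conditions = [perm for perm in perms if '.' not in perm]
        # always check for is_active if not checking for is_anonymous
        if (conditions and
                'is_anonymous' not in conditions and
                'is_active' not in conditions):
            conditions.append('is_active')
        attributes = [getattr(user, perm) for perm in conditions]
        # evaluates methods, explicitly casts properties to booleans
        passes_conditions = all([
            attr() if callable(attr) else bool(attr) for attr in attributes])
        return passes_conditions and user.has_perms(regular_permissions)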
d43c67a59dcf6c43667d633df8b6f8a3eb84d611
|
add HelloKhalaClient2.py
|
examples/testClient/HelloKhalaClient2.py
|
examples/testClient/HelloKhalaClient2.py
|
Python
| 0.000001 |
@@ -0,0 +1,1167 @@
+#moss's HelloKhala Client%0D%0A#add time type%0D%0Aimport socket%0D%0Aimport struct%0D%0Aimport json%0D%0A%0D%0Adef login():%0D%0A%09send = %7B'type': 'login'%7D%0D%0A%09return send%0D%0Adef logout():%0D%0A%09send = %7B'type': 'logout'%7D%0D%0A%09return send%0D%0Adef devType():%0D%0A%09send = %7B'type': 'dev'%7D%0D%0A%09return send%0D%0Adef isLogin():%0D%0A%09send = %7B'type': 'isLogin'%7D%0D%0A%09return send%09%0D%0Adef nodeId():%0D%0A%09send = %7B'type': 'nodeId'%7D%0D%0A%09return send%0D%0Adef time():%0D%0A%09send = %7B'type':'time'%7D%0D%0A%09return send%0D%0Adef default():%0D%0A%09return -1%0D%0A%0D%0As = socket.socket(socket.AF_INET, socket.SOCK_STREAM)%0D%0As.connect(('127.0.0.1', 2007))%0D%0Aoperator = %7B'login':login,'logout':logout,'devType':devType,'isLogin':isLogin,'nodeId':nodeId,'time':time%7D %0D%0Awhile True:%0D%0A%09input = raw_input('%5Binput cmd%5D:')%0D%0A%09sendStr = operator.get(input,default)()%0D%0A%09if sendStr == -1:%0D%0A%09%09print 'err type:',input,'please input again!'%0D%0A%09%09continue%0D%0A%09strjson = json.dumps(sendStr)%0D%0A%09print '%5Bsend msg%5D:',strjson%0D%0A%09inputLen = len(strjson)%0D%0A%09pstr = '%3EI'+ str(inputLen)+'s'%0D%0A%09bytes = struct.pack(pstr, inputLen,strjson)%0D%0A%09s.send(bytes)%0D%0A%09d = s.recv(1024)%0D%0A%09if len(d) == 0:%0D%0A%09%09print 'exit'%0D%0A%09%09break%0D%0A%09print '%5Brev msg%5D:',d%0D%0A%09print ''%0D%0A%09if d == 'logout success!':%0D%0A%09%09print 'exit'%0D%0A%09%09break%0D%0As.close()%0D%0A
|
|
80e80bff7603e852710df6c9de613b1781877b2d
|
Test case for two classes with the same name in one module.
|
tests/python/typeinference/same_name.py
|
tests/python/typeinference/same_name.py
|
Python
| 0 |
@@ -0,0 +1,171 @@
+class A(object):%0A def method(self):%0A return 1%0A%0AA().method() ## type int%0A%0A%0Aclass A(object):%0A def method(self):%0A return %22test%22%0A%0AA().method() ## type str%0A
|
|
bd766630ceadcea1928a8b582a178d49788ecc53
|
Fix try_cast_int #66
|
quora/quora.py
|
quora/quora.py
|
from bs4 import BeautifulSoup
import re
import requests
####################################################################
# Helpers
####################################################################
def try_cast_int(s):
try:
temp = re.findall('\d', str(s))
temp = ''.join(temp)
return int(temp)
except ValueError:
return s
def get_question_link(soup):
question_link = soup.find('a', attrs = {'class' : 'question_link'})
return 'http://www.quora.com' + question_link.get('href')
def get_author(soup):
raw_author = soup.find('div', attrs = {'class' : 'author_info'}).next.get('href')
author = raw_author.split('/')[-1]
return author
def extract_username(username):
if 'https://www.quora.com/' not in username['href']:
return username['href'][1:]
else:
username = re.search("[a-zA-Z-\-]*\-+[a-zA-Z]*-?[0-9]*$", username['href'])
if username is not None:
return username.group(0)
else:
return None
####################################################################
# API
####################################################################
class Quora:
@staticmethod
def get_one_answer(question, author=None):
if author is None: # For short URL's
if re.match('http', question): # question like http://qr.ae/znrZ3
soup = BeautifulSoup(requests.get(question).text)
else: # question like znrZ3
soup = BeautifulSoup(requests.get('http://qr.ae/' + question).text)
else:
soup = BeautifulSoup(requests.get('http://www.quora.com/' + question + '/answer/' + author).text)
return Quora.scrape_one_answer(soup)
@staticmethod
def scrape_one_answer(soup):
answer = soup.find('div', id = re.compile('_answer_content$')).find('div', id = re.compile('_container'))
question_link = get_question_link(soup)
author = get_author(soup)
views = soup.find('span', attrs = {'class' : 'stats_row'}).next.next.next.next
want_answers = soup.find('span', attrs = {'class' : 'count'}).string
try:
upvote_count = soup.find('a', attrs = {'class' : 'vote_item_link'}).find('span', attrs = {'class' : 'count'})
if upvote_count is None:
upvote_count = 0
except:
upvote_count = 0
try:
comment_count = soup.find_all('a', id = re.compile('_view_comment_link'))[-1].find('span').string
# '+' is dropped from the number of comments.
# Only the comments directly on the answer are considered. Comments on comments are ignored.
except:
comment_count = 0
answer_stats = map(try_cast_int, [views, want_answers, upvote_count, comment_count])
answer_dict = {'views' : answer_stats[0],
'want_answers' : answer_stats[1],
'upvote_count' : answer_stats[2],
'comment_count' : answer_stats[3],
'answer' : str(answer),
'question_link' : question_link,
'author' : author
}
return answer_dict
@staticmethod
def get_latest_answers(question):
soup = BeautifulSoup(requests.get('http://www.quora.com/' + question + '/log').text)
authors = Quora.scrape_latest_answers(soup)
return [Quora.get_one_answer(question, author) for author in authors]
@staticmethod
def scrape_latest_answers(soup):
authors = []
clean_logs = []
raw_logs = soup.find_all('div', attrs={'class' : 'feed_item_activity'})
for entry in raw_logs:
if 'Answer added by' in entry.next:
username = entry.find('a', attrs={'class' : 'user'})
if username is not None:
username = extract_username(username)
if username not in authors:
authors.append(username)
return authors
@staticmethod
def get_question_stats(question):
soup = BeautifulSoup(requests.get('http://www.quora.com/' + question).text)
return Quora.scrape_question_stats(soup)
@staticmethod
def scrape_question_stats(soup):
raw_topics = soup.find_all('span', attrs={'itemprop' : 'title'})
topics = []
for topic in raw_topics:
topics.append(topic.string)
want_answers = soup.find('span', attrs={'class' : 'count'}).string
answer_count = soup.find('div', attrs={'class' : 'answer_count'}).next.split()[0]
question_text = list(soup.find('div', attrs = {'class' : 'question_text_edit'}).find('h1').children)[-1]
question_details = soup.find('div', attrs = {'class' : 'question_details_text'})
answer_wiki = soup.find('div', attrs = {'class' : 'AnswerWikiArea'}).find('div')
question_dict = {'want_answers' : try_cast_int(want_answers),
'answer_count' : try_cast_int(answer_count),
'question_text' : question_text.string,
'topics' : topics,
'question_details' : str(question_details),
'answer_wiki' : str(answer_wiki),
}
return question_dict
### Legacy API
@staticmethod
def get_user_stats(u):
from user import User
user = User()
return user.get_user_stats(u)
@staticmethod
def get_user_activity(u):
from user import User
user = User()
return user.get_user_activity(u)
@staticmethod
def get_activity(u):
from user import User
user = User()
return user.get_activity(u)
|
Python
| 0.000002 |
@@ -227,106 +227,478 @@
-try:%0A temp = re.findall('%5Cd', str(s))%0A temp = ''.join(temp)%0A return int(temp)
+pattern = re.compile(r'(%5B0-9%5D+(%5C.%5B0-9%5D+)*%5B %5D*%5BKk%5D)%7C(%5B0-9%5D+)')%0A raw_result = re.search(pattern, s).groups()%0A if raw_result%5B2%5D != None:%0A return int(raw_result%5B2%5D)%0A elif raw_result%5B1%5D == None:%0A raw_result = re.search(r'(%5B0-9%5D+)', raw_result%5B0%5D)%0A return int(raw_result.groups()%5B0%5D) * 1000%0A else:%0A raw_result = re.search(r'(%5B0-9%5D+)%5C.(%5B0-9%5D+)', raw_result%5B0%5D).groups()%0A return int(raw_result%5B0%5D) * 1000 + int(raw_result%5B1%5D) * 100
%0A
|
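Decoded, the new try_cast_int understands Quora's abbreviated counts as well as plain integers. A best-effort reconstruction (the * 100 branch assumes a single digit after the decimal point, e.g. '1.2k' -> 1200):

def try_cast_int(s):
    pattern = re.compile(r'([0-9]+(\.[0-9]+)*[ ]*[Kk])|([0-9]+)')
    raw_result = re.search(pattern, s).groups()
    if raw_result[2] != None:
        # plain integer, e.g. '123'
        return int(raw_result[2])
    elif raw_result[1] == None:
        # whole thousands, e.g. '12k' -> 12000
        raw_result = re.search(r'([0-9]+)', raw_result[0])
        return int(raw_result.groups()[0]) * 1000
    else:
        # decimal thousands, e.g. '1.2k' -> 1200
        raw_result = re.search(r'([0-9]+)\.([0-9]+)', raw_result[0]).groups()
        return int(raw_result[0]) * 1000 + int(raw_result[1]) * 100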
4887a269a28656c288461165078943f99e2390be
|
add settings template for later use with ansible
|
ansible/crates_settings.py
|
ansible/crates_settings.py
|
Python
| 0 |
@@ -0,0 +1,846 @@
+from base_settings import *%0A%0A%0A# SECURITY WARNING: keep the secret key used in production secret!%0ASECRET_KEY = 'zhj_+x#q-&vqh7&)7a3it@tcsf50@fh9$3&&j0*4pmt1x=ye+1'%0A%0A# SECURITY WARNING: don't run with debug turned on in production!%0ADEBUG = False%0A%0AALLOWED_HOSTS = %5B'.'%5D%0A%0A# where will nginx look for static files for production?%0A# collect all static files by running ./manage.py collectstatic%0ASTATIC_URL = '/static/'%0ASTATIC_ROOT = '%7B%7Bcrates_dir%7D%7D'%0A%0A%0ACAS_DIRECTORY = abspath('%7B%7Bcas_dir%7D%7D')%0A%0A# Database%0A# https://docs.djangoproject.com/en/1.8/ref/settings/#databases%0ADATABASES = %7B%0A 'default': %7B%0A 'ENGINE': 'django.db.backends.sqlite3',%0A 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),%0A %7D%0A%7D%0A%0A# http://wiki.nginx.org/XSendfile%0A# Faster serving of CAS files. Backed by nginx using Django to authenticate the%0A# request.%0AX_SENDFILE = True%0A
|
|
4143f5381b8ff47a80a550065e831c306551cd77
|
solve problem 035
|
python/035.py
|
python/035.py
|
Python
| 0.000285 |
@@ -0,0 +1,626 @@
+%0Adef base10_to_base2( n ):%0A base2n = 0%0A if n == 0:%0A return 0%0A return base10_to_base2( n/2 ) * 10 + n %25 2%0A%0Adef palindromes( s ):%0A flag = True%0A str_len = len(s)%0A half_len = str_len / 2%0A for i in range( 0, half_len+1 ):%0A if s%5Bi%5D != s%5Bstr_len-i-1%5D:%0A flag = False%0A break%0A return flag%0A%0Adef solve_35():%0A sum = 0%0A for i in range( 1, 1000001 ):%0A if palindromes( str(i) ):%0A #print i%0A base2n = base10_to_base2( i )%0A if palindromes( str(base2n) ):%0A sum = sum + i%0A print i%0A print sum%0A %0Asolve_35()%0A
|
|
a5a7d6c3097571a9ef050a75127a2eb24ad2746c
|
Remove test code.
|
packs/alertlogic/actions/scan_list_scan_executions.py
|
packs/alertlogic/actions/scan_list_scan_executions.py
|
#!/usr/bin/env python
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import requests
import json
import os
import yaml
from getpass import getpass
from st2actions.runners.pythonrunner import Action
from lib import GetScanList
from lib import GetScanExecutions
class ListScanExecutions(Action):
def run(self, scan_title, customer_id=None):
"""
The template class for
Returns: An blank Dict.
Raises:
ValueError: On lack of key in config.
"""
# Set up the results
results = {}
# ChatOps is not passing None, so catch 0...
if customer_id == 0:
customer_id = None
scans = GetScanList(self.config, customer_id)
return GetScanExecutions(self.config, scans[scan_title]['id'])
if __name__ == '__main__':
config_file = "/home/jjm/src/our-configs/alertlogic.yaml"
with open(config_file) as f:
config = yaml.safe_load(f)
action = ListScanExecutions(config)
ScanId = action.run(scan_title="ACI - RDG3 - Martin")
print(json.dumps( ScanId,
sort_keys=True, indent=2))
|
Python
| 0.000001 |
@@ -1535,341 +1535,4 @@
'%5D)%0A
-%0Aif __name__ == '__main__':%0A%0A config_file = %22/home/jjm/src/our-configs/alertlogic.yaml%22%0A with open(config_file) as f:%0A config = yaml.safe_load(f)%0A%0A action = ListScanExecutions(config)%0A ScanId = action.run(scan_title=%22ACI - RDG3 - Martin%22)%0A print(json.dumps( ScanId,%0A sort_keys=True, indent=2))%0A
|
675b76f2bc36d7ce97d2e7227582597067be16bd
|
fix list problem.
|
crawler/git_crawler.py
|
crawler/git_crawler.py
|
# -*- coding: utf-8 -*-
from datetime import datetime
import envoy
from allmychanges.utils import cd, get_package_metadata
def git_clone(repo_path, path):
"""Clone git repo from repo_path to local path"""
r = envoy.run('git clone {repo} {path}'.format(repo=repo_path, path=path))
if r.status_code != 0 and r.std_err != '':
return False
return True
def git_log_hash(path):
"""Return list of tuples ('hash', 'date', 'commit message')"""
splitter = '-----======!!!!!!======-----'
ins = '--!!==!!--'
with cd(path):
r = envoy.run('git log --pretty=format:"%H%n{ins}%n%ai%n{ins}%n%B%n{splitter}"'.format(ins=ins, splitter=splitter))
lst = []
for group in r.std_out.split(splitter)[:-1]:
_hash, date, msg = group.strip().split(ins)
lst.append((_hash.strip(), date.strip(), msg.strip()))
return reversed(lst)
def git_checkout(path, revision_hash):
with cd(path):
r = envoy.run('git checkout {revision}'.format(revision=revision_hash))
if r.status_code == 0:
return True
return False
def aggregate_git_log(path):
"""Return versions and commits in standard format"""
versions = list()
current_version, current_commits = None, list()
for rev_hash, date, msg in git_log_hash(path):
current_commits.append(msg)
if git_checkout(path=path, revision_hash=rev_hash):
version = get_package_metadata(path=path, field_name='Version')
if version != current_version:
# memorize it
versions.insert(0,
dict(version=version,
date=datetime.strptime(date.rsplit(' ', 1)[0], '%Y-%m-%d %H:%M:%S'),
sections=dict(notes='',
items=reversed(current_commits))))
current_version, current_commits = version, list()
if current_commits:
versions.insert(0,
dict(version='newest',
date=None,
sections=dict(notes='',
items=reversed(current_commits))))
return versions
|
Python
| 0.000003 |
@@ -881,16 +881,21 @@
return
+list(
reversed
@@ -899,16 +899,17 @@
sed(lst)
+)
%0A%0A%0Adef g
@@ -1810,32 +1810,33 @@
sections=
+%5B
dict(notes='',%0A
@@ -1877,38 +1877,44 @@
+
items=
+list(
reversed(current
@@ -1920,24 +1920,26 @@
t_commits)))
+%5D)
)%0A%0A
@@ -2173,16 +2173,17 @@
ections=
+%5B
dict(not
@@ -2238,16 +2238,21 @@
items=
+list(
reversed
@@ -2270,16 +2270,18 @@
mmits)))
+%5D)
)%0A%0A r
|
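In Python, reversed() returns an iterator rather than a list, so downstream code that indexes, re-iterates, or serializes the result breaks. The decoded fix wraps each reversed(...) in list() and also turns each sections value into a one-element list. Reconstructed, with indentation normalized:

    # git_log_hash now returns a real list
    return list(reversed(lst))

    # in aggregate_git_log(), the same wrapping applies at both insert sites
    versions.insert(0,
                    dict(version=version,
                         date=datetime.strptime(date.rsplit(' ', 1)[0], '%Y-%m-%d %H:%M:%S'),
                         sections=[dict(notes='',
                                        items=list(reversed(current_commits)))]))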
7ecec2d2b516d9ae22a3a0f652424045d547d811
|
Put object_tools in the correct order in settings
|
test_settings.py
|
test_settings.py
|
DEBUG = True
DATABASE_ENGINE = 'sqlite3'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
SECRET_KEY = '123'
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.admin',
'object_tools',
'object_tools.tests',
]
ROOT_URLCONF = 'object_tools.tests.urls'
STATIC_URL = '/static/'
|
Python
| 0.000037 |
@@ -280,24 +280,44 @@
.sessions',%0A
+ 'object_tools',%0A
'django.
@@ -332,36 +332,16 @@
admin',%0A
- 'object_tools',%0A
'obj
@@ -356,17 +356,16 @@
s.tests'
-,
%0A%5D%0A%0AROOT
|
35296b1c87a86a87fbcf317e26a497fc91c287c7
|
Update receiver to catch value error
|
lexos/receivers/kmeans_receiver.py
|
lexos/receivers/kmeans_receiver.py
|
from typing import NamedTuple
from lexos.models.filemanager_model import FileManagerModel
from lexos.receivers.base_receiver import BaseReceiver
class KMeansOption(NamedTuple):
"""The typed tuple to hold kmeans options."""
n_init: int # number of iterations with different centroids.
k_value: int # k value-for k-means analysis. (k groups)
max_iter: int # maximum number of iterations.
tolerance: float # relative tolerance, inertia to declare convergence.
init_method: str # method of initialization: "K++" or "random".
class KMeansReceiver(BaseReceiver):
def options_from_front_end(self) -> KMeansOption:
"""Get the K-means option from front end.
:return: a KmeansOption object to hold all the options.
"""
n_init = int(self._front_end_data['n_init'])
k_value = int(self._front_end_data['nclusters'])
max_iter = int(self._front_end_data['max_iter'])
tolerance = float(self._front_end_data['tolerance'])
init_method = self._front_end_data['init']
# Check if no input, use the default k value.
if k_value == '':
k_value = int(len(FileManagerModel().load_file_manager().
get_active_files()) / 2)
return KMeansOption(n_init=n_init,
k_value=k_value,
max_iter=max_iter,
tolerance=tolerance,
init_method=init_method)
|
Python
| 0 |
@@ -821,65 +821,8 @@
'%5D)%0A
- k_value = int(self._front_end_data%5B'nclusters'%5D)%0A
@@ -1014,16 +1014,31 @@
no input
+ from front-end
, use th
@@ -1068,24 +1068,99 @@
-if k_value == ''
+try:%0A k_value = int(self._front_end_data%5B'nclusters'%5D)%0A except ValueError
:%0A
|
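In the old code, k_value was converted with int() at the top of the method, so an empty 'nclusters' field raised ValueError before the k_value == '' default could ever run. The decoded fix drops the early conversion and wraps it in a try/except at the point of use; reconstructed:

        # Check if no input from front-end, use the default k value.
        try:
            k_value = int(self._front_end_data['nclusters'])
        except ValueError:
            k_value = int(len(FileManagerModel().load_file_manager().
                          get_active_files()) / 2)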
4b6306a0a2d589fc8db79e3dabf5339a16f80f19
|
fix typo in notification
|
app/models/notification.py
|
app/models/notification.py
|
#!/usr/bin/env python3
from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy
from sqlalchemy import ForeignKeyConstraint
from datetime import datetime
from app import db, login_manager as lm
from random import randint
from flask.ext.login import UserMixin
from werkzeug.security import generate_password_hash, \
check_password_hash
from flask.ext.babel import gettext as _
from .review import *
from .course import *
from .user import *
from .image import *
from .forum import *
from .note import *
from .share import *
class Notification(db.Model):
__tablename__ = 'notifications'
id = db.Column(db.Integer, primary_key=True)
to_user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
from_user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
date = db.Column(db.DateTime, default=datetime.utcnow().date())
time = db.Column(db.DateTime, default=datetime.utcnow())
operation = db.Column(db.String(50), nullable=False)
ref_class = db.Column(db.String(50))
ref_obj_id = db.Column(db.Integer)
to_user = db.relationship('User', foreign_keys=to_user_id, backref=db.backref('notifications', order_by='desc(Notification.time)'))
from_user = db.relationship('User', foreign_keys=from_user_id)
def __init__(self, to_user, from_user, operation, ref_obj, ref_class=None):
self.to_user = to_user
self.from_user = from_user
self.date = datetime.utcnow().date()
self.time = datetime.utcnow()
self.operation = operation
self.ref_class = ref_class or ref_obj.__class__.__name__
self.ref_obj_id = ref_obj.id
def save(self):
db.session.add(self)
db.session.commit()
@property
def ref_obj(self):
if ref_class == 'Review':
return Review.query.get(Review.id == ref_obj_id)
elif ref_class == 'ReviewComment':
return ReviewComment.query.get(ReviewComment.id == ref_obj_id)
elif ref_class == 'Course':
return Course.query.get(Course.id == ref_obj_id)
elif ref_class == 'User':
return User.query.get(User.id == ref_obj_id)
elif ref_class == 'Teacher':
return Teacher.query.get(Teacher.id == ref_obj_id)
else:
return None
@property
def class_name(self):
class_names = {
'Review': '点评',
'ReviewComment': '评论',
'Course': '课程',
'User': '用户',
'Teacher': '老师',
}
if self.ref_class in class_names:
return class_names[self.ref_class]
else:
return 'doge'
@property
def ref_obj_name(self):
if self.ref_class == 'Review':
return '课程「' + self.ref_obj.review.link + '」中 ' + self.ref_obj.author.link + ' 的点评'
elif self.ref_class == 'ReviewComment':
return '课程「' + self.ref_obj.review.link + '」中 ' + self.ref_obj.review.author.link + ' 的点评的 ' + self.ref_obj.author.link + ' 的评论'
elif self.ref_class == 'Course':
return '课程「' + self.ref_obj.link + '」'
elif self.ref_class == 'User':
if self.ref_obj == current_user:
return '你'
else:
return '用户「' + self.ref_obj.link + '」'
elif self.ref_class == 'Teacher':
return '老师「' + self.ref_obj.link + '」'
else:
return 'doge'
@property
def operation_text(self):
if operation == 'mention':
return '在' + self.ref_obj_name + '中提到了你'
elif operation == 'upvote':
return '给你在' + self.ref_obj_name + '点了个赞'
elif operation == 'downvote':
return '给你在' + self.ref_obj_name + '点了个反对'
elif operation == 'comment':
return '评论了' + self.ref_obj_name
elif operation == 'review':
return '点评了' + self.ref_obj_name
elif operation == 'follow':
return '关注了' + self.ref_obj_name
else:
return 'doge'
@property
def display_text(self):
return self.from_user.link + ' ' + self.operation_text
@property
def link(self):
return self.ref_obj.link
|
Python
| 0.000245 |
@@ -1747,16 +1747,21 @@
if
+self.
ref_clas
@@ -1767,32 +1767,32 @@
ss == 'Review':%0A
-
retu
@@ -1841,32 +1841,37 @@
d)%0A elif
+self.
ref_class == 'Re
@@ -1964,32 +1964,37 @@
d)%0A elif
+self.
ref_class == 'Co
@@ -2066,32 +2066,37 @@
d)%0A elif
+self.
ref_class == 'Us
@@ -2149,32 +2149,32 @@
== ref_obj_id)%0A
-
elif ref
@@ -2170,16 +2170,21 @@
elif
+self.
ref_clas
@@ -3482,16 +3482,21 @@
if
+self.
operatio
@@ -3511,16 +3511,16 @@
ntion':%0A
-
@@ -3569,32 +3569,37 @@
%E4%BD%A0'%0A elif
+self.
operation == 'up
@@ -3664,32 +3664,37 @@
%E8%B5%9E'%0A elif
+self.
operation == 'do
@@ -3762,32 +3762,37 @@
%E5%AF%B9'%0A elif
+self.
operation == 'co
@@ -3849,32 +3849,37 @@
me%0A elif
+self.
operation == 're
@@ -3922,32 +3922,32 @@
lf.ref_obj_name%0A
-
elif ope
@@ -3943,16 +3943,21 @@
elif
+self.
operatio
|
63bd4b5ed96cd7fd3181ce1ebb7bf63cdda6384e
|
Version 1.6.0
|
sigopt/version.py
|
sigopt/version.py
|
VERSION = '1.5.2'
|
Python
| 0 |
@@ -10,9 +10,9 @@
'1.
-5.2
+6.0
'%0A
|
b6f6eb362c8637839cbce4bd133895e73a695cc0
|
Fix wrong args
|
cupy/cuda/compiler.py
|
cupy/cuda/compiler.py
|
import hashlib
import os
import re
import tempfile
import filelock
from pynvrtc import compiler
import six
from cupy.cuda import device
from cupy.cuda import function
_nvrtc_version = None
def _get_nvrtc_version():
global _nvrtc_version
if _nvrtc_version is None:
interface = compiler.NVRTCInterface()
_nvrtc_version = interface.nvrtcVersion()
return _nvrtc_version
def _get_arch():
cc = device.Device().compute_capability
return 'compute_%s' % cc
class TemporaryDirectory(object):
def __enter__(self):
self.path = tempfile.mkdtemp()
return self.path
def __exit__(self, exc_type, exc_value, traceback):
if exc_value is not None:
return
for name in os.listdir(self.path):
os.unlink(os.path.join(self.path, name))
os.rmdir(self.path)
def nvrtc(source, options=(), arch=None):
if not arch:
arch = _get_arch()
options += ('-arch{}'.format(arch),)
with TemporaryDirectory() as root_dir:
path = os.path.join(root_dir, 'kern')
cu_path = '%s.cu' % path
with open(cu_path, 'w') as cu_file:
cu_file.write(source)
prog = compiler.Program(
six.b(source), six.b(os.path.basename(cu_path)))
ptx = prog.compile([six.b(o) for o in options])
return six.b(ptx)
def preprocess(source, options=()):
pp_src = compiler.Program(six.b(source), six.b('')).compile()
if isinstance(pp_src, six.binary_type):
pp_src = pp_src.decode('utf-8')
return re.sub('(?m)^#.*$', '', pp_src)
_default_cache_dir = os.path.expanduser('~/.cupy/kernel_cache')
def get_cache_dir():
return os.environ.get('CUPY_CACHE_DIR', _default_cache_dir)
_empty_file_preprocess_cache = {}
def compile_with_cache(source, options=(), arch=None, cache_dir=None):
global _empty_file_preprocess_cache
if cache_dir is None:
cache_dir = get_cache_dir()
if arch is None:
arch = _get_arch()
options += ('-ftz=true',)
env = (arch, options, _get_nvrtc_version())
if '#include' in source:
pp_src = '%s %s' % (env, preprocess(source, options))
else:
base = _empty_file_preprocess_cache.get(env, None)
if base is None:
base = _empty_file_preprocess_cache[env] = preprocess('', options)
pp_src = '%s %s %s' % (env, base, source)
if isinstance(pp_src, six.text_type):
pp_src = pp_src.encode('utf-8')
name = '%s.cubin' % hashlib.md5(pp_src).hexdigest()
mod = function.Module()
if not os.path.isdir(cache_dir):
try:
os.makedirs(cache_dir)
except OSError:
if not os.path.isdir(cache_dir):
raise
lock_path = os.path.join(cache_dir, 'lock_file.lock')
path = os.path.join(cache_dir, name)
with filelock.FileLock(lock_path) as lock:
if os.path.exists(path):
with open(path, 'rb') as file:
cubin = file.read()
else:
lock.release()
cubin = nvrtc(source, options, arch)
lock.acquire()
with open(path, 'wb') as cubin_file:
cubin_file.write(cubin)
mod.load(cubin)
return mod
|
Python
| 0.999193 |
@@ -956,16 +956,17 @@
('-arch
+=
%7B%7D'.form
|
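The one-character fix inserts the `=` that NVRTC's option parser expects, so the flag becomes `-arch=compute_XX` rather than the invalid `-archcompute_XX`. A self-contained check of the string handling (the arch value is illustrative):

arch = 'compute_50'  # stand-in for what _get_arch() would return
options = ()
options += ('-arch={}'.format(arch),)  # post-fix form from the diff
assert options == ('-arch=compute_50',)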
dcd0a51e2a47174c5e1067575905056d1578319d
|
fix docstring
|
src/acquisition/covid_hosp/common/utils.py
|
src/acquisition/covid_hosp/common/utils.py
|
"""Code shared among multiple `covid_hosp` scrapers."""
# standard library
import datetime
import re
import pandas as pd
class CovidHospException(Exception):
"""Exception raised exclusively by `covid_hosp` utilities."""
class Utils:
# regex to extract issue date from revision field
# example revision: "Mon, 11/16/2020 - 00:55"
REVISION_PATTERN = re.compile(r'^.*\s(\d+)/(\d+)/(\d+)\s.*$')
def launch_if_main(entrypoint, runtime_name):
"""Call the given function in the main entry point, otherwise no-op."""
if runtime_name == '__main__':
entrypoint()
def int_from_date(date):
"""Convert a YYYY/MM/DD date from a string to a YYYYMMDD int.
Parameters
----------
date : str
Date in "YYYY/MM/DD.*" format.
Returns
-------
int
Date in YYYYMMDD format.
"""
return int(date[:10].replace('/', '').replace('-', ''))
def parse_bool(value):
"""Convert a string to a boolean.
Parameters
----------
value : str
Boolean-like value, like "true" or "false".
Returns
-------
bool
If the string contains some version of "true" or "false".
None
If the string is None or empty.
Raises
------
CovidHospException
If the string constains something other than a version of "true" or
"false".
"""
if not value:
return None
if value.lower() == 'true':
return True
if value.lower() == 'false':
return False
raise CovidHospException(f'cannot convert "{value}" to bool')
def issues_to_fetch(metadata, newer_than, older_than):
"""
Construct all issue dates and URLs to be ingested based on metadata.
Parameters
----------
metadata pd.DataFrame
HHS metadata indexed by issue date and with column "Archive Link"
newer_than Date
Lower bound (exclusive) of days to get issues for.
older_than Date
Upper bound (exclusive) of days to get issues for
Returns
-------
Dictionary of {issue day: list of (download urls, index)}
for issues after newer_than and before older_than
"""
daily_issues = {}
for index in sorted(set(metadata.index)):
day = index.date()
if day > newer_than and day < older_than:
urls = metadata.loc[index, "Archive Link"]
urls_list = [(urls, index)] if isinstance(urls, str) else [(url, index) for url in urls]
if day not in daily_issues:
daily_issues[day] = urls_list
else:
daily_issues[day] += urls_list
return daily_issues
@staticmethod
def merge_by_key_cols(dfs, key_cols):
"""Merge a list of data frames as a series of updates.
Parameters:
-----------
dfs : list(pd.DataFrame)
Data frames to merge, ordered from earliest to latest.
key_cols: list(str)
Columns to use as the index.
Returns a single data frame containing the most recent data for each state+date.
"""
dfs = [df.set_index(key_cols) for df in dfs
if not all(k in df.index.names for k in key_cols)]
result = dfs[0]
for df in dfs[1:]:
# update values for existing keys
result.update(df)
# add any new keys.
## repeated concatenation in pandas is expensive, but (1) we don't expect
## batch sizes to be terribly large (7 files max) and (2) this way we can
## more easily capture the next iteration's updates to any new keys
new_rows = df.loc[[i for i in df.index.to_list() if i not in result.index.to_list()]]
result = pd.concat([result, new_rows])
# convert the index rows back to columns
return result.reset_index(level=key_cols)
@staticmethod
def update_dataset(database, network, newer_than=None, older_than=None):
"""Acquire the most recent dataset, unless it was previously acquired.
Parameters
----------
database : delphi.epidata.acquisition.covid_hosp.common.database.Database
A `Database` subclass for a particular dataset.
network : delphi.epidata.acquisition.covid_hosp.common.network.Network
A `Network` subclass for a particular dataset.
newer_than Date
Lower bound (exclusive) of days to get issues for.
older_than Date
Upper bound (exclusive) of days to get issues for
Returns
-------
bool
Whether a new dataset was acquired.
"""
metadata = network.fetch_metadata()
with database.connect() as db:
max_issue = db.get_max_issue()
older_than = datetime.datetime.today().date() if newer_than is None else older_than
newer_than = max_issue if newer_than is None else newer_than
daily_issues = Utils.issues_to_fetch(metadata, newer_than, older_than)
if not daily_issues:
print("no new issues, nothing to do")
return False
for issue, revisions in daily_issues.items():
issue_int = int(issue.strftime("%Y%m%d"))
# download the dataset and add it to the database
dataset = Utils.merge_by_key_cols([network.fetch_dataset(url) for url, _ in revisions],
db.KEY_COLS)
db.insert_dataset(issue_int, dataset)
# add metadata to the database using the last revision seen.
last_url, last_index = revisions[-1]
metadata_json = metadata.loc[last_index].reset_index().to_json()
db.insert_metadata(issue_int, last_url, metadata_json)
print(f'successfully acquired {len(dataset)} rows')
# note that the transaction is committed by exiting the `with` block
return True
|
Python
| 0.000018 |
@@ -4131,33 +4131,35 @@
%0A newer_than
-D
+: d
ate%0A Lower
@@ -4210,33 +4210,35 @@
%0A older_than
-D
+: d
ate%0A Upper
|
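Decoded, the diff touches only the `update_dataset` docstring: `newer_than Date` and `older_than Date` become the numpydoc-style `name : type` entries used by the file's other docstrings, i.e.:

newer_than : date
  Lower bound (exclusive) of days to get issues for.
older_than : date
  Upper bound (exclusive) of days to get issues for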
89d83b9ca8c1c52537aae0c5339b0cb5ae64c6c4
|
Add additional test for template filters: for filter queries and filter with variable argument
|
tests/filters.py
|
tests/filters.py
|
"""Test cases for variable fields
"""
import unittest
from lighty.templates import Template
from lighty.templates.filter import filter_manager
def simple_filter(value):
return str(value).upper()
filter_manager.register(simple_filter)
def argument_filter(value, arg):
return str(value) + ', ' + str(arg)
filter_manager.register(argument_filter)
def multiarg_filter(value, *args):
return ', '.join([str(arg) for arg in (value, ) + args])
filter_manager.register(multiarg_filter)
class TemplateFiltersTestCase(unittest.TestCase):
"""Test case for block template tag
"""
def assertResult(self, result, value):
assert result == value, 'Error emplate execution: %s' % ' '.join((
result, 'except', value))
def testSimpleFilter(self):
simple_template = Template(name='simple-filter.html')
simple_template.parse("{{ simple_var|simple_filter }}")
result = simple_template.execute({'simple_var': 'Hello'})
self.assertResult(result, 'HELLO')
def testArgFilter(self):
argument_template = Template(name='argument-filter.html')
argument_template.parse('{{ simple_var|argument_filter:"world" }}')
result = argument_template.execute({'simple_var': 'Hello'})
self.assertResult(result, 'Hello, world')
def testMultiargFilter(self):
multiarg_template = Template(name='multiarg-filter.html')
multiarg_template.parse(
'{{ simple_var|multiarg_filter:"John" "Peter" }}')
result = multiarg_template.execute({'simple_var': 'Hello'})
self.assertResult(result, 'Hello, John, Peter')
def test():
suite = unittest.TestSuite()
suite.addTest(TemplateFiltersTestCase('testSimpleFilter'))
suite.addTest(TemplateFiltersTestCase('testArgFilter'))
suite.addTest(TemplateFiltersTestCase('testMultiargFilter'))
return suite
|
Python
| 0 |
@@ -670,16 +670,17 @@
'Error
+t
emplate
@@ -1666,16 +1666,721 @@
eter')%0A%0A
+ def testMultiFilter(self):%0A multifilter_template = Template(name='multifilter.html')%0A multifilter_template.parse(%0A '%7B%7B simple_var%7Csimple_filter%7Cargument_filter:%22world%22 %7D%7D')%0A result = multifilter_template.execute(%7B'simple_var': 'Hello'%7D)%0A self.assertResult(result, 'HELLO, world')%0A%0A def testVaribaleArgFilter(self):%0A varargfilter_template = Template(name='vararg-filter.html')%0A varargfilter_template.parse('%7B%7B simple_var%7Cargument_filter:arg %7D%7D')%0A result = varargfilter_template.execute(%7B%0A 'simple_var': 'Hello',%0A 'arg': 'world'%0A %7D)%0A self.assertResult(result, 'Hello, world')%0A%0A
%0Adef tes
@@ -2601,24 +2601,154 @@
rgFilter'))%0A
+ suite.addTest(TemplateFiltersTestCase('testMultiFilter'))%0A suite.addTest(TemplateFiltersTestCase('testVaribaleArgFilter'))%0A
return s
|
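For readability, here are the two test methods the URL-encoded diff adds, decoded verbatim — one chains two filters, the other resolves the filter argument from a context variable (the `testVaribaleArgFilter` misspelling is in the original commit, which also registers both tests in `test()`'s suite):

    def testMultiFilter(self):
        multifilter_template = Template(name='multifilter.html')
        multifilter_template.parse(
                '{{ simple_var|simple_filter|argument_filter:"world" }}')
        result = multifilter_template.execute({'simple_var': 'Hello'})
        self.assertResult(result, 'HELLO, world')

    def testVaribaleArgFilter(self):
        varargfilter_template = Template(name='vararg-filter.html')
        varargfilter_template.parse('{{ simple_var|argument_filter:arg }}')
        result = varargfilter_template.execute({
            'simple_var': 'Hello',
            'arg': 'world'
        })
        self.assertResult(result, 'Hello, world')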
c5382580601e25a9fb5b41f42548a6e49929fae0
|
Putting these four languages in options brings problems.
|
wagtailcodeblock/blocks.py
|
wagtailcodeblock/blocks.py
|
from django.forms import Media
from django.utils.translation import ugettext_lazy as _
# Wagtail 2.0 compatibility - new package paths
try:
from wagtail.core.blocks import (
StructBlock,
TextBlock,
ChoiceBlock,
)
except ImportError:
from wagtail.wagtailcore.blocks import (
StructBlock,
TextBlock,
ChoiceBlock,
)
from .settings import (
get_language_choices,
get_theme,
get_prism_version
)
class CodeBlock(StructBlock):
"""
Code Highlighting Block
"""
WCB_LANGUAGES = get_language_choices()
language = ChoiceBlock(choices=WCB_LANGUAGES, help_text=_('Coding language'), label=_('Language'))
code = TextBlock(label=_('Code'))
@property
def media(self):
theme = get_theme()
prism_version = get_prism_version()
if theme:
prism_theme = '-{}'.format(theme)
else:
prism_theme = ""
js_list = [
"https://cdnjs.cloudflare.com/ajax/libs/prism/{}/prism.min.js".format(
prism_version,
),
]
for lang_code, lang_name in self.WCB_LANGUAGES:
js_list.append(
"https://cdnjs.cloudflare.com/ajax/libs/prism/{}/components/prism-{}.min.js".format(
prism_version,
lang_code,
)
)
return Media(
js=js_list,
css={
'all': [
"https://cdnjs.cloudflare.com/ajax/libs/prism/{}/themes/prism{}.min.css".format(
prism_version, prism_theme
),
]
}
)
class Meta:
icon = 'code'
template = 'wagtailcodeblock/code_block.html'
form_classname = 'code-block struct-block'
form_template = 'wagtailcodeblock/code_block_form.html'
|
Python
| 0.000005 |
@@ -607,24 +607,80 @@
_choices()%0D%0A
+ off_languages = %5B'html', 'mathml', 'svg', 'xml'%5D%0D%0A%0D%0A
%0D%0A langua
@@ -1265,16 +1265,157 @@
UAGES:%0D%0A
+ # Review: https://github.com/PrismJS/prism/blob/gh-pages/prism.js#L602%0D%0A if lang_code not in self.off_languages:%0D%0A
@@ -1435,16 +1435,20 @@
ppend(%0D%0A
+
@@ -1553,32 +1553,36 @@
+
prism_version,%0D%0A
@@ -1573,32 +1573,36 @@
prism_version,%0D%0A
+
@@ -1625,35 +1625,43 @@
-)%0D%0A
+ )%0D%0A
)%0D%0A
|
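Decoded, the commit declares an `off_languages` list and skips the per-language Prism component scripts for four markup languages, presumably because Prism's core already covers generic markup (the commit's own comment points at prism.js#L602). The affected fragments of `CodeBlock`, decoded from the diff:

WCB_LANGUAGES = get_language_choices()
off_languages = ['html', 'mathml', 'svg', 'xml']

# ...and inside the media property:
for lang_code, lang_name in self.WCB_LANGUAGES:
    # Review: https://github.com/PrismJS/prism/blob/gh-pages/prism.js#L602
    if lang_code not in self.off_languages:
        js_list.append(
            "https://cdnjs.cloudflare.com/ajax/libs/prism/{}/components/prism-{}.min.js".format(
                prism_version,
                lang_code,
            )
        )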
a4d3cb3f90d0d1fe51cdb18f64502f8061798221
|
set global user
|
wallaby/apps/wallabyApp.py
|
wallaby/apps/wallabyApp.py
|
#!/usr/bin/env python
# Copyright (c) by it's authors.
# Some rights reserved. See LICENSE, AUTHORS.
import wallaby.FX as FX
import wallaby.FXUI as FXUI
from wallaby.qt_combat import *
import wallaby.frontends.qt.resource_rc as resource_rc
from wallaby.frontends.qt.loginDialog import *
import twisted.application.service
import sys, time
from twisted.internet import defer
import wallaby.frontends.qt.reactor.threadedselect as threadedselect
from wallaby.frontends.qt.baseqobject import *
from wallaby.common.fxLogger import *
from wallaby.pf.peer import *
from wallaby.pf.peer.debugger import *
from wallaby.pf.room import *
import signal
import wallaby.frontends.qt.resource_rc
import shutil
class WallabyApp(object):
def __init__(self, appName = 'example', checkRoom = None, suggest = False, options=None):
splash = None
FXUI.app = QtGui.QApplication(sys.argv)
FXUI.app.setApplicationName("wallaby - " + appName)
for s in ['16', '32', '64', '128', '256']:
FXUI.app.setWindowIcon(QtGui.QIcon(QtGui.QPixmap(':/icons/images/wallaby_logo_' + s + '.png')))
pixmap = QtGui.QPixmap(":/images/images/wallaby_splash.png")
splash = QtGui.QSplashScreen(pixmap)
splash.show()
splash.raise_()
FXUI.app.processEvents()
if USES_PYSIDE:
import wallaby.frontends.qt.reactor.qtreactor as qtreactor
qtreactor.install()
else:
threadedselect.install()
from twisted.internet import reactor
ii = Interleaver()
reactor.interleave(ii.toInterleave)
reactor.suggestThreadPoolSize(50)
FXUI.mineIcon = QtGui.QIcon(':/icons/images/mine.png')
FXUI.theirsIcon = QtGui.QIcon(':/icons/images/theirs.png')
tapp = twisted.application.service.Application("gui")
service = FXLogger('wallaby.log')
service.setServiceParent(tapp)
service.startService()
FX.appModule = 'wallaby.apps.' + appName
try:
from twisted.plugin import getCache
pkg = __import__(FX.appModule, globals(), locals(), ["*"], 0)
if pkg is not None and len(pkg.__path__) > 0 and os.path.exists(pkg.__path__[0]):
FX.appPath = pkg.__path__[0]
else:
FX.appPath = os.path.join(".", "wallaby", "apps", appName)
except:
FX.appPath = os.path.join(".", "wallaby", "apps", appName)
try:
print "importing", FX.appModule
mod = FX.imp(FX.appModule + '.mainWindow', False)
except:
mod = None
if mod == None:
FX.crit('Module', FX.appModule, 'not found')
reactor.callWhenRunning(self.myQuit)
if USES_PYSIDE: reactor.runReturn()
FXUI.app.exec_()
return
try:
FXUI.mainWindow = mod.MainWindow(self.myQuit, options)
except Exception as e:
import traceback
traceback.print_exc(file=sys.stdout)
from twisted.internet import reactor
reactor.callWhenRunning(self.myQuit)
if USES_PYSIDE: reactor.runReturn()
FXUI.app.exec_()
return
FXUI.mainWindow.setSplash(splash)
from twisted.internet import reactor
reactor.callWhenRunning(self.run, mod, options, checkRoom)
FXUI.mainWindow.enabled = False
FXUI.mainWindow.configure()
FXUI.mainWindow.show()
FXUI.mainWindow.raise_()
signal.signal(signal.SIGINT, self.sigint_handler)
signal.signal(signal.SIGTERM, self.sigint_handler)
# self.gc = GarbageCollector(FXUI.mainWindow, True)
if USES_PYSIDE: reactor.runReturn()
FXUI.app.exec_()
@defer.inlineCallbacks
def run(self, mod, options, checkRoom):
if options is not None:
authenticated = yield FXUI.mainWindow.authenticated(options.username, options.password, options)
else:
authenticated = yield FXUI.mainWindow.authenticated()
count = 0
while not authenticated:
if count == 3: self.realQuit(0)
dlg = LoginDialog()
code = dlg.exec_()
if code == 0: self.realQuit(0)
try:
authenticated = yield FXUI.mainWindow.authenticated(unicode(dlg.userEdit.text()), unicode(dlg.pwdEdit.text()), options)
except:
return
count += 1
from twisted.internet import reactor
if options is not None: FXUI.mainWindow.setDebuggedRooms(options.debug)
FXUI.mainWindow.run(checkRoom)
FXUI.mainWindow.enabled = True
def realQuit(self, shuttime):
if FXUI.app != None:
if shuttime > 0:
print "Shutdown in ", (shuttime-1), "seconds"
from twisted.internet import reactor
reactor.callLater(1, self.realQuit, shuttime-1)
return
from wallaby.frontends.qt.widgets.baseWidget import BaseWidget
if FXUI.mainWindow is not None:
for w in FXUI.mainWindow.findChildren(BaseWidget):
w.deregister(True)
app = FXUI.app
del FXUI.app
del FXUI.mainWindow
print "Stopping Reactor"
from twisted.internet import reactor
reactor.stop()
FX.info("Stopping app")
print "Stopping App"
app.exit()
# FXUI.app.quit()
FX.info("Stopping reactor")
FX.info("Bye")
app = None
def myQuit(self):
print "Set shutdown flag"
FX.shutdown = True
FX.callShutdownCBs()
shuttime = 0
from twisted.internet import reactor
print "Shutdown in ", shuttime, "seconds"
reactor.callLater(1, self.realQuit, shuttime)
def sigint_handler(self, *args):
"""Handler for the SIGINT signal."""
self.myQuit()
|
Python
| 0.000001 |
@@ -4567,16 +4567,55 @@
eactor%0A%0A
+ FX.user = dlg.userEdit.text()%0A%0A
|
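Decoded, the whole commit is one line added in `run()` after the final `reactor` import, publishing the name typed into the login dialog on the module-wide `FX` object:

from twisted.internet import reactor

FX.user = dlg.userEdit.text()   # the added line

if options is not None: FXUI.mainWindow.setDebuggedRooms(options.debug)

One caveat worth noting: `dlg` only exists when the login dialog was actually shown, so a run that authenticates on the first attempt would raise `NameError` here.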
b32f01154cce6d7b7572b04e7218b04d052661e0
|
use apply_detection_link in eval
|
examples/ssd/eval.py
|
examples/ssd/eval.py
|
from __future__ import division
import argparse
import sys
import time
import chainer
from chainer import iterators
from chainercv.datasets import voc_detection_label_names
from chainercv.datasets import VOCDetectionDataset
from chainercv.evaluations import eval_detection_voc
from chainercv.links import SSD300
from chainercv.links import SSD512
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--model', choices=('ssd300', 'ssd512'), default='ssd300')
parser.add_argument('--gpu', type=int, default=-1)
parser.add_argument('--batchsize', type=int, default=32)
args = parser.parse_args()
if args.model == 'ssd300':
model = SSD300(pretrained_model='voc0712')
elif args.model == 'ssd512':
model = SSD512(pretrained_model='voc0712')
if args.gpu >= 0:
chainer.cuda.get_device(args.gpu).use()
model.to_gpu()
model.use_preset('evaluate')
dataset = VOCDetectionDataset(
year='2007', split='test', use_difficult=True, return_difficult=True)
iterator = iterators.SerialIterator(
dataset, args.batchsize, repeat=False, shuffle=False)
start_time = time.time()
pred_bboxes = list()
pred_labels = list()
pred_scores = list()
gt_bboxes = list()
gt_labels = list()
gt_difficults = list()
while True:
try:
batch = next(iterator)
except StopIteration:
break
imgs, bboxes, labels, difficults = zip(*batch)
gt_bboxes.extend(bboxes)
gt_labels.extend(labels)
gt_difficults.extend(difficults)
bboxes, labels, scores = model.predict(imgs)
pred_bboxes.extend(bboxes)
pred_labels.extend(labels)
pred_scores.extend(scores)
fps = len(gt_bboxes) / (time.time() - start_time)
sys.stdout.write(
'\r{:d} of {:d} images, {:.2f} FPS'.format(
len(gt_bboxes), len(dataset), fps))
sys.stdout.flush()
eval_ = eval_detection_voc(
pred_bboxes, pred_labels, pred_scores,
gt_bboxes, gt_labels, gt_difficults,
use_07_metric=True)
print()
print('mAP: {:f}'.format(eval_['map']))
for l, name in enumerate(voc_detection_label_names):
if l in eval_:
print('{:s}: {:f}'.format(name, eval_[l]['ap']))
else:
print('{:s}: -'.format(name))
if __name__ == '__main__':
main()
|
Python
| 0 |
@@ -343,16 +343,65 @@
SSD512%0A
+from chainercv.utils import apply_detection_link%0A
%0A%0Adef ma
@@ -1230,251 +1230,40 @@
e()%0A
-%0A
pr
-ed_bboxes = list()%0A pred_labels = list()%0A pred_scores = list()%0A gt_bboxes = list()%0A gt_labels = list()%0A gt_difficults = list()%0A%0A while True:%0A try:%0A batch = next(iterator)%0A except StopIteration:
+ocessed = 0%0A%0A def hook(
%0A
@@ -1275,37 +1275,21 @@
-break%0A%0A imgs,
+pred_
bboxes,
labe
@@ -1284,24 +1284,29 @@
bboxes,
+pred_
labels,
difficul
@@ -1301,98 +1301,32 @@
ls,
-difficults = zip(*batch)%0A gt_bboxes.extend(bboxes)%0A gt_labels.extend(label
+pred_scores, gt_value
s)
+:
%0A
@@ -1335,200 +1335,62 @@
g
-t_difficults.extend(difficults)%0A%0A bboxes, labels, scores = model.predict(imgs)%0A pred_bboxes.extend(bboxes)%0A pred_labels.extend(labels)%0A pred_scores.extend(scor
+lobal processed%0A processed += len(pred_bbox
es)%0A
-%0A
@@ -1403,25 +1403,25 @@
s = len(
-gt_bboxes
+processed
) / (tim
@@ -1545,25 +1545,25 @@
len(
-gt_bboxes
+processed
), len(d
@@ -1601,24 +1601,191 @@
ut.flush()%0A%0A
+ pred_bboxes, pred_labels, pred_scores, gt_values = %5C%0A apply_detection_link(model, iterator, hook=hook)%0A gt_bboxes, gt_labels, gt_difficults = gt_values%0A%0A
eval_ =
|
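Decoded, the commit deletes the hand-written batching loop and delegates to `chainercv.utils.apply_detection_link`, keeping only a progress hook. A condensed sketch of the post-commit flow follows (names like `start_time`, `dataset`, `model`, and `iterator` come from the surrounding script); note that the raw diff literally yields `len(processed)` in the FPS lines, which would raise `TypeError` on an integer counter, so the arithmetic below is the presumably intended `processed`:

processed = 0

def hook(pred_bboxes, pred_labels, pred_scores, gt_values):
    global processed
    processed += len(pred_bboxes)
    fps = processed / (time.time() - start_time)
    sys.stdout.write(
        '\r{:d} of {:d} images, {:.2f} FPS'.format(
            processed, len(dataset), fps))
    sys.stdout.flush()

pred_bboxes, pred_labels, pred_scores, gt_values = \
    apply_detection_link(model, iterator, hook=hook)
gt_bboxes, gt_labels, gt_difficults = gt_values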
49707bf137c2ad7c4e0e69e25c47956846b5ee76
|
Use never_cache decorator for views that render JS code
|
tinymce/views.py
|
tinymce/views.py
|
# coding: utf-8
# License: MIT, see LICENSE.txt
"""
django-tinymce4-lite views
"""
from __future__ import absolute_import
import json
import logging
from django import VERSION
from django.core.urlresolvers import reverse
from django.http import JsonResponse, HttpResponse
from django.template.loader import render_to_string
from django.utils.html import strip_tags
from django.conf import settings
from jsmin import jsmin
try:
import enchant
except ImportError:
pass
__all__ = ['spell_check', 'css', 'filebrowser']
logging.basicConfig(format='[%(asctime)s] %(module)s: %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
def spell_check(request):
"""
Implements the TinyMCE 4 spellchecker protocol
:param request: Django http request with JSON-RPC payload from TinyMCE 4
containing a language code and a text to check for errors.
:type request: django.http.request.HttpRequest
:return: Django http response containing JSON-RPC payload
with spellcheck results for TinyMCE 4
:rtype: django.http.JsonResponse
"""
data = json.loads(request.body.decode('utf-8'))
output = {'id': data['id']}
error = None
status = 200
try:
from enchant.checker import SpellChecker
if data['params']['lang'] not in enchant.list_languages():
error = 'Missing {0} dictionary!'.format(data['params']['lang'])
raise RuntimeError(error)
checker = SpellChecker(data['params']['lang'])
checker.set_text(strip_tags(data['params']['text']))
output['result'] = {checker.word: checker.suggest() for err in checker}
except NameError:
error = 'The pyenchant package is not installed!'
logger.exception(error)
except RuntimeError:
logger.exception(error)
except Exception:
error = 'Unknown error!'
logger.exception(error)
if error is not None:
output['error'] = error
status = 500
return JsonResponse(output, status=status)
def spell_check_callback(request):
"""
JavaScript callback for TinyMCE4 spellchecker function
:param request: Django http request
:type request: django.http.request.HttpRequest
:return: Django http response with spellchecker callback JavaScript code
:rtype: django.http.HttpResponse
"""
response = HttpResponse(
jsmin(render_to_string('tinymce/spellcheck-callback.js',
request=request)),
content_type='application/javascript')
response['Cache-Control'] = 'no-store'
return response
def css(request):
"""
Custom CSS for TinyMCE 4 widget
By default it fixes widget's position in Django Admin
:param request: Django http request
:type request: django.http.request.HttpRequest
:return: Django http response with CSS file for TinyMCE 4
:rtype: django.http.HttpResponse
"""
if 'grappelli' in settings.INSTALLED_APPS:
margin_left = 0
elif VERSION[0] == 1 and VERSION[1] <= 8:
margin_left = 110 # For old style admin
else:
margin_left = 170 # For Django >= 1.9 style admin
content = render_to_string('tinymce/tinymce4.css',
context={'margin_left': margin_left},
request=request)
response = HttpResponse(content, content_type='text/css')
response['Cache-Control'] = 'no-store'
return response
def filebrowser(request):
"""
JavaScript callback function for `django-filebrowser`_
:param request: Django http request
:type request: django.http.request.HttpRequest
:return: Django http response with filebrowser JavaScript code for for TinyMCE 4
:rtype: django.http.HttpResponse
.. _django-filebrowser: https://github.com/sehmaschine/django-filebrowser
"""
try:
fb_url = request.build_absolute_uri(reverse('fb_browse'))
except:
fb_url = request.build_absolute_uri(reverse('filebrowser:fb_browse'))
content = jsmin(render_to_string('tinymce/filebrowser.js',
context={'fb_url': fb_url},
request=request))
response = HttpResponse(content, content_type='application/javascript')
response['Cache-Control'] = 'no-store'
return response
|
Python
| 0 |
@@ -392,16 +392,70 @@
ettings%0A
+from django.views.decorators.cache import never_cache%0A
from jsm
@@ -2064,24 +2064,37 @@
s=status)%0A%0A%0A
+@never_cache%0A
def spell_ch
@@ -2399,32 +2399,28 @@
%22%22%22%0A re
-sponse =
+turn
HttpRespons
@@ -2588,72 +2588,22 @@
t')%0A
- response%5B'Cache-Control'%5D = 'no-store'%0A return response%0A%0A
+%0A%0A@never_cache
%0Adef
@@ -3158,26 +3158,36 @@
min%0A
-content =
+return HttpResponse(
render_t
@@ -3242,32 +3242,42 @@
+
context=%7B'margin
@@ -3321,32 +3321,42 @@
+
+
request=request)
@@ -3359,135 +3359,73 @@
est)
+,
%0A
-response = HttpResponse(content, content_type='text/css')%0A response%5B'Cache-Control'%5D = 'no-store'%0A return response%0A%0A
+ content_type='text/css')%0A%0A%0A@never_cache
%0Adef
@@ -3988,18 +3988,28 @@
-content =
+return HttpResponse(
jsmi
@@ -4082,32 +4082,42 @@
+
context=%7B'fb_url
@@ -4157,32 +4157,42 @@
+
+
request=request)
@@ -4196,45 +4196,33 @@
st))
+,
%0A
-response = HttpResponse(content,
+
con
@@ -4261,67 +4261,4 @@
t')%0A
- response%5B'Cache-Control'%5D = 'no-store'%0A return response%0A
|
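The commit trades the hand-set `Cache-Control: no-store` header for Django's `never_cache` decorator and collapses each view into a single `return`. A minimal standalone sketch of the pattern (the view name and body are illustrative, not from the project):

from django.http import HttpResponse
from django.views.decorators.cache import never_cache

@never_cache  # stamps the response with no-cache Cache-Control headers
def callback_js(request):  # hypothetical view name
    return HttpResponse('/* generated JS */',
                        content_type='application/javascript')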
8874af7c0db371f63da687c5398db1c7b80f58cd
|
Fix import of django during install time (for environments like Heroku) (#120)
|
todo/__init__.py
|
todo/__init__.py
|
"""
A multi-user, multi-group task management and assignment system for Django.
"""
__version__ = "2.4.10"
__author__ = "Scot Hacker"
__email__ = "[email protected]"
__url__ = "https://github.com/shacker/django-todo"
__license__ = "BSD License"
from . import check
|
Python
| 0.000005 |
@@ -245,16 +245,22 @@
cense%22%0A%0A
+try:%0A%09
from . i
@@ -271,8 +271,114 @@
t check%0A
+except ModuleNotFoundError:%0A%09# this can happen during install time, if django is not installed yet!%0A%09pass%0A
|
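Decoded, the guard lets `todo/__init__.py` be imported for its version metadata before Django is installed (the commit indents with tabs, and bare `ModuleNotFoundError` requires Python 3.6+):

try:
    from . import check
except ModuleNotFoundError:
    # this can happen during install time, if django is not installed yet!
    pass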
e3c51012a36fd85781824bd6b66c7e5e1d6696a9
|
Add documentation
|
app/resources/companies.py
|
app/resources/companies.py
|
from app.models import Company, PostalCode, CompanyPostalCode
from flask_restful import Resource, reqparse
class Companies(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('country')
parser.add_argument('postal_code')
args = parser.parse_args()
country_code = args.get('country')
postal_code = args.get('postal_code')
company_query = Company.query
if country_code is not None:
company_query = company_query.filter(Company.country_code == country_code)
if country_code is not None and postal_code is not None:
postal_code = PostalCode.query.filter(PostalCode.country_code == country_code,
PostalCode.postal_code == postal_code).first()
if postal_code is None:
return 'Country code or postal code not found', 400
company_postal_codes = CompanyPostalCode.query.filter(CompanyPostalCode.postal_code_id == postal_code.id).all()
response = []
for company_postal_code in company_postal_codes:
company = Company.query.get(company_postal_code.company.id)
response.append(company.dictionary())
return response
else:
companies = company_query.all()
companies_array = []
for company in companies:
companies_array.append(company.dictionary())
return companies_array
class SingleCompany(Resource):
def get(self, company_id):
company = Company.query.get(company_id)
if company is None:
return 'Company not found', 400
return company.dictionary()
|
Python
| 0 |
@@ -148,16 +148,1243 @@
(self):%0A
+ %22%22%22%0A List all restaurants%0A ---%0A tags:%0A - Restaurants%0A definitions:%0A - schema:%0A id: Restaurant%0A properties:%0A id:%0A type: integer%0A description: the restaurant's id%0A email:%0A type: string%0A description: the restaurant's email%0A name:%0A type: string%0A description: the restaurant's name%0A logo_url:%0A type: string%0A description: the restaurant's logo url%0A address:%0A type: string%0A description: the restaurant's address%0A phone_number:%0A type: string%0A description: the restaurant's phone number%0A country_code:%0A type: string%0A description: the restaurant's country code%0A%0A responses:%0A 200:%0A description: Lists all restaurants%0A schema:%0A title: Restaurants%0A type: array%0A items:%0A $ref: '#/definitions/Restaurant'%0A %22%22%22%0A
|
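Decoded and abridged, the diff inserts a Flasgger-style YAML docstring at the top of `Companies.get`; it documents the resource as "Restaurants", which is verbatim from the commit even though the endpoint serves companies:

def get(self):
    """
    List all restaurants
    ---
    tags:
      - Restaurants
    definitions:
      - schema:
          id: Restaurant
          properties:
            id:
              type: integer
              description: the restaurant's id
            # ... email, name, logo_url, address, phone_number,
            # country_code follow the same pattern in the full diff
    responses:
      200:
        description: Lists all restaurants
        schema:
          title: Restaurants
          type: array
          items:
            $ref: '#/definitions/Restaurant'
    """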
d1f15d677ebb0c380e1d0a9824005a3db410909a
|
Fix query exceptions that got broken when I split the exceptions
|
wapiti/exceptions.py
|
wapiti/exceptions.py
|
# encoding: utf-8
# Copyright (c) Ecometrica. All rights reserved.
# Distributed under the BSD license. See LICENSE for details.
"""API exception definitions for wapiti"""
import traceback
from django.conf import settings
from django.http import HttpResponse
class APIBaseException(Exception):
name = "API Error"
def __init__(self, msg='', code=500):
self.msg, self.code = msg, code
def __unicode__(self):
return u"%d %s %s" % (self.code, self.name, self.msg)
def get_resp(self):
resp = HttpResponse(content=self.__unicode__(), status=self.code,
content_type='text/plain')
resp.status_code = self.code
return resp
class APINotFound(APIBaseException):
name = "Not Found"
def __init__(self, msg=''):
super(APINotFound, self).__init__(msg, 404)
class APINotImplemented(APIBaseException):
name = "Not Implemented"
def __init__(self, msg=''):
super(APINotImplemented, self).__init__(msg, 501)
class APIForbidden(APIBaseException):
name = "Forbidden"
def __init__(self, msg=''):
super(APIForbidden, self).__init__(msg, 403)
class APIRateLimit(APIBaseException):
name = "Rate Limited"
def __init__(self, msg=''):
super(APIRateLimit, self).__init__(msg, 420)
class APIMissingParameter(APIBaseException):
name = "Missing Parameter(s)"
def __init__(self, msg='', parameter='', all_parameters=()):
super(APIMissingParameter, self).__init__(msg, 400)
self.msg += "\nParameter missing: " + parameter
if all_parameters:
self.msg = (self.msg + "\nRequired parameters: "
+ ' '.join(all_parameters))
class APIServerError(APIBaseException):
name = "Server Error"
def __init__(self, msg=''):
super(APIServerError, self).__init__(msg, 500)
if settings.DEBUG:
self.msg += u"\nTraceback:\n " + traceback.format_exc().encode('utf-8')
class APIFormatNotSupported(APIBaseException):
name = "Requested Format Not Supported"
def __init__(self, msg='', format=''):
super(APIFormatNotSupported, self).__init__(msg, 406)
self.msg += " Format %s not in supported formats (%s)" % (
format, ', '.join(SUPPORTED_FORMATS)
)
class APICantGrokParameter(APIBaseException):
name = "Can't Parse Parameter(s)"
def __init__(self, msg='', parameter='', value=''):
super(APICantGrokParameter, self).__init__(msg, 400)
self.msg += " I can't decode parameter %s=%s" % (parameter, value)
if settings.DEBUG:
self.msg += "\nTraceback:\n " + traceback.format_exc()
def __unicode__(self):
return u"%d Can't grok: %s" % (self.code, self.msg)
class APIMethodNotAllowed(APIBaseException):
name = "Method Not Allowed"
def __init__(self, msg='', method='', allowed=()):
super(APIMethodNotAllowed, self).__init__(msg, 405)
self.msg = (self.msg + u" Method %s is not allowed." % method)
if allowed:
self.msg = self.msg + " Allowed methods are: " + ', '.join(allowed)
class APIBadSlice(APIBaseException):
name = "Incorrect Slicing Parameters"
def __init__(self, msg='', method='', allowed=()):
super(APIBadSlice, self).__init__(msg, 416)
self.msg = (self.msg + u" Bad slicing specification, or too much data requested.")
class APIPoorlyFormedQuery(APIBaseException):
name = "Poorly Formed Query"
def __init__(self, query_str='', msg=''):
super(APIPoorlyFormedQuery, self).__init__(msg, 400)
self.msg += (
"\nMalformed query string (empty, or invalid json): "
+ query_str + '\n\n' + SearchView.__doc__
)
class APIEvilQuery(APIBaseException):
name = "Evil Query"
def __init__(self, query_str='', msg=''):
super(APIEvilQuery, self).__init__(msg, 400)
self.msg += (
"\nYour query is evil. Following keys is not permitted: "
+ query_str + '\n\n' + SearchView.__doc__
)
|
Python
| 0.000024 |
@@ -254,16 +254,17 @@
sponse%0A%0A
+%0A
class AP
@@ -3464,32 +3464,33 @@
y Formed Query%22%0A
+%0A
def __init__
@@ -3511,32 +3511,89 @@
tr='', msg=''):%0A
+ from wapiti.views.object_views import SearchView%0A
super(AP
@@ -3852,16 +3852,17 @@
Query%22%0A
+%0A
def
@@ -3891,32 +3891,90 @@
tr='', msg=''):%0A
+ from wapiti.views.object_views import SearchView%0A%0A
super(AP
|
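Decoded, the fix moves the `SearchView` import inside the two constructors that reference `SearchView.__doc__`; the old code used the name without importing it at all, and the deferred import is the usual dodge when a top-level import would be circular. Post-commit, `APIPoorlyFormedQuery` reads:

class APIPoorlyFormedQuery(APIBaseException):
    name = "Poorly Formed Query"

    def __init__(self, query_str='', msg=''):
        # Imported here rather than at module level, presumably to avoid
        # a circular import with the views that raise this exception.
        from wapiti.views.object_views import SearchView
        super(APIPoorlyFormedQuery, self).__init__(msg, 400)
        self.msg += (
            "\nMalformed query string (empty, or invalid json): "
            + query_str + '\n\n' + SearchView.__doc__
        )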
1b7d85267e183a3bc4f3b91a54a11550bd8edfb0
|
Change shape to size for backward compatibility
|
ptsemseg/models/frrn.py
|
ptsemseg/models/frrn.py
|
import torch.nn as nn
import torch.nn.functional as F
from ptsemseg.models.utils import *
class frrn(nn.Module):
"""
Full Resolution Residual Networks for Semantic Segmentation
URL: https://arxiv.org/abs/1611.08323
References:
1) Original Author's code: https://github.com/TobyPDE/FRRN
2) TF implementation by @kiwonjoon: https://github.com/hiwonjoon/tf-frrn
"""
def __init__(self, n_classes=21):
super(frrn, self).__init__()
self.n_classes = n_classes
self.conv1 = conv2DBatchNormRelu(3, 48, 5, 1, 2)
self.up_residual_units = []
self.down_residual_units = []
for i in range(3):
self.up_residual_units.append(RU(channels=48, kernel_size=3, strides=1))
self.down_residual_units.append(RU(channels=48, kernel_size=3, strides=1))
self.up_residual_units = nn.ModuleList(self.up_residual_units)
self.down_residual_units = nn.ModuleList(self.down_residual_units)
self.split_conv = nn.Conv2d(48, 32,
kernel_size=1,
padding=0,
stride=1,
bias=True)
# each spec is as (n_blocks, channels, scale)
self.encoder_frru_specs = [[3, 96, 2],
[4, 192, 4],
[2, 384, 8],
[2, 384, 16]]
self.decoder_frru_specs = [[2, 192, 8],
[2, 192, 4],
[2, 96, 2]]
# encoding
prev_channels = 48
self.encoding_frrus = {}
for n_blocks, channels, scale in self.encoder_frru_specs:
for block in range(n_blocks):
key = '_'.join(map(str,['encoding_frru', n_blocks, channels, scale, block]))
setattr(self, key, FRRU(prev_channels=prev_channels, out_channels=channels, scale=scale))
prev_channels = channels
# decoding
self.decoding_frrus = {}
for n_blocks, channels, scale in self.decoder_frru_specs:
# pass through decoding FRRUs
for block in range(n_blocks):
key = '_'.join(map(str,['decoding_frru', n_blocks, channels, scale, block]))
setattr(self, key, FRRU(prev_channels=prev_channels, out_channels=channels, scale=scale))
prev_channels = channels
self.merge_conv = nn.Conv2d(prev_channels+32, 48,
kernel_size=1,
padding=0,
stride=1,
bias=True)
self.classif_conv = nn.Conv2d(48, self.n_classes,
kernel_size=1,
padding=0,
stride=1,
bias=True)
def forward(self, x):
# pass to initial conv
x = self.conv1(x)
# pass through residual units
for i in range(3):
x = self.up_residual_units[i](x)
# divide stream
y = x
z = self.split_conv(x)
prev_channels = 48
# encoding
for n_blocks, channels, scale in self.encoder_frru_specs:
# maxpool bigger feature map
y_pooled = F.max_pool2d(y, stride=2, kernel_size=2, padding=0)
# pass through encoding FRRUs
for block in range(n_blocks):
key = '_'.join(map(str,['encoding_frru', n_blocks, channels, scale, block]))
y, z = getattr(self, key)(y_pooled, z)
prev_channels = channels
# decoding
for n_blocks, channels, scale in self.decoder_frru_specs:
# bilinear upsample smaller feature map
upsample_size = torch.Size([_s*2 for _s in y.shape[-2:]])
y_upsampled = F.upsample(y, size=upsample_size, mode='bilinear')
# pass through decoding FRRUs
for block in range(n_blocks):
key = '_'.join(map(str,['decoding_frru', n_blocks, channels, scale, block]))
#print "Incoming FRRU Size: ", key, y_upsampled.shape, z.shape
y, z = getattr(self, key)(y_upsampled, z)
#print "Outgoing FRRU Size: ", key, y.shape, z.shape
prev_channels = channels
# merge streams
x = torch.cat([F.upsample(y, scale_factor=2, mode='bilinear' ), z], dim=1)
x = self.merge_conv(x)
# pass through residual units
for i in range(3):
x = self.down_residual_units[i](x)
# final 1x1 conv to get classification
x = self.classif_conv(x)
return x
|
Python
| 0.000001 |
@@ -3939,20 +3939,21 @@
s in y.s
-hape
+ize()
%5B-2:%5D%5D)
|
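The fix swaps `y.shape` for `y.size()`, presumably because older PyTorch `Variable` objects expose `.size()` but not `.shape` (hence "backward compatibility"); on current PyTorch the two are equivalent, as this self-contained check shows:

import torch

y = torch.zeros(1, 48, 16, 16)
upsample_size = torch.Size([_s * 2 for _s in y.size()[-2:]])  # post-fix form
assert upsample_size == torch.Size([32, 32])
assert y.size() == y.shape  # equivalent on modern PyTorch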
4d2f3ee1343b9aef24f599b8acd07ed8340f0bff
|
Convert that to a list so we can measure its len in a template
|
tndata_backend/notifications/views.py
|
tndata_backend/notifications/views.py
|
from collections import defaultdict
from django.contrib.auth.decorators import user_passes_test
from django.contrib import messages
from django.shortcuts import render, redirect
from . import queue
from .models import GCMMessage
@user_passes_test(lambda u: u.is_staff, login_url='/')
def dashboard(request):
"""A simple dashboard for enqueued GCM notifications."""
jobs = queue.messages() # Get the enqueued messages
ids = [job.args[0] for job, _ in jobs]
message_data = defaultdict(dict)
fields = ['id', 'title', 'user__email', 'message']
messages = GCMMessage.objects.filter(pk__in=ids).values_list(*fields)
for msg in messages:
mid, title, email, message = msg
message_data[mid] = {
'id': mid,
'title': title,
'email': email,
'message': message,
}
jobs = (
(job, scheduled_for, message_data[job.args[0]])
for job, scheduled_for in jobs
)
context = {
'jobs': jobs,
'metrics': ['GCM Message Sent', 'GCM Message Scheduled', ]
}
return render(request, "notifications/index.html", context)
@user_passes_test(lambda u: u.is_staff, login_url='/')
def cancel_job(request):
"""Look for an enqueued job with the given ID and cancel it."""
job_id = request.POST.get('job_id', None)
if request.method == "POST" and job_id:
for job, _ in queue.messages():
if job.id == job_id:
job.cancel()
messages.success(request, "That notification has been cancelled")
break
return redirect("notifications:dashboard")
@user_passes_test(lambda u: u.is_staff, login_url='/')
def cancel_all_jobs(request):
"""Cancels all queued messages."""
if request.method == "POST":
count = 0
for job, _ in queue.messages():
job.cancel()
count += 1
messages.success(request, "Cancelled {} notifications.".format(count))
return redirect("notifications:dashboard")
|
Python
| 0.000116 |
@@ -863,17 +863,17 @@
jobs =
-(
+%5B
%0A
@@ -964,17 +964,17 @@
obs%0A
-)
+%5D
%0A con
|
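The two-character fix turns the generator expression into a list comprehension: Django's `{{ jobs|length }}` cannot take the length of a generator, and a generator would also be exhausted after a single pass in the template. Post-commit:

jobs = [
    (job, scheduled_for, message_data[job.args[0]])
    for job, scheduled_for in jobs
]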
6618ea7c1b67d87acff86338415e2a322a01cc3c
|
add loopback support
|
testsniff.py
|
testsniff.py
|
#!/usr/bin/env python
import getopt, sys
import pcap
from dpkt.ethernet import Ethernet
def usage():
print >>sys.stderr, 'usage: %s [-i device] [pattern]' % sys.argv[0]
sys.exit(1)
def main():
opts, args = getopt.getopt(sys.argv[1:], 'i:h')
name = None
for o, a in opts:
if o == '-i': name = a
else: usage()
pc = pcap.pcap(name)
pc.setfilter(' '.join(args))
try:
print 'listening on %s: %s' % (pc.name, pc.filter)
for ts, pkt in pc:
print ts, `Ethernet(pkt)`
except KeyboardInterrupt:
nrecv, ndrop, nifdrop = pc.stats()
print '\n%d packets received by filter' % nrecv
print '%d packets dropped by kernel' % ndrop
if __name__ == '__main__':
main()
|
Python
| 0.000001 |
@@ -46,47 +46,18 @@
ort
-pcap%0Afrom dpkt.ethernet import Ethernet
+dpkt, pcap
%0A%0Ade
@@ -311,16 +311,25 @@
usage()%0A
+ %0A
pc =
@@ -378,16 +378,193 @@
(args))%0A
+ decode = %7B pcap.DLT_LOOP:dpkt.loopback.Loopback,%0A pcap.DLT_NULL:dpkt.loopback.Loopback,%0A pcap.DLT_EN10MB:dpkt.ethernet.Ethernet %7D%5Bpc.datalink()%5D%0A
try:
@@ -673,24 +673,22 @@
nt ts, %60
-Ethernet
+decode
(pkt)%60%0A
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.