commit (string, 40 chars) | subject (string, 1–3.25k chars) | old_file (string, 4–311 chars) | new_file (string, 4–311 chars) | old_contents (string, 0–26.3k chars) | lang (string, 3 classes) | proba (float64, 0–1) | diff (string, 0–7.82k chars) |
---|---|---|---|---|---|---|---|
d716098727293c2c90c97a41c57bab57330c176c
|
Fix read_only tests.
|
kitsune/sumo/tests/test_readonly.py
|
kitsune/sumo/tests/test_readonly.py
|
import copy

from django.conf import settings
from django.db import models
from django.db.utils import DatabaseError
from django.test import TestCase
from django.utils import importlib

from nose.tools import assert_raises, eq_
from pyquery import PyQuery as pq

from kitsune.questions.models import Question
from kitsune.sumo.urlresolvers import reverse
from kitsune.users.tests import UserFactory


class ReadOnlyModeTest(TestCase):
    extra = ('kitsune.sumo.middleware.ReadOnlyMiddleware',)

    def setUp(self):
        # This has to be done before the db goes into read only mode.
        self.user = UserFactory(password='testpass')

        models.signals.pre_save.connect(self.db_error)
        models.signals.pre_delete.connect(self.db_error)
        self.old_settings = copy.copy(settings._wrapped.__dict__)
        settings.SLAVE_DATABASES = ['default']
        settings_module = importlib.import_module(settings.SETTINGS_MODULE)
        settings_module.read_only_mode(settings._wrapped.__dict__)
        self.client.handler.load_middleware()

    def tearDown(self):
        settings._wrapped.__dict__ = self.old_settings
        models.signals.pre_save.disconnect(self.db_error)
        models.signals.pre_delete.disconnect(self.db_error)

    def db_error(self, *args, **kwargs):
        raise DatabaseError("You can't do this in read-only mode.")

    def test_db_error(self):
        assert_raises(DatabaseError, Question.objects.create, id=12)

    def test_login_error(self):
        # This tries to do a db write.
        url = reverse('users.login', locale='en-US')
        r = self.client.post(url, {
            'username': self.user.username,
            'password': 'testpass',
        }, follow=True)
        eq_(r.status_code, 503)

        title = pq(r.content)('title').text()
        assert title.startswith('Maintenance in progress'), title

    def test_bail_on_post(self):
        r = self.client.post('/en-US/questions')
        eq_(r.status_code, 503)

        title = pq(r.content)('title').text()
        assert title.startswith('Maintenance in progress'), title
|
Python
| 0 |
@@ -146,43 +146,27 @@
Case
-%0Afrom django.utils import importlib
+, override_settings
%0A%0Afr
@@ -800,323 +800,32 @@
__)%0A
- settings.SLAVE_DATABASES = %5B'default'%5D%0A settings_module = importlib.import_module(settings.SETTINGS_MODULE)%0A settings_module.read_only_mode(settings._wrapped.__dict__)%0A self.client.handler.load_middleware()%0A%0A def tearDown(self):%0A settings._wrapped.__dict__ = self.old_settings
+%0A def tearDown(self):
%0A
@@ -1050,16 +1050,55 @@
ode.%22)%0A%0A
+ @override_settings(READ_ONLY=True)%0A
def
@@ -1188,16 +1188,55 @@
id=12)%0A%0A
+ @override_settings(READ_ONLY=True)%0A
def
@@ -1636,16 +1636,55 @@
title%0A%0A
+ @override_settings(READ_ONLY=True)%0A
def
|
9a22cf7452723686a5065658ce5c9d31333c8a33
|
Add download random leader avatar to examples
|
examples/download_random_leader_avatar.py
|
examples/download_random_leader_avatar.py
|
Python
| 0 |
@@ -0,0 +1,1721 @@
+# Run with Python 3%0Aimport json%0Aimport requests%0Afrom random import randint%0Aimport shutil%0Aimport math%0A%0A# 1. Get your keys at https://stepic.org/oauth2/applications/ (client type = confidential,%0A# authorization grant type = client credentials)%0Aclient_id = %22...%22%0Aclient_secret = %22...%22%0A%0A# 2. Get a token%0Aauth = requests.auth.HTTPBasicAuth(client_id, client_secret)%0Aresp = requests.post('https://stepic.org/oauth2/token/',%0A data=%7B'grant_type': 'client_credentials'%7D,%0A auth=auth%0A )%0Atoken = json.loads(resp.text)%5B'access_token'%5D%0A%0A# 3. Call API (https://stepic.org/api/docs/) using this token.%0A%0A# Get leaders by count%0Adef get_leaders(count):%0A pages = math.ceil(count / 20)%0A leaders = %5B%5D%0A for page in range(1, pages + 1):%0A api_url = 'https://stepic.org/api/leaders/?page=%7B%7D'.format(page)%0A response = json.loads(requests.get(api_url, headers=%7B'Authorization': 'Bearer '+ token%7D).text)%0A leaders += response%5B'leaders'%5D%0A if not response%5B'meta'%5D%5B'has_next'%5D:%0A break%0A%0A return leaders%0A%0A# Get user by id%0Adef get_user(id):%0A api_url = 'https://stepic.org/api/users/%7B%7D/'.format(id)%0A return json.loads(requests.get(api_url, headers=%7B'Authorization': 'Bearer '+ token%7D).text)%5B'users'%5D%5B0%5D%0A%0A# Download avatar by user id%0Adef download_avatar(id, filename):%0A avatar_url = get_user(id)%5B'avatar'%5D%0A response = requests.get(avatar_url, stream=True)%0A with open('%7B%7D.png'.format(filename), 'wb') as out_file:%0A shutil.copyfileobj(response.raw, out_file)%0A%0A# Get leader user randomly from 100 leaders and download his avatar%0Arand_leader_id = get_leaders(100)%5Brandint(0, 99)%5D%5B'user'%5D%0Adownload_avatar(rand_leader_id, 'leader')%0A
|
|
e79437b7badcbc7e48e5090e2e27b892c323e829
|
add script to make bedgraph from lumpy probability (-P) output
|
scripts/prob_bedpe_to_bedgraph.py
|
scripts/prob_bedpe_to_bedgraph.py
|
Python
| 0 |
@@ -0,0 +1,1133 @@
+#!/usr/bin/env python%0Aimport sys%0Aimport numpy as np%0A%0Afrom optparse import OptionParser%0A%0Aparser = OptionParser()%0A%0Aparser.add_option(%22-b%22,%0A %22--bedpe_file%22,%0A dest=%22bedpe_file%22,%0A help=%22BEDPE file%22)%0A%0Aparser.add_option(%22-n%22,%0A %22--name%22,%0A default=%22LUMPY BedGraph%22,%0A dest=%22name%22,%0A help=%22Name%22)%0A%0A%0A(options, args) = parser.parse_args()%0A%0Aif not options.bedpe_file:%0A parser.error('BEDPE file not given')%0A%0Af = open(options.bedpe_file,'r')%0A%0Aprint 'track type=bedGraph name=%22' + options.name + '%22' %0A%0Afor l in f:%0A A = l.rstrip().split('%5Ct')%0A L=%5Bfloat(x) for x in A%5B11%5D.split()%5D %0A R=%5Bfloat(x) for x in A%5B12%5D.split()%5D %0A%0A l_chr = A%5B0%5D%0A l_start = int(A%5B1%5D)%0A%0A r_chr = A%5B3%5D%0A r_start = int(A%5B4%5D)%0A%0A c = 0%0A for p in L:%0A print '%5Ct'.join( %5Bl_chr,%0A str(l_start + c),%0A str(l_start + c + 1),%0A str(p)%5D)%0A c+=1%0A%0A c = 0%0A for p in R:%0A print '%5Ct'.join( %5Br_chr,%0A str(r_start + c),%0A str(r_start + c + 1),%0A str(p)%5D)%0A c+=1%0A %0A%0Af.close()%0A
|
|
a3b7224da3d45458af59d0ade4814b10e789b24d
|
Fix target capacity calculation
|
senlin/policies/scaling_policy.py
|
senlin/policies/scaling_policy.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from senlin.common import constraints
from senlin.common import consts
from senlin.common import exception
from senlin.common.i18n import _
from senlin.common import scaleutils as su
from senlin.common import schema
from senlin.common import utils
from senlin.db import api as db_api
from senlin.policies import base


class ScalingPolicy(base.Policy):
    """Policy for changing the size of a cluster.

    This policy is expected to be enforced before the node count of a cluster
    is changed.
    """

    VERSION = '1.0'

    PRIORITY = 100

    TARGET = [
        ('BEFORE', consts.CLUSTER_SCALE_IN),
        ('BEFORE', consts.CLUSTER_SCALE_OUT),
    ]

    PROFILE_TYPE = [
        'ANY',
    ]

    KEYS = (
        EVENT, ADJUSTMENT,
    ) = (
        'event', 'adjustment',
    )

    _SUPPORTED_EVENTS = (
        CLUSTER_SCALE_IN, CLUSTER_SCALE_OUT,
    ) = (
        consts.CLUSTER_SCALE_IN, consts.CLUSTER_SCALE_OUT,
    )

    _ADJUSTMENT_KEYS = (
        ADJUSTMENT_TYPE, ADJUSTMENT_NUMBER, MIN_STEP, BEST_EFFORT,
        COOLDOWN,
    ) = (
        'type', 'number', 'min_step', 'best_effort',
        'cooldown',
    )

    properties_schema = {
        EVENT: schema.String(
            _('Event that will trigger this policy. Must be one of '
              'CLUSTER_SCALE_IN and CLUSTER_SCALE_OUT.'),
            constraints=[
                constraints.AllowedValues(_SUPPORTED_EVENTS),
            ],
            required=True,
        ),
        ADJUSTMENT: schema.Map(
            _('Detailed specification for scaling adjustments.'),
            schema={
                ADJUSTMENT_TYPE: schema.String(
                    _('Type of adjustment when scaling is triggered.'),
                    constraints=[
                        constraints.AllowedValues(consts.ADJUSTMENT_TYPES),
                    ],
                    default=consts.CHANGE_IN_CAPACITY,
                ),
                ADJUSTMENT_NUMBER: schema.Number(
                    _('A number specifying the amount of adjustment.'),
                    default=1,
                ),
                MIN_STEP: schema.Integer(
                    _('When adjustment type is set to "CHANGE_IN_PERCENTAGE",'
                      ' this specifies that the cluster size will be '
                      'decreased by at least this number of nodes.'),
                    default=1,
                ),
                BEST_EFFORT: schema.Boolean(
                    _('Whether to do best-effort scaling when the new size '
                      'of the cluster would break the size limitation'),
                    default=False,
                ),
                COOLDOWN: schema.Integer(
                    _('Number of seconds to hold the cluster for cool-down '
                      'before allowing the cluster to be resized again.'),
                    default=0,
                ),
            }
        ),
    }

    def __init__(self, name, spec, **kwargs):
        """Initialize a scaling policy object.

        :param name: Name for the policy object.
        :param spec: A dictionary containing the detailed specification for
                     the policy.
        :param \*\*kwargs: Other optional parameters for policy object
                           creation.
        :return: An object of `ScalingPolicy`.
        """
        super(ScalingPolicy, self).__init__(name, spec, **kwargs)
        self.singleton = False

        self.event = self.properties[self.EVENT]

        adjustment = self.properties[self.ADJUSTMENT]
        self.adjustment_type = adjustment[self.ADJUSTMENT_TYPE]
        self.adjustment_number = adjustment[self.ADJUSTMENT_NUMBER]
        self.adjustment_min_step = adjustment[self.MIN_STEP]
        self.best_effort = adjustment[self.BEST_EFFORT]
        self.cooldown = adjustment[self.COOLDOWN]

    def _calculate_adjustment_count(self, current_size):
        """Calculate the adjustment count based on current_size.

        :param current_size: The current size of the target cluster.
        :return: The number of nodes to add or to remove.
        """
        if self.adjustment_type == consts.EXACT_CAPACITY:
            if self.event == consts.CLUSTER_SCALE_IN:
                count = current_size - self.adjustment_number
            else:
                count = self.adjustment_number - current_size
        elif self.adjustment_type == consts.CHANGE_IN_CAPACITY:
            count = self.adjustment_number
        else:  # consts.CHANGE_IN_PERCENTAGE:
            count = int((self.adjustment_number * current_size) / 100.0)
            if count < self.adjustment_min_step:
                count = self.adjustment_min_step

        return count

    def pre_op(self, cluster_id, action):
        """The hook function that is executed before the action.

        The checking result is stored in the ``data`` property of the action
        object rather than returned directly from the function.

        :param cluster_id: The ID of the target cluster.
        :param action: Action instance against which the policy is being
                       checked.
        :return: None.
        """
        # Use action input if count is provided
        count = action.inputs.get('count', None)
        current = db_api.node_count_by_cluster(action.context, cluster_id)
        if count is None:
            # count not specified, calculate it
            count = self._calculate_adjustment_count(current)

        # Count must be a positive value
        try:
            count = utils.parse_int_param('count', count, allow_zero=False)
        except exception.InvalidParameter:
            action.data.update({
                'status': base.CHECK_ERROR,
                'reason': _("Invalid count (%(c)s) for action '%(a)s'."
                            ) % {'c': count, 'a': action.action}
            })
            action.store(action.context)
            return

        # Check size constraints
        cluster = db_api.cluster_get(action.context, cluster_id)
        if action.action == consts.CLUSTER_SCALE_IN:
            if self.best_effort:
                count = current - cluster.min_size
            result = su.check_size_params(cluster, current - count,
                                          strict=not self.best_effort)
        else:
            if self.best_effort:
                count = cluster.max_size - current
            result = su.check_size_params(cluster, current + count,
                                          strict=not self.best_effort)

        if result:
            # failed validation
            pd = {
                'status': base.CHECK_ERROR,
                'reason': result
            }
        else:
            # passed validation
            pd = {
                'status': base.CHECK_OK,
                'reason': _('Scaling request validated.'),
            }
            if action.action == consts.CLUSTER_SCALE_IN:
                pd['deletion'] = {'count': count}
            else:
                pd['creation'] = {'count': count}

        action.data.update(pd)
        action.store(action.context)

        return

    def need_check(self, target, action):
        res = super(ScalingPolicy, self).need_check(target, action)
        if res:
            # Check if the action is expected by the policy
            res = (self.event == action.action)

        return res
|
Python
| 0.000808 |
@@ -6640,32 +6640,43 @@
count =
+ min(count,
current - clust
@@ -6682,24 +6682,25 @@
ter.min_size
+)
%0A
@@ -6897,16 +6897,27 @@
count =
+ min(count,
cluster
@@ -6935,16 +6935,17 @@
current
+)
%0A
|
18728051374484ca93b59d60a4e6941bdc5c6192
|
Add missing migration
|
project/creditor/migrations/0010_auto_20190131_1731.py
|
project/creditor/migrations/0010_auto_20190131_1731.py
|
Python
| 0.0002 |
@@ -0,0 +1,508 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations, models%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('creditor', '0009_auto_20160123_2128'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='recurringtransaction',%0A name='rtype',%0A field=models.PositiveSmallIntegerField(verbose_name='Recurrence type', choices=%5B(1, 'Monthly'), (2, 'Yearly'), (3, 'Quarterly')%5D),%0A ),%0A %5D%0A
|
|
efecaf8cdb7ca4623d2efd53590adf976fd36954
|
Add test ref #261.
|
setuptools/tests/test_build_py.py
|
setuptools/tests/test_build_py.py
|
Python
| 0 |
@@ -0,0 +1,669 @@
+import os%0A%0Aimport pytest%0A%0Afrom setuptools.dist import Distribution%0A%0A%[email protected]_fixture%0Adef tmpdir_as_cwd(tmpdir):%0A with tmpdir.as_cwd():%0A yield tmpdir%0A%0A%0Adef test_directories_in_package_data_glob(tmpdir_as_cwd):%0A %22%22%22%0A Directories matching the glob in package_data should%0A not be included in the package data.%0A%0A Regression test for #261.%0A %22%22%22%0A dist = Distribution(dict(%0A script_name='setup.py',%0A script_args=%5B'build_py'%5D,%0A packages=%5B''%5D,%0A name='foo',%0A package_data=%7B'': %5B'path/*'%5D%7D,%0A ))%0A os.makedirs('path/subpath')%0A #with contexts.quiet():%0A dist.parse_command_line()%0A dist.run_commands()%0A
|
|
2526c91b77b538e1f37bd279783de0ac5452c463
|
Add test to validate legislative parameters in XML and JSON format.
|
tests/legislation_tests.py
|
tests/legislation_tests.py
|
Python
| 0 |
@@ -0,0 +1,2679 @@
+# -*- coding: utf-8 -*-%0A%0A%0A# OpenFisca -- A versatile microsimulation software%0A# By: OpenFisca Team %[email protected]%3E%0A#%0A# Copyright (C) 2011, 2012, 2013, 2014 OpenFisca Team%0A# https://github.com/openfisca%0A#%0A# This file is part of OpenFisca.%0A#%0A# OpenFisca is free software; you can redistribute it and/or modify%0A# it under the terms of the GNU Affero General Public License as%0A# published by the Free Software Foundation, either version 3 of the%0A# License, or (at your option) any later version.%0A#%0A# OpenFisca is distributed in the hope that it will be useful,%0A# but WITHOUT ANY WARRANTY; without even the implied warranty of%0A# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the%0A# GNU Affero General Public License for more details.%0A#%0A# You should have received a copy of the GNU Affero General Public License%0A# along with this program. If not, see %3Chttp://www.gnu.org/licenses/%3E.%0A%0A%0Aimport json%0Aimport xml.etree.ElementTree%0A%0Aimport openfisca_france%0Aopenfisca_france.init_country()%0A%0Afrom openfisca_core import conv, legislations, legislationsxml, model%0A%0A%0Adef test_legislation_xml_file():%0A legislation_tree = xml.etree.ElementTree.parse(model.PARAM_FILE)%0A legislation_xml_json = conv.check(legislationsxml.xml_legislation_to_json)(legislation_tree.getroot(),%0A state = conv.default_state)%0A%0A legislation_xml_json, errors = legislationsxml.validate_node_xml_json(legislation_xml_json,%0A state = conv.default_state)%0A if errors is not None:%0A errors = conv.embed_error(legislation_xml_json, 'errors', errors)%0A if errors is None:%0A raise ValueError(unicode(json.dumps(legislation_xml_json, ensure_ascii = False,%0A indent = 2)).encode('utf-8'))%0A raise ValueError(u'%7B0%7D for: %7B1%7D'.format(%0A unicode(json.dumps(errors, ensure_ascii = False, indent = 2, sort_keys = True)),%0A unicode(json.dumps(legislation_xml_json, ensure_ascii = False, indent = 2)),%0A ).encode('utf-8'))%0A%0A legislation_json = legislationsxml.transform_node_xml_json_to_json(legislation_xml_json)%0A%0A legislation_json, errors = legislations.validate_node_json(legislation_json, state = conv.default_state)%0A if errors is not None:%0A errors = conv.embed_error(legislation_json, 'errors', errors)%0A if errors is None:%0A raise ValueError(unicode(json.dumps(legislation_json, ensure_ascii = False, indent = 2)).encode('utf-8'))%0A raise ValueError(u'%7B0%7D for: %7B1%7D'.format(%0A unicode(json.dumps(errors, ensure_ascii = False, indent = 2, sort_keys = True)),%0A unicode(json.dumps(legislation_json, ensure_ascii = False, indent = 2)),%0A ).encode('utf-8'))%0A
|
|
5f3af12d40e7c9ff388385e408d65565cb916def
|
Add Swagger integration test
|
openfisca_web_api/tests/test_swagger_integration.py
|
openfisca_web_api/tests/test_swagger_integration.py
|
Python
| 0 |
@@ -0,0 +1,1150 @@
+# -*- coding: utf-8 -*-%0A%0A%0A# OpenFisca -- A versatile microsimulation software%0A# By: OpenFisca Team %[email protected]%3E%0A#%0A# Copyright (C) 2011, 2012, 2013, 2014, 2015 OpenFisca Team%0A# https://github.com/openfisca%0A#%0A# This file is part of OpenFisca.%0A#%0A# OpenFisca is free software; you can redistribute it and/or modify%0A# it under the terms of the GNU Affero General Public License as%0A# published by the Free Software Foundation, either version 3 of the%0A# License, or (at your option) any later version.%0A#%0A# OpenFisca is distributed in the hope that it will be useful,%0A# but WITHOUT ANY WARRANTY; without even the implied warranty of%0A# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the%0A# GNU Affero General Public License for more details.%0A#%0A# You should have received a copy of the GNU Affero General Public License%0A# along with this program. If not, see %3Chttp://www.gnu.org/licenses/%3E.%0A%0Afrom nose.tools import assert_greater%0A%0Afrom ..controllers.swagger import (%0A build_paths,%0A )%0Afrom . import common%0A%0A%0Adef setup_module(module):%0A common.get_or_load_app()%0A%0A%0Adef smoke_test_build_paths():%0A assert_greater(build_paths(), 100)%0A
|
|
0507a47f1c15bac5f6eddbeb9c712f5c2b2a9358
|
Add tests for msgpack reader.
|
intake_bluesky/tests/test_msgpack.py
|
intake_bluesky/tests/test_msgpack.py
|
Python
| 0 |
@@ -0,0 +1,1551 @@
+import intake_bluesky.msgpack # noqa%0Aimport intake%0Afrom suitcase.msgpack import Serializer%0Aimport os%0Aimport pytest%0Aimport shutil%0Aimport tempfile%0Aimport time%0Aimport types%0A%0Afrom .generic import * # noqa%0A%0ATMP_DIR = tempfile.mkdtemp()%0ATEST_CATALOG_PATH = %5BTMP_DIR%5D%0A%0AYAML_FILENAME = 'intake_msgpack_test_catalog.yml'%0A%0A%0Adef teardown_module(module):%0A try:%0A shutil.rmtree(TMP_DIR)%0A except BaseException:%0A pass%0A%0A%[email protected](params=%5B'local', 'remote'%5D)%0Adef bundle(request, intake_server, example_data, tmp_path): # noqa%0A serializer = Serializer(tmp_path)%0A uid, docs = example_data%0A for name, doc in docs:%0A serializer(name, doc)%0A serializer.close()%0A%0A fullname = os.path.join(TMP_DIR, YAML_FILENAME)%0A with open(fullname, 'w') as f:%0A f.write(f'''%0Aplugins:%0A source:%0A - module: intake_bluesky%0Asources:%0A xyz:%0A description: Some imaginary beamline%0A driver: intake_bluesky.msgpack.BlueskyMsgpackCatalog%0A container: catalog%0A args:%0A paths: %7B%5Bstr(path) for path in serializer.artifacts%5B'all'%5D%5D%7D%0A handler_registry:%0A NPY_SEQ: ophyd.sim.NumpySeqHandler%0A metadata:%0A beamline: %2200-ID%22%0A ''')%0A%0A time.sleep(2)%0A%0A if request.param == 'local':%0A cat = intake.Catalog(os.path.join(TMP_DIR, YAML_FILENAME))%0A elif request.param == 'remote':%0A cat = intake.Catalog(intake_server, page_size=10)%0A else:%0A raise ValueError%0A return types.SimpleNamespace(cat=cat,%0A uid=uid,%0A docs=docs)%0A
|
|
2e1d70391b26ae353ca95ce25a08d59f1d8f9f9e
|
Create multifilebuilder_gtk3.py
|
lib/python/multifilebuilder_gtk3.py
|
lib/python/multifilebuilder_gtk3.py
|
Python
| 0 |
@@ -0,0 +1,1902 @@
+# -*- python -*-%0A# Copyright (C) 2014 Jeff Epler %[email protected]%3E%0A#%0A# This program is free software; you can redistribute it and/or modify%0A# it under the terms of the GNU General Public License as published by%0A# the Free Software Foundation; either version 2 of the License, or%0A# (at your option) any later version.%0A#%0A# This program is distributed in the hope that it will be useful,%0A# but WITHOUT ANY WARRANTY; without even the implied warranty of%0A# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the%0A# GNU General Public License for more details.%0A#%0A# You should have received a copy of the GNU General Public License%0A# along with this program; if not, write to the Free Software%0A# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA%0Afrom gi.repository import Gtk%0A%0A__all__ = %5B'MultiFileBuilder'%5D%0A%0Aclass MultiFileBuilder:%0A def __init__(self):%0A self.builders = %5B%5D%0A self.domain = None%0A%0A def set_translation_domain(self, domain):%0A self.domain = domain%0A%0A def connect_signals(self, obj):%0A for b in self.builders: b.connect_signals(obj)%0A%0A def add_from_file(self, fn):%0A builder = Gtk.Builder()%0A if self.domain is not None: builder.set_translation_domain(self.domain)%0A%0A self.builders.append(builder)%0A builder.add_from_file(fn)%0A%0A def add_from_string(self, strg):%0A builder = Gtk.Builder()%0A if self.domain is not None: builder.set_translation_domain(self.domain)%0A%0A self.builders.append(builder)%0A builder.add_from_string(strg)%0A%0A def get_object(self, obj):%0A objects = %5Bbuilder.get_object(obj) for builder in self.builders%5D%0A objects = %5Bo for o in objects if o%5D%0A if not objects: return None%0A if len(objects) %3E 1: raise ValueError, %22%22%22Use of object with duplicate ID -%3E '%25s'%22%22%22%25 obj%0A return objects%5B0%5D%0A %0A
|
|
216c7c21adcbec601ebcc624eac4b5087422e5d2
|
add test for sugarsource
|
sncosmo/tests/test_sugarsource.py
|
sncosmo/tests/test_sugarsource.py
|
Python
| 0.000001 |
@@ -0,0 +1,1955 @@
+# Licensed under a 3-clause BSD style license - see LICENSES%0A%0A%22%22%22Tests for SUGARSource (and wrapped in Model)%22%22%22%0A%0Aimport os%0A%0Aimport numpy as np%0Aimport pytest%0Afrom numpy.testing import assert_allclose%0A%0Aimport sncosmo%0A%0A%0Adef sugar_model(Xgr=0, q1=0, q2=0, q3=0, A=0,%0A phase=np.linspace(-5, 30, 10),%0A wave=np.linspace(4000, 8000, 10)):%0A %22%22%22%0A Give a spectral time series of SUGAR model%0A for a given set of parameters.%0A %22%22%22%0A source = sncosmo.get_source('sugar')%0A%0A mag_sugar = source._model%5B'M0'%5D(phase, wave)%0A%0A keys = %5B'ALPHA1', 'ALPHA2', 'ALPHA3', 'CCM'%5D%0A parameters = %5Bq1, q2, q3, A%5D%0A for i in range(4):%0A comp = source._model%5Bkeys%5Bi%5D%5D(phase, wave) * parameters%5Bi%5D%0A mag_sugar += comp%0A # Mag AB used in the training of SUGAR.%0A mag_sugar += 48.59%0A wave_factor = (wave ** 2 / 299792458. * 1.e-10)%0A return (Xgr * 10. ** (-0.4 * mag_sugar) / wave_factor)%0A%0A%[email protected]_download%0Adef test_sugarsource():%0A %22%22%22Test timeseries output from SUGARSource vs pregenerated timeseries%0A from the original files.%22%22%22%0A%0A source = sncosmo.get_source(%22sugar%22)%0A model = sncosmo.Model(source)%0A%0A q1 = %5B-1, 0, 1, 2%5D%0A q2 = %5B1, 0, -1, -2%5D%0A q3 = %5B-1, 1, 0, -2%5D%0A A = %5B-0.1, 0, 0.2, 0.5%5D%0A Xgr = %5B10**(-0.4 * 34), 10**(-0.4 * 33),%0A 10**(-0.4 * 38), 10**(-0.4 * 42)%5D%0A%0A time = np.linspace(-5, 30, 10)%0A wave = np.linspace(4000, 8000, 10)%0A%0A for i in range(len(q1)):%0A%0A fluxref = sugar_model(Xgr=Xgr%5Bi%5D,%0A q1=q1%5Bi%5D,%0A q2=q2%5Bi%5D,%0A q3=q3%5Bi%5D,%0A A=A%5Bi%5D,%0A phase=time,%0A wave=wave)%0A%0A model.set(z=0, t0=0, Xgr=Xgr%5Bi%5D,%0A q1=q1%5Bi%5D, q2=q2%5Bi%5D,%0A q3=q3%5Bi%5D, A=A%5Bi%5D)%0A flux = model.flux(time, wave)%0A assert_allclose(flux, fluxref, rtol=1e-13)%0A
|
|
da85d929118a9ac51a112a405818838e476a2f80
|
Add blank test for updating later.
|
tests/test_pi_mqtt_gpio.py
|
tests/test_pi_mqtt_gpio.py
|
Python
| 0 |
@@ -0,0 +1,26 @@
+def test_noop():%0A pass%0A
|
|
06455d743590e47bfe5c9e1a6ff745622abe9cb5
|
add tests for polymorphism
|
tests/test_polymorphism.py
|
tests/test_polymorphism.py
|
Python
| 0 |
@@ -0,0 +1,828 @@
+import pytest%0A%0Afrom dataclasses import dataclass%0Afrom hologram import JsonSchemaMixin, ValidationError%0Afrom hologram.helpers import StrEnum, StrLiteral%0Afrom typing import Union%0A%0A%0Aclass Bar(StrEnum):%0A x = %22x%22%0A y = %22y%22%0A%0A%0A@dataclass%0Aclass BarX(JsonSchemaMixin):%0A bar: StrLiteral(Bar.x)%0A%0A%0A@dataclass%0Aclass BarY(JsonSchemaMixin):%0A bar: StrLiteral(Bar.y)%0A%0A%0A@dataclass%0Aclass Foo(JsonSchemaMixin):%0A foo: Union%5BBarX, BarY%5D%0A%0A%0Adef test_symmetry():%0A def assert_symmetry(value):%0A assert Foo.from_dict(value).to_dict() == value%0A%0A assert_symmetry(%7B%22foo%22: %7B%22bar%22: %22x%22%7D%7D)%0A assert_symmetry(%7B%22foo%22: %7B%22bar%22: %22y%22%7D%7D)%0A%0A%0Adef test_subclasses():%0A foo_x = Foo.from_dict(%7B%22foo%22: %7B%22bar%22: %22x%22%7D%7D)%0A assert isinstance(foo_x.foo, BarX)%0A%0A foo_y = Foo.from_dict(%7B%22foo%22: %7B%22bar%22: %22y%22%7D%7D)%0A assert isinstance(foo_y.foo, BarY)%0A
|
|
64fac50f77c492edf20b0e4161b9da988831f2ed
|
change author can also be null
|
src/c3nav/editor/migrations/0002_auto_20170612_1615.py
|
src/c3nav/editor/migrations/0002_auto_20170612_1615.py
|
Python
| 0.000368 |
@@ -0,0 +1,623 @@
+# -*- coding: utf-8 -*-%0A# Generated by Django 1.11.2 on 2017-06-12 16:15%0Afrom __future__ import unicode_literals%0A%0Afrom django.conf import settings%0Afrom django.db import migrations, models%0Aimport django.db.models.deletion%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('editor', '0001_initial'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='change',%0A name='author',%0A field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='changes', to=settings.AUTH_USER_MODEL, verbose_name='Author'),%0A ),%0A %5D%0A
|
|
a42a6a54f732ca7eba700b867a3025739ad6a271
|
Move main code to function because of pylint warning 'Invalid constant name'
|
list_all_users_in_group.py
|
list_all_users_in_group.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-

from __future__ import print_function
import grp
import pwd
import inspect
import argparse


def list_all_users_in_group(groupname):
    """Get list of all users of group.

    Get sorted list of all users of group GROUP,
    including users with main group GROUP.
    Origin in https://github.com/vazhnov/list_all_users_in_group
    """
    try:
        group = grp.getgrnam(groupname)
        # On error "KeyError: 'getgrnam(): name not found: GROUP'"
    except KeyError:
        return None
    group_all_users_set = set(group.gr_mem)
    for user in pwd.getpwall():
        if user.pw_gid == group.gr_gid:
            group_all_users_set.add(user.pw_name)
    return sorted(group_all_users_set)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description=inspect.getdoc(list_all_users_in_group),
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('-d', '--delimiter', default='\n', help='Use DELIMITER instead of newline for users delimiter')
    parser.add_argument('groupname', help='Group name')
    args = parser.parse_args()
    result = list_all_users_in_group(args.groupname)
    if result:
        print(args.delimiter.join(result))
|
Python
| 0 |
@@ -743,33 +743,19 @@
t)%0A%0A
-if __name__ == %22__
+%0Adef
main
-__%22
+()
:%0A
@@ -1252,8 +1252,48 @@
esult))%0A
+%0A%0Aif __name__ == %22__main__%22:%0A main()%0A
|
76aed66701ac9f267ef08bde0f0b55e2ad905e68
|
Add micro bench arith-modulo
|
graal/edu.uci.python.benchmark/src/micro/arith-modulo.py
|
graal/edu.uci.python.benchmark/src/micro/arith-modulo.py
|
Python
| 0.000001 |
@@ -0,0 +1,575 @@
+# zwei 10/09/13%0A# arithmetic ops (partially extracted from spectralnorm)%0Aimport time%0A%0Adef docompute(num):%0A%09for i in range(num):%0A%09%09sum = 0%0A%09%09j = 0%0A%0A%09%09# if i == 0:%0A%09%09# %09i += 1%0A%0A%09%09while j %3C i:%0A%09%09%09if i %25 3 == 0:%0A%09%09%09%09temp = 1%0A%09%09%09else:%0A%09%09%09%09temp = i %25 3%0A%09%09%09%0A%09%09%09j += temp%0A%09%09sum = sum + j%0A%0A%09return sum%0A%0A%0Adef measure(num):%0A%09print(%22Start timing...%22)%0A%09start = time.time()%0A%0A%09for run in range(num):%0A%09%09sum = docompute(5000) #5000%0A%0A%09print(%22sum%22, sum)%0A%0A%09duration = %22%25.3f%5Cn%22 %25 (time.time() - start)%0A%09print(%22arith-modulo: %22 + duration)%0A%0Afor run in range(50):%0A%09docompute(1000) #1000%0A%0Ameasure(50)
|
|
996a4dd0223d8f327dbe822f9f6e430465c6c70f
|
add django settings for djsupervisor
|
measure_mate/settings/supervisord.py
|
measure_mate/settings/supervisord.py
|
Python
| 0 |
@@ -0,0 +1,84 @@
+from measure_mate.settings.base import *%0A%0AINSTALLED_APPS += (%0A 'djsupervisor',%0A)%0A
|
|
d6acd4324f5fe5e57750a335b35cd42edd8544b5
|
Solve the puzzle.
|
01/solve.py
|
01/solve.py
|
Python
| 0.000084 |
@@ -0,0 +1,557 @@
+%22%22%22Report the manhattan distance between a starting point and an ending point,%0Agiven a set of directions to follow to get move between the two points.%22%22%22%0A%0A%0Afrom distance import get_distance%0Afrom directions import load_directions, follow_directions%0A%0A%0Adef main():%0A directions = load_directions('directions.txt')%0A starting_point = (0, 0)%0A starting_orientation = 'N'%0A ending_point, _ = follow_directions(starting_point, starting_orientation, *directions)%0A print(get_distance(starting_point, ending_point))%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
8eff29ba9777cd977f04a2c0b68d598ad63c8f47
|
Create 02.py
|
02/ex/02.py
|
02/ex/02.py
|
Python
| 0 |
@@ -0,0 +1,681 @@
+# Define a procedure, stamps, which takes as its input a positive integer in%0A# pence and returns the number of 5p, 2p and 1p stamps (p is pence) required %0A# to make up that value. The answer should use as many 5p stamps as possible,%0A# then 2 pence stamps and finally 1p stamps.%0A%0Adef stamps(nn):%0A # Your code here%0A return (nn / 5, (nn %25 5) / 2, (nn %25 5) %25 2) %0A%0Aprint stamps(8)%0A#%3E%3E%3E (1, 1, 1) # one 5p stamp, one 2p stamp and one 1p stamp%0Aprint stamps(5)%0A#%3E%3E%3E (1, 0, 0) # one 5p stamp, no 2p stamps and no 1p stamps%0Aprint stamps(29)%0A#%3E%3E%3E (5, 2, 0) # five 5p stamps, two 2p stamps and no 1p stamps%0Aprint stamps(0)%0A#%3E%3E%3E (0, 0, 0) # no 5p stamps, no 2p stamps and no 1p stamps%0A
|
|
406fcf5297458f5469364faf8180683b89fd527c
|
Add wmi sampler tests (#5859)
|
datadog_checks_base/tests/test_wmisampler.py
|
datadog_checks_base/tests/test_wmisampler.py
|
Python
| 0 |
@@ -0,0 +1,1920 @@
+# (C) Datadog, Inc. 2020-present%0A# All rights reserved%0A# Licensed under a 3-clause BSD style license (see LICENSE)%0A%0Aimport pytest%0Afrom tests.utils import requires_windows%0A%0Atry:%0A from datadog_checks.base.checks.win.wmi import WMISampler%0Aexcept ImportError:%0A pass%0A%0A%0A@requires_windows%[email protected]%0Adef test_format_filter_value():%0A filters = %5B%7B'a': 'b'%7D, %7B'c': 'd'%7D%5D%0A sampler = WMISampler(logger=None, class_name='MyClass', property_names='my.prop', filters=filters)%0A formatted_filters = sampler.formatted_filters%0A assert formatted_filters == %22 WHERE ( c = 'd' ) OR ( a = 'b' )%22%0A%0A%0A@requires_windows%[email protected]%0Adef test_format_filter_list():%0A filters = %5B%7B'a': %5B'%3E', 1, 'i_get_ignored'%5D%7D%5D%0A sampler = WMISampler(logger=None, class_name='MyClass', property_names='my.prop', filters=filters)%0A formatted_filters = sampler.formatted_filters%0A assert formatted_filters == %22 WHERE ( a %3E '1' )%22%0A%0A%0A@requires_windows%[email protected]%0Adef test_format_filter_like():%0A filters = %5B%7B'a': '%25foo'%7D%5D%0A sampler = WMISampler(logger=None, class_name='MyClass', property_names='my.prop', filters=filters)%0A formatted_filters = sampler.formatted_filters%0A assert formatted_filters == %22 WHERE ( a LIKE '%25foo' )%22%0A%0A%0A@requires_windows%[email protected]%0Adef test_format_filter_list_expected():%0A filters = %5B%7B'a': %5B'%3C', 3%5D%7D%5D%0A sampler = WMISampler(logger=None, class_name='MyClass', property_names='my.prop', filters=filters)%0A formatted_filters = sampler.formatted_filters%0A assert formatted_filters == %22 WHERE ( a %3C '3' )%22%0A%0A%0A@requires_windows%[email protected]%0Adef test_format_filter_tuple():%0A # needed for backwards compatibility and hardcoded filters%0A filters = %5B%7B'a': ('%3C', 3)%7D%5D%0A sampler = WMISampler(logger=None, class_name='MyClass', property_names='my.prop', filters=filters)%0A formatted_filters = sampler.formatted_filters%0A assert formatted_filters == %22 WHERE ( a %3C '3' )%22%0A
|
|
d067d9937ff34787e6f632d86075af29c27d98f8
|
Add py solution for 714. Best Time to Buy and Sell Stock with Transaction Fee
|
py/best-time-to-buy-and-sell-stock-with-transaction-fee.py
|
py/best-time-to-buy-and-sell-stock-with-transaction-fee.py
|
Python
| 0 |
@@ -0,0 +1,364 @@
+class Solution(object):%0A def maxProfit(self, prices, fee):%0A %22%22%22%0A :type prices: List%5Bint%5D%0A :type fee: int%0A :rtype: int%0A %22%22%22%0A hold, not_hold = None, 0%0A for p in prices:%0A hold, not_hold = max(hold, not_hold - p - fee), max(not_hold, None if hold is None else hold + p)%0A return max(hold, not_hold)%0A
|
|
4c2c80e0004a758787beb555fbbe789cce5e82fc
|
Fix variable referenced before assignment in vmwareapi code.
|
nova/tests/test_vmwareapi_vm_util.py
|
nova/tests/test_vmwareapi_vm_util.py
|
Python
| 0.000001 |
@@ -0,0 +1,1841 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4%0A#%0A# Copyright 2013 Canonical Corp.%0A# All Rights Reserved.%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22); you may%0A# not use this file except in compliance with the License. You may obtain%0A# a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS, WITHOUT%0A# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the%0A# License for the specific language governing permissions and limitations%0A# under the License.%0A%0Afrom nova import exception%0Afrom nova import test%0Afrom nova.virt.vmwareapi import fake%0Afrom nova.virt.vmwareapi import vm_util%0A%0A%0Aclass fake_session(object):%0A def __init__(self, ret=None):%0A self.ret = ret%0A%0A def _call_method(self, *args):%0A return self.ret%0A%0A%0Aclass VMwareVMUtilTestCase(test.TestCase):%0A def setUp(self):%0A super(VMwareVMUtilTestCase, self).setUp()%0A%0A def tearDown(self):%0A super(VMwareVMUtilTestCase, self).tearDown()%0A%0A def test_get_datastore_ref_and_name(self):%0A result = vm_util.get_datastore_ref_and_name(%0A fake_session(%5Bfake.Datastore()%5D))%0A%0A self.assertEquals(result%5B1%5D, %22fake-ds%22)%0A self.assertEquals(result%5B2%5D, 1024 * 1024 * 1024)%0A self.assertEquals(result%5B3%5D, 1024 * 1024 * 500)%0A%0A def test_get_datastore_ref_and_name_without_datastore(self):%0A%0A self.assertRaises(exception.DatastoreNotFound,%0A vm_util.get_datastore_ref_and_name,%0A fake_session(), host=%22fake-host%22)%0A%0A self.assertRaises(exception.DatastoreNotFound,%0A vm_util.get_datastore_ref_and_name,%0A fake_session(), cluster=%22fake-cluster%22)%0A
|
|
c687ab125af67d769afc781731b1a2b663a5bb2c
|
Use SystemRandom to generate unpredictable random slugs. Fixed duplicate characters in the choice string, removed iI from it to prevent confusion. Fixes issue #40.
|
dpaste/models.py
|
dpaste/models.py
|
import datetime
import random
import mptt

from django.db import models
from django.core.urlresolvers import reverse
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from dpaste.highlight import LEXER_DEFAULT

L = getattr(settings, 'DPASTE_SLUG_LENGTH', 4)
T = getattr(settings, 'DPASTE_SLUG_CHOICES',
    'abcdefghijkmnopqrstuvwwxyzABCDEFGHIJKLOMNOPQRSTUVWXYZ1234567890')


def generate_secret_id(length=L):
    return ''.join([random.choice(T) for i in range(length)])


class Snippet(models.Model):
    secret_id = models.CharField(_(u'Secret ID'), max_length=255, blank=True)
    content = models.TextField(_(u'Content'), )
    lexer = models.CharField(_(u'Lexer'), max_length=30, default=LEXER_DEFAULT)
    published = models.DateTimeField(_(u'Published'), blank=True)
    expires = models.DateTimeField(_(u'Expires'), blank=True)
    parent = models.ForeignKey('self', null=True, blank=True, related_name='children')

    class Meta:
        ordering = ('-published',)
        db_table = 'dpaste_snippet'

    def get_linecount(self):
        return len(self.content.splitlines())

    @property
    def is_single(self):
        return self.is_root_node() and not self.get_children()

    def save(self, *args, **kwargs):
        if not self.pk:
            self.published = datetime.datetime.now()
            self.secret_id = generate_secret_id()
        super(Snippet, self).save(*args, **kwargs)

    def get_absolute_url(self):
        return reverse('snippet_details', kwargs={'snippet_id': self.secret_id})

    def __unicode__(self):
        return self.secret_id


mptt.register(Snippet, order_insertion_by=['content'])
|
Python
| 0 |
@@ -1,12 +1,26 @@
+from datetime
import datet
@@ -27,33 +27,62 @@
ime%0A
+from os
import
+u
random%0A
-import mptt
+from random import SystemRandom
%0A%0Afr
@@ -242,16 +242,29 @@
zy as _%0A
+import mptt%0A%0A
from dpa
@@ -299,16 +299,35 @@
EFAULT%0A%0A
+R = SystemRandom()%0A
L = geta
@@ -437,17 +437,16 @@
pqrstuvw
-w
xyzABCDE
@@ -452,13 +452,11 @@
EFGH
-I
JKL
-O
MNOP
@@ -533,22 +533,17 @@
'.join(%5B
-random
+R
.choice(
@@ -560,22 +560,17 @@
n range(
-length
+L
)%5D)%0A%0Acla
@@ -1374,25 +1374,16 @@
ished =
-datetime.
datetime
|
f3c823db63a6ca6b6679c2a9de8dcb1ccc805e37
|
Remove print
|
blog/templatetags/blog_tags.py
|
blog/templatetags/blog_tags.py
|
from datetime import datetime

from django import template
import sys

from events.models import EventPage, EventsIndexPage
from blog.models import BlogPage, BlogIndexPage, CodeBlock
from collections import OrderedDict

register = template.Library()


@register.inclusion_tag(
    'blog/tags/blog_sidebar.html',
    takes_context=True
)
def blog_sidebar(context, show_sponsor=True, show_archives=False, show_tags=False, show_children=False, parent=None,
                 archive_count=sys.maxsize):
    blog_index = BlogIndexPage.objects.live().in_menu().first()
    if show_archives:
        archives = OrderedDict()
        for blog in BlogPage.objects.live().order_by('-first_published_at'):
            archives.setdefault(blog.date.year, {}).setdefault(blog.date.month, []).append(blog)
    else:
        archives = None

    print(archives)

    if show_children and parent:
        children = parent.children
    else:
        children = None
    return {
        'blog_index': blog_index,
        'archives': archives,
        'children': children,
        'show_sponsor': show_sponsor,
        'show_tags': show_tags,
        'archive_count': archive_count,
        # required by the pageurl tag that we want to use within this template
        'request': context['request'],
    }


# Blog feed for home page
@register.inclusion_tag(
    'blog/tags/blog_listing_homepage.html',
    takes_context=True
)
def blog_listing_homepage(context, count=5):
    blogs = BlogPage.objects.live().order_by('-date')
    blog_index = BlogIndexPage.objects.live().in_menu().first()
    archives = dict()
    for blog in blogs:
        archives.setdefault(blog.date.year, {}).setdefault(blog.date.month, []).append(blog)
    return {
        'blogs': blogs[:count],
        'blog_index': blog_index,
        'archives': archives,
        # required by the pageurl tag that we want to use within this template
        'request': context['request'],
    }


# Event feed for home page
@register.inclusion_tag(
    'blog/tags/event_listing_homepage.html',
    takes_context=True
)
def event_listing_homepage(context, count=4):
    events = EventPage.objects.live().order_by('start')[:count]
    return {
        'events': events,
        'event_list': EventsIndexPage.objects.live().first(),
        # required by the pageurl tag that we want to use within this template
        'request': context['request'],
    }


@register.inclusion_tag(
    'blog/tags/search_filters.html',
    takes_context=True
)
def search_filters(context):
    archive_date = context['request'].GET.get('date')
    if archive_date:
        archive_date = datetime.strftime(
            datetime.strptime(context['request'].GET.get('date'), '%Y-%m'), '%B %Y')
    return {
        'archive_date': archive_date,
        'tag': context['request'].GET.get('tag'),
        # required by the pageurl tag that we want to use within this template
        'request': context['request'],
    }


@register.filter
def get_code_language(language):
    return dict(CodeBlock.LANGUAGE_CHOICES)[language]


@register.filter
def to_month_str(value):
    return {
        1: 'January',
        2: 'February',
        3: 'March',
        4: 'April',
        5: 'May',
        6: 'June',
        7: 'July',
        8: 'August',
        9: 'September',
        10: 'October',
        11: 'November',
        12: 'December',
    }[value]
|
Python
| 0.000016 |
@@ -827,29 +827,8 @@
ne%0A%0A
- print(archives)%0A%0A
|
1dc439fcf7a823270156708208339a8bf420703c
|
Create generic abstract Django sitemap
|
opps/sitemaps/sitemaps.py
|
opps/sitemaps/sitemaps.py
|
Python
| 0 |
@@ -0,0 +1,887 @@
+# -*- coding: utf-8 -*-%0Afrom django.contrib.sitemaps import GenericSitemap as DjangoGenericSitemap%0Afrom django.contrib.sitemaps import Sitemap as DjangoSitemap%0Afrom django.utils import timezone%0A%0Afrom opps.articles.models import Article%0A%0A%0Adef InfoDisct(googlenews=False):%0A article = Article.objects.filter(date_available__lte=timezone.now(),%0A published=True)%0A if googlenews:%0A article = article%5B:1000%5D%0A return %7B%0A 'queryset': article,%0A 'date_field': 'date_available',%0A %7D%0A%0A%0Aclass BaseSitemap(DjangoSitemap):%0A priority = 0.6%0A%0A def items(self):%0A return Article.objects.filter(date_available__lte=timezone.now(),%0A published=True)%0A%0A def lastmod(self, obj):%0A return obj.date_available%0A%0A%0Aclass GenericSitemap(DjangoGenericSitemap):%0A limit = 1000%0A priority = 0.6%0A
|
|
7bab9610bf9278b8dedb55a513f22130e2f629ed
|
Add PP example
|
examples/13_PreferedPhase.py
|
examples/13_PreferedPhase.py
|
Python
| 0.000001 |
@@ -0,0 +1,2243 @@
+%22%22%22This example illustrate hox to find the prefered-phase (PP).%0A%0AFirst, the amplitude is binned according to phase slices (360%C2%B0/nbins). Then,%0Athe PP is defined as the phase where the amplitude is maximum.%0A%22%22%22%0Aimport numpy as np%0Aimport matplotlib.pyplot as plt%0Afrom tensorpac import PacSignals, Pac%0A%0Aplt.style.use('seaborn-poster')%0A%0A# Generate 100 datasets with a %5B5, 7%5D%3C-%3E%5B90, 100%5Dhz coupling :%0Asf = 1024.%0Andatasets = 100%0Adata, time = PacSignals(fpha=%5B5, 7%5D, famp=%5B95, 105%5D, ndatasets=ndatasets,%0A sf=sf, noise=3, chi=.7, npts=2000)%0A%0A%0A# Define a Pac object. Here, we are not going to use the idpac variable :%0Ap = Pac(fpha=%5B5, 7%5D, famp=(60, 200, 10, 1))%0A%0A# Extract the phase and the amplitude :%0Apha = p.filter(sf, data, axis=1, ftype='phase')%0Aamp = p.filter(sf, data, axis=1, ftype='amplitude')%0A%0A# Introduce a 2*pi/2 phase shift (equivalent to adding a 90%C2%B0 shift) :%0Apha += np.pi/2%0A%0A# Now, compute the PP :%0Aambin, pp, vecbin = p.pp(pha, amp, axis=2, nbins=72)%0A%0A# Reshape the PP to be (ndatasets, namp) :%0App = np.squeeze(pp).T%0A%0A# Reshape the amplitude to be (nbins, namp, ndatasets) and take the mean across%0A# datasets :%0Aambin = np.squeeze(ambin).mean(-1)%0A%0A# plt.figure(figsize=(20, 35))%0A# Plot the prefered phase :%0Aplt.subplot(221)%0Aplt.pcolormesh(p.yvec, np.arange(100), np.rad2deg(pp), cmap='Spectral_r')%0Acb = plt.colorbar()%0Aplt.clim(vmin=-180., vmax=180.)%0Aplt.axis('tight')%0Aplt.xlabel('Amplitude center frequency (Hz)')%0Aplt.ylabel('Ndatasets')%0Aplt.title(%22PP for each dataset and for several amplitudes.%5Cn100hz amplitudes%22%0A %22 are phase locked to 90%C2%B0 (%3C=%3E pi/2)%22)%0Acb.set_label('PP (in degrees)')%0A%0A# Then, we show the histogram corresponding to an 100he amplitude :%0Aidx100 = np.abs(p.yvec - 100.).argmin()%0Aplt.subplot(222)%0Ah = plt.hist(pp%5B:, idx100%5D, color='#ab4642')%0Aplt.xlim((-np.pi, np.pi))%0Aplt.xlabel('PP')%0Aplt.title('PP across trials for the 100hz amplitude')%0Aplt.xticks(%5B-np.pi, -np.pi/2, 0, np.pi/2, np.pi%5D)%0Aplt.gca().set_xticklabels(%5Br%22$-%5Cpi$%22, r%22$-%5Cfrac%7B%5Cpi%7D%7B2%7D$%22, %22$0$%22,%0A r%22$%5Cfrac%7B%5Cpi%7D%7B2%7D$%22, r%22$%5Cpi$%22%5D)%0A%0Ap.polar(ambin.T, vecbin, p.yvec, cmap='Spectral_r', interp=.1, subplot=212,%0A cblabel='Amplitude bins')%0A%0A# plt.savefig('pp.png', dpi=300, bbox_inches='tight')%0A%0Ap.show()%0A
|
|
25b7f06a0f185a7e83aab38888e32b41c2c31853
|
Create 02.py
|
03/hw/02.py
|
03/hw/02.py
|
Python
| 0 |
@@ -0,0 +1,381 @@
+# Define a procedure, greatest,%0A# that takes as input a list%0A# of positive numbers, and%0A# returns the greatest number%0A# in that list. If the input%0A# list is empty, the output%0A# should be 0.%0A%0Adef greatest(xs):%0A greatest = 0%0A for x in xs:%0A if x %3E greatest:%0A greatest = x%0A return greatest%0A%0A#print greatest(%5B4,23,1%5D)%0A#%3E%3E%3E 23%0A#print greatest(%5B%5D)%0A#%3E%3E%3E 0%0A%0A %0A
|
|
9e21a0552bd283eab2b0d35562cf2ab8edbb81bd
|
Clarify import output.
|
events/categories.py
|
events/categories.py
|
import re

from events.models import Category, CategoryLabel
from difflib import get_close_matches

ENDING_PARENTHESIS_PATTERN = r' \([^)]+\)$'


class CategoryMatcher(object):
    def __init__(self):
        label_to_category_ids = {}
        self.name_to_category_ids = {}
        for label_id, category_id in Category.alt_labels.through.objects.all().values_list(
                'categorylabel_id', 'category_id'):
            label_to_category_ids.setdefault(label_id, set()).add(category_id)
        for label_id, name in CategoryLabel.objects.filter(language_id='fi').values_list(
                'id', 'name'):
            self.name_to_category_ids[name.lower()] = label_to_category_ids.get(label_id, set())
        for cid, preflabel in Category.objects.all().values_list(
                'id', 'name_fi'):
            if preflabel is not None:
                text = preflabel.lower()
                self.name_to_category_ids.setdefault(text, set()).add(cid)
                without_parenthesis = re.sub(ENDING_PARENTHESIS_PATTERN, '', text)
                if without_parenthesis != text:
                    self.name_to_category_ids.setdefault(without_parenthesis, set()).add(cid)
        self.labels = self.name_to_category_ids.keys()
        print('Initialized', len(self.labels), 'keys')

    def match(self, text):
        wordsplit = re.compile(r'\s+')
        #labels = CategoryLabel.objects
        #match = labels.filter(name__iexact=text)
        text = text.lower()
        if text == 'kokous': text = 'kokoukset'
        elif text == 'kuntoilu': text = 'kuntoliikunta'
        elif text == 'samba': text = 'sambat'
        exact_match = lambda x: x.lower() == text
        labels = self.labels
        matches = [l for l in labels if exact_match(l)]
        success = lambda: len(matches) > 0
        if success():
            match_type = 'exact'
        if not success():
            words = wordsplit.split(text)
            if len(words) > 1:
                for word in words:
                    exact_match = lambda x: x.lower() == word
                    matches.extend([l for l in labels if exact_match(l)])
                if success(): match_type = 'subword'
        if not success():
            matches = [l for l in labels if l.lower().startswith(text)]
            match_type = 'prefix'
        if not success():
            matches = [l for l in labels if l.lower() == text + 't']
            if success(): match_type = 'simple-plural'
        if not success():
            matches = [l for l in labels if l.lower().startswith(text[0:-2])]
            if success(): match_type = 'cut-two-letters'
        if not success():
            if len(text) > 10:
                matches = [l for l in labels if l.lower().startswith(text[0:-5])]
                if success(): match_type = 'prefix'
        if not success():
            for i in range(1, 10):
                matches = [l for l in labels if l.lower() == text[i:]]
                if success():
                    match_type = 'suffix'
                    break
        if not success():
            print('no match', text)
            return None
        if success():
            category_ids = set()
            if match_type not in ['exact', 'subword']:
                cmatch = get_close_matches(
                    text, [m.lower() for m in matches], n=1)
                if len(cmatch) == 1:
                    category_ids = self.name_to_category_ids.get(cmatch[0])
            else:
                for m in matches:
                    category_ids.update(self.name_to_category_ids[m])
            if len(category_ids) < 1:
                print('no matches for', text)
                return None
            objects = Category.objects.filter(id__in=category_ids)
            if len(category_ids) > 1:
                try:
                    aggregate_category = objects.get(aggregate=True)
                    aggregate_name = re.sub(ENDING_PARENTHESIS_PATTERN, '', aggregate_category.name_fi)
                    result = [aggregate_category]
                    for o in objects.exclude(name_fi__istartswith=aggregate_name):
                        result.append(o)
                    return result
                except Category.DoesNotExist:
                    pass
                return objects
            return objects
|
Python
| 0.000009 |
@@ -1277,16 +1277,25 @@
bels), '
+category
keys')%0A%0A
|
321c857d4cc2bacdeaa398d3b4b1fd7769f33718
|
Add py-soupsieve package (#12827)
|
var/spack/repos/builtin/packages/py-soupsieve/package.py
|
var/spack/repos/builtin/packages/py-soupsieve/package.py
|
Python
| 0 |
@@ -0,0 +1,702 @@
+# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other%0A# Spack Project Developers. See the top-level COPYRIGHT file for details.%0A#%0A# SPDX-License-Identifier: (Apache-2.0 OR MIT)%0A%0Afrom spack import *%0A%0A%0Aclass PySoupsieve(PythonPackage):%0A %22%22%22A modern CSS selector implementation for Beautiful Soup.%22%22%22%0A%0A homepage = %22https://github.com/facelessuser/soupsieve%22%0A url = %22https://pypi.io/packages/source/s/soupsieve/soupsieve-1.9.3.tar.gz%22%0A%0A version('1.9.3', sha256='8662843366b8d8779dec4e2f921bebec9afd856a5ff2e82cd419acc5054a1a92')%0A%0A depends_on('py-setuptools', type='build')%0A depends_on('py-backports-functools-lru-cache', when='%5Epython@:2', type=('build', 'run'))%0A
|
|
107994f3ac39b60b389fa4d7eb4efbb284b70a10
|
implement logic improvement suggestion by @balloob
|
homeassistant/components/sensor/ecobee.py
|
homeassistant/components/sensor/ecobee.py
|
"""
homeassistant.components.sensor.ecobee
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Ecobee Thermostat Component
This component adds support for Ecobee3 Wireless Thermostats.
You will need to setup developer access to your thermostat,
and create and API key on the ecobee website.
The first time you run this component you will see a configuration
component card in Home Assistant. This card will contain a PIN code
that you will need to use to authorize access to your thermostat. You
can do this at https://www.ecobee.com/consumerportal/index.html
Click My Apps, Add application, Enter Pin and click Authorize.
After authorizing the application click the button in the configuration
card. Now your thermostat and sensors should shown in home-assistant.
You can use the optional hold_temp parameter to set whether or not holds
are set indefintely or until the next scheduled event.
ecobee:
api_key: asdfasdfasdfasdfasdfaasdfasdfasdfasdf
hold_temp: True
"""
import logging
from homeassistant.helpers.entity import Entity
from homeassistant.components import ecobee
from homeassistant.const import TEMP_FAHRENHEIT
DEPENDENCIES = ['ecobee']
SENSOR_TYPES = {
'temperature': ['Temperature', TEMP_FAHRENHEIT],
'humidity': ['Humidity', '%'],
'occupancy': ['Occupancy', '']
}
_LOGGER = logging.getLogger(__name__)
ECOBEE_CONFIG_FILE = 'ecobee.conf'
def setup_platform(hass, config, add_devices, discovery_info=None):
""" Sets up the sensors. """
if discovery_info is None:
return
data = ecobee.NETWORK
dev = list()
for index in range(len(data.ecobee.thermostats)):
for sensor in data.ecobee.get_remote_sensors(index):
for item in sensor['capability']:
if item['type'] == 'temperature':
dev.append(
EcobeeSensor(sensor['name'], 'temperature', index))
elif item['type'] == 'humidity':
dev.append(
EcobeeSensor(sensor['name'], 'humidity', index))
elif item['type'] == 'occupancy':
dev.append(
EcobeeSensor(sensor['name'], 'occupancy', index))
add_devices(dev)
class EcobeeSensor(Entity):
""" An ecobee sensor. """
def __init__(self, sensor_name, sensor_type, sensor_index):
self._name = sensor_name + ' ' + SENSOR_TYPES[sensor_type][0]
self.sensor_name = sensor_name
self.type = sensor_type
self.index = sensor_index
self._state = None
self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
self.update()
@property
def name(self):
return self._name.rstrip()
@property
def state(self):
""" Returns the state of the device. """
return self._state
@property
def unit_of_measurement(self):
return self._unit_of_measurement
def update(self):
data = ecobee.NETWORK
data.update()
for sensor in data.ecobee.get_remote_sensors(self.index):
for item in sensor['capability']:
if (
item['type'] == self.type and
self.type == 'temperature' and
self.sensor_name == sensor['name']):
self._state = float(item['value']) / 10
elif (
item['type'] == self.type and
self.type == 'humidity' and
self.sensor_name == sensor['name']):
self._state = item['value']
elif (
item['type'] == self.type and
self.type == 'occupancy' and
self.sensor_name == sensor['name']):
self._state = item['value']
|
Python
| 0 |
@@ -1750,19 +1750,24 @@
'type'%5D
-==
+not in (
'tempera
@@ -1771,17 +1771,17 @@
erature'
-:
+,
%0A
@@ -1789,36 +1789,24 @@
-dev.append(%0A
@@ -1817,111 +1817,32 @@
- EcobeeSensor(sensor%5B'name'%5D, 'temperature', index))%0A elif item%5B'type'%5D == 'humidit
+'humidity', 'occupanc
y'
+)
:%0A
@@ -1863,147 +1863,18 @@
-dev.append(%0A EcobeeSensor(sensor%5B'name'%5D, 'humidity', index))%0A elif item%5B'type'%5D == 'occupancy':%0A
+continue%0A%0A
@@ -1900,33 +1900,8 @@
end(
-%0A
Ecob
@@ -1925,27 +1925,28 @@
name'%5D,
-'occupancy'
+item%5B'type'%5D
, index)
|
4509caefde4829327fecb45b5aded0938c2f8c0b
|
Add index parameter to scrape sensor (#21084)
|
homeassistant/components/sensor/scrape.py
|
homeassistant/components/sensor/scrape.py
|
"""
Support for getting data from websites with scraping.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.scrape/
"""
import logging
import voluptuous as vol
from requests.auth import HTTPBasicAuth, HTTPDigestAuth
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.components.sensor.rest import RestData
from homeassistant.const import (
CONF_NAME, CONF_RESOURCE, CONF_UNIT_OF_MEASUREMENT,
CONF_VALUE_TEMPLATE, CONF_VERIFY_SSL, CONF_USERNAME, CONF_HEADERS,
CONF_PASSWORD, CONF_AUTHENTICATION, HTTP_BASIC_AUTHENTICATION,
HTTP_DIGEST_AUTHENTICATION)
from homeassistant.helpers.entity import Entity
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['beautifulsoup4==4.7.1']
_LOGGER = logging.getLogger(__name__)
CONF_ATTR = 'attribute'
CONF_SELECT = 'select'
DEFAULT_NAME = 'Web scrape'
DEFAULT_VERIFY_SSL = True
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_RESOURCE): cv.string,
vol.Required(CONF_SELECT): cv.string,
vol.Optional(CONF_ATTR): cv.string,
vol.Optional(CONF_AUTHENTICATION):
vol.In([HTTP_BASIC_AUTHENTICATION, HTTP_DIGEST_AUTHENTICATION]),
vol.Optional(CONF_HEADERS): vol.Schema({cv.string: cv.string}),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
vol.Optional(CONF_USERNAME): cv.string,
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean,
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Web scrape sensor."""
name = config.get(CONF_NAME)
resource = config.get(CONF_RESOURCE)
method = 'GET'
payload = None
headers = config.get(CONF_HEADERS)
verify_ssl = config.get(CONF_VERIFY_SSL)
select = config.get(CONF_SELECT)
attr = config.get(CONF_ATTR)
unit = config.get(CONF_UNIT_OF_MEASUREMENT)
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
value_template = config.get(CONF_VALUE_TEMPLATE)
if value_template is not None:
value_template.hass = hass
if username and password:
if config.get(CONF_AUTHENTICATION) == HTTP_DIGEST_AUTHENTICATION:
auth = HTTPDigestAuth(username, password)
else:
auth = HTTPBasicAuth(username, password)
else:
auth = None
rest = RestData(method, resource, auth, headers, payload, verify_ssl)
rest.update()
if rest.data is None:
raise PlatformNotReady
add_entities([
ScrapeSensor(rest, name, select, attr, value_template, unit)], True)
class ScrapeSensor(Entity):
"""Representation of a web scrape sensor."""
def __init__(self, rest, name, select, attr, value_template, unit):
"""Initialize a web scrape sensor."""
self.rest = rest
self._name = name
self._state = None
self._select = select
self._attr = attr
self._value_template = value_template
self._unit_of_measurement = unit
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._unit_of_measurement
@property
def state(self):
"""Return the state of the device."""
return self._state
def update(self):
"""Get the latest data from the source and updates the state."""
self.rest.update()
from bs4 import BeautifulSoup
raw_data = BeautifulSoup(self.rest.data, 'html.parser')
_LOGGER.debug(raw_data)
try:
if self._attr is not None:
value = raw_data.select(self._select)[0][self._attr]
else:
value = raw_data.select(self._select)[0].text
_LOGGER.debug(value)
except IndexError:
_LOGGER.error("Unable to extract data from HTML")
return
if self._value_template is not None:
self._state = self._value_template.render_with_possible_json_value(
value, None)
else:
self._state = value
|
Python
| 0 |
@@ -942,16 +942,37 @@
'select'
+%0ACONF_INDEX = 'index'
%0A%0ADEFAUL
@@ -1181,32 +1181,90 @@
TR): cv.string,%0A
+ vol.Optional(CONF_INDEX, default=0): cv.positive_int,%0A
vol.Optional
@@ -2139,16 +2139,51 @@
F_ATTR)%0A
+ index = config.get(CONF_INDEX)%0A
unit
@@ -2891,32 +2891,39 @@
e, select, attr,
+ index,
value_template,
@@ -2930,16 +2930,33 @@
unit)%5D,
+%0A
True)%0A%0A
@@ -3082,16 +3082,23 @@
t, attr,
+ index,
value_t
@@ -3293,16 +3293,44 @@
= attr%0A
+ self._index = index%0A
@@ -4137,17 +4137,27 @@
select)%5B
-0
+self._index
%5D%5Bself._
@@ -4234,17 +4234,27 @@
select)%5B
-0
+self._index
%5D.text%0A
|
6ca8bb70e8e9c6d40418e836d222648478eb8f31
|
Split Questions into students and institutions
|
tada/schools/migrations/0005_auto_20150427_1938.py
|
tada/schools/migrations/0005_auto_20150427_1938.py
|
Python
| 0 |
@@ -0,0 +1,3331 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import models, migrations%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('schools', '0004_auto_20150427_1912'),%0A %5D%0A%0A operations = %5B%0A migrations.CreateModel(%0A name='QuestionInstitution',%0A fields=%5B%0A ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),%0A ('name', models.CharField(max_length=200)),%0A ('question_type', models.IntegerField(default=1, choices=%5B(1, b'Marks'), (2, b'Grade')%5D)),%0A ('score_min', models.DecimalField(null=True, max_digits=10, decimal_places=2, blank=True)),%0A ('score_max', models.DecimalField(null=True, max_digits=10, decimal_places=2, blank=True)),%0A ('grade', models.CharField(max_length=100, null=True, blank=True)),%0A ('order', models.IntegerField()),%0A ('double_entry', models.BooleanField(default=True)),%0A ('active', models.IntegerField(default=2, null=True, blank=True)),%0A ('assessment', models.ForeignKey(to='schools.AssessmentInstitution')),%0A %5D,%0A options=%7B%0A 'ordering': %5B'order'%5D,%0A %7D,%0A ),%0A migrations.CreateModel(%0A name='QuestionStudent',%0A fields=%5B%0A ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),%0A ('name', models.CharField(max_length=200)),%0A ('question_type', models.IntegerField(default=1, choices=%5B(1, b'Marks'), (2, b'Grade')%5D)),%0A ('score_min', models.DecimalField(null=True, max_digits=10, decimal_places=2, blank=True)),%0A ('score_max', models.DecimalField(null=True, max_digits=10, decimal_places=2, blank=True)),%0A ('grade', models.CharField(max_length=100, null=True, blank=True)),%0A ('order', models.IntegerField()),%0A ('double_entry', models.BooleanField(default=True)),%0A ('active', models.IntegerField(default=2, null=True, blank=True)),%0A ('assessment', models.ForeignKey(to='schools.AssessmentStudent')),%0A %5D,%0A options=%7B%0A 'ordering': %5B'order'%5D,%0A %7D,%0A ),%0A migrations.AlterUniqueTogether(%0A name='question',%0A unique_together=set(%5B%5D),%0A ),%0A migrations.RemoveField(%0A model_name='question',%0A name='assessment',%0A ),%0A migrations.AlterField(%0A model_name='answerinstitution',%0A name='question',%0A field=models.ForeignKey(to='schools.QuestionInstitution'),%0A ),%0A migrations.AlterField(%0A model_name='answerstudent',%0A name='question',%0A field=models.ForeignKey(to='schools.QuestionStudent'),%0A ),%0A migrations.DeleteModel(%0A name='Question',%0A ),%0A migrations.AlterUniqueTogether(%0A name='questionstudent',%0A unique_together=set(%5B('assessment', 'name')%5D),%0A ),%0A migrations.AlterUniqueTogether(%0A name='questioninstitution',%0A unique_together=set(%5B('assessment', 'name')%5D),%0A ),%0A %5D%0A
|
|
687661d05179d7a36629a6bd036cdb8dc6a3c637
|
Create BasePage.py
|
BasePage.py
|
BasePage.py
|
Python
| 0 |
@@ -0,0 +1,1060 @@
+from selenium import webdriver%0Afrom selenium.webdriver.common.by import By%0A%0A#This is the base class that define attributes and methods to all classes%0A%0Aclass BasePage(object):%0A def __init__(self, driver):%0A self.driver = driver%0A self.driver.implicitly_wait(30)%0A self.driver.timeout=30%0A%0A%0A#This class represents the login page that we have to create%0Aclass LoginPage(BasePage):%0A email_id=(By.NAME,'email')%0A pass_word=(By.NAME,'pass')%0A submit_btn=(By.ID,'u_0_l')%0A%0A def set_email(self,email_id):%0A email_element=self.driver.find_element(*LoginPage.email_id)%0A email_element.send_keys(email_id)%0A%0A def set_password(self,password):%0A password_element=self.driver.find_element(*LoginPage.pass_word)%0A password_element.send_keys(password)%0A%0A def click_submit_btn(self):%0A submit_button=self.driver.find_element(*LoginPage.submit_btn)%0A submit_button.click()%0A%0A%0A def login(self, email,password):%0A self.set_email(email)%0A self.set_password(password)%0A self.click_submit_btn()%0A
|
|
5d6777cc386f6fbd982b5021a55b9a8a0510ef1a
|
Convert month
|
ch06_07_p.py
|
ch06_07_p.py
|
Python
| 0.003029 |
@@ -0,0 +1,198 @@
+months = %5B'JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG', 'SEP', 'OCT', 'NOV', 'DEC'%5D%0A%0Amonth, day, year = input().strip().split(%22/%22)%0Aprint(%22%25s %25s %25s%22 %25 (day, months%5Bint(month) - 1%5D, year))%0A%0A
|
|
1db6185a9637377ff63b2b824d625eaf6a990cb3
|
The default number of sentences was wrong
|
corpora/semeval_to_stanford.py
|
corpora/semeval_to_stanford.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import argparse
import sh
import sys
from corpora.parsers import ColumnCorpusParser
from tqdm import tqdm
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('corpus',
help='SenSem column file path')
parser.add_argument('--output',
default=None,
help='Output file to write (defaults to STDOUT)')
parser.add_argument('--sentences',
default=4851,
help='Number of sentences to parse')
parser.add_argument('--server',
default='http://localhost:9000',
help='Full http address and port of the server')
args = parser.parse_args()
output = sys.stdout if args.output is None else open(args.output, 'w')
parser = ColumnCorpusParser(args.corpus, 'idx', 'token', 'lemma', 'pos')
for sidx, sentence in enumerate(tqdm(parser.sentences, total=args.sentences), start=1):
tokenized_sentences = ' '.join(word.token for word in sentence)
parsed_sentences = sh.wget('--post-data', '%s' % tokenized_sentences, args.server, '-O', '-')
parsed_sentences = [ps.split('\n') for ps in parsed_sentences.strip().split('\n\n')]
original_lemma_idx = int(sentence.lemma_idx)
# FIXME: This is savage!!!
ps_len = [len(ps) for ps in parsed_sentences]
ps_len = [sum(ps_len[:i]) for i in range(1, len(ps_len) + 1)]
lemma_sentence_idx = next(sidx for sidx, slen in enumerate(ps_len) if slen >= original_lemma_idx)
lemma_sentence = parsed_sentences[lemma_sentence_idx]
if lemma_sentence_idx > 0:
lemma_idx = original_lemma_idx - ps_len[lemma_sentence_idx-1] - 1
else:
lemma_idx = original_lemma_idx - 1
if parsed_sentences[lemma_sentence_idx][lemma_idx].strip().split()[2] != sentence.lemma:
tqdm.write('NOT FOUND LEMMA for sentence %s' % sentence.sentence_index, file=sys.stdout)
printing_sentence = '\n'.join('\n'.join(ps) for ps in parsed_sentences)
else:
sentence['lemma_idx'] = str(lemma_idx + 1)
printing_sentence = '\n'.join(parsed_sentences[lemma_sentence_idx])
printing_sentence = sh.column('-t', _in=printing_sentence.strip() + '\n')
print(sentence.metadata_string, file=output)
print(printing_sentence.strip(), file=output, end='\n\n')
if args.output is not None:
output.close()
print('SenSem corpus parsed', file=sys.stderr)
|
Python
| 0.999999 |
@@ -603,12 +603,13 @@
ult=
-4851
+27132
,%0A
|
3dc50a3f5dbb674a8b7e5383768bc5ebe72ea077
|
add AUC example
|
examples/classification_auc.py
|
examples/classification_auc.py
|
Python
| 0.998252 |
@@ -0,0 +1,889 @@
+from tgboost import TGBoost%0Aimport pandas as pd%0A%0Atrain = pd.read_csv('train.csv')%0Atrain = train.sample(frac=1.0, axis=0) # shuffle the data%0Aval = train.iloc%5B0:5000%5D%0Atrain = train.iloc%5B5000:%5D%0A%0A%0Atrain_y = train.label%0Atrain_X = train.drop('label', axis=1)%0Aval_y = val.label%0Aval_X = val.drop('label', axis=1)%0A%0A%0Aparams = %7B'loss': %22logisticloss%22,%0A 'eta': 0.3,%0A 'max_depth': 6,%0A 'num_boost_round': 500,%0A 'scale_pos_weight': 1.0,%0A 'subsample': 0.7,%0A 'colsample_bytree': 0.7,%0A 'colsample_bylevel': 1.0,%0A 'min_sample_split': 10,%0A 'min_child_weight': 2,%0A 'reg_lambda': 10,%0A 'gamma': 0,%0A 'eval_metric': %22auc%22,%0A 'early_stopping_rounds': 20,%0A 'maximize': False,%0A 'num_thread': 16%7D%0A%0A%0Atgb = TGBoost()%0Atgb.fit(train_X, train_y, validation_data=(val_X, val_y), **params)%0A
|
|
301f8cf79d1793826a9b73fca6406c005a1c1638
|
Create search.py (#4900)
|
examples/contrib/search.py
|
examples/contrib/search.py
|
Python
| 0 |
@@ -0,0 +1,3070 @@
+import re%0Aimport typing%0A%0Afrom json import dumps%0A%0Afrom mitmproxy import command, ctx, flow%0A%0A%0AMARKER = ':mag:'%0ARESULTS_STR = 'Search Results: '%0A%0A%0Aclass Search:%0A def __init__(self):%0A self.exp = None%0A%0A @command.command('search')%0A def _search(self,%0A flows: typing.Sequence%5Bflow.Flow%5D,%0A regex: str) -%3E None:%0A %22%22%22%0A Defines a command named %22search%22 that matches%0A the given regular expression against most parts%0A of each request/response included in the selected flows.%0A%0A Usage: from the flow list view, type %22:search%22 followed by%0A a space, then a flow selection expression; e.g., %22@shown%22,%0A then the desired regular expression to perform the search.%0A%0A Alternatively, define a custom shortcut in keys.yaml; e.g.:%0A -%0A key: %22/%22%0A ctx: %5B%22flowlist%22%5D%0A cmd: %22console.command search @shown %22%0A%0A Flows containing matches to the expression will be marked%0A with the magnifying glass emoji, and their comments will%0A contain JSON-formatted search results.%0A%0A To view flow comments, enter the flow view%0A and navigate to the detail tab.%0A %22%22%22%0A%0A try:%0A self.exp = re.compile(regex)%0A except re.error as e:%0A ctx.log.error(e)%0A return%0A%0A for _flow in flows:%0A # Erase previous results while preserving other comments:%0A comments = list()%0A for c in _flow.comment.split('%5Cn'):%0A if c.startswith(RESULTS_STR):%0A break%0A comments.append(c)%0A _flow.comment = '%5Cn'.join(comments)%0A%0A if _flow.marked == MARKER:%0A _flow.marked = False%0A%0A results = %7Bk: v for k, v in self.flow_results(_flow).items() if v%7D%0A if results:%0A comments.append(RESULTS_STR)%0A comments.append(dumps(results, indent=2))%0A _flow.comment = '%5Cn'.join(comments)%0A _flow.marked = MARKER%0A%0A def header_results(self, message):%0A results = %7Bk: self.exp.findall(v) for k, v in message.headers.items()%7D%0A return %7Bk: v for k, v in results.items() if v%7D%0A%0A def flow_results(self, _flow):%0A results = dict()%0A results.update(%0A %7B'flow_comment': self.exp.findall(_flow.comment)%7D)%0A if _flow.request is not None:%0A results.update(%0A %7B'request_path': self.exp.findall(_flow.request.path)%7D)%0A results.update(%0A %7B'request_headers': self.header_results(_flow.request)%7D)%0A if _flow.request.text:%0A results.update(%0A %7B'request_body': self.exp.findall(_flow.request.text)%7D)%0A if _flow.response is not None:%0A results.update(%0A %7B'response_headers': self.header_results(_flow.response)%7D)%0A if _flow.response.text:%0A results.update(%0A %7B'response_body': self.exp.findall(_flow.response.text)%7D)%0A return results%0A%0A%0Aaddons = %5BSearch()%5D%0A
|
|
15b7279b0437cf14c2d5657b99d037beb044949f
|
Convert JSON to TSV
|
JSON2TSV.py
|
JSON2TSV.py
|
Python
| 0.999999 |
@@ -0,0 +1,1061 @@
+#!/usr/bin/env python%0A# -*- coding: latin-1 -*-%0A''' Script to convert a JSON file to TSV. Adapted from http://kailaspatil.blogspot.com/2013/07/python-script-to-convert-json-file-into.html%0A'''%0Aimport fileinput%0Aimport json%0Aimport csv%0Aimport sys%0A%0AEPILOG = ''' Usage: %25(prog)s -i %5Binput JSON file%5D %3E %5Boutput TSV file%5D '''%0A%0Adef main():%0A%09import argparse%0A%09parser = argparse.ArgumentParser(description=__doc__, epilog=EPILOG,%0A%09 formatter_class=argparse.RawDescriptionHelpFormatter,%0A%09)%0A%0A%09parser.add_argument('--infile', '-i', help=%22JSON file to convert to TSV%22)%0A%09args = parser.parse_args()%0A%0A%09lines = %5B%5D%0A%09if args.infile:%0A%09%09with open(args.infile, 'r') as f:%0A%09%09%09for line in f:%0A%09%09%09%09lines.append(line)%0A%09else:%0A%09%09print %3E%3E sys.stderr, %22Please supply an input JSON file with -i%22%0A%0A%09new_json = json.loads(''.join(lines))%0A%09keys = %7B%7D%0A%0A%09for i in new_json:%0A%09%09for k in i.keys():%0A%09%09%09keys%5Bk%5D = 1%0A%0A%09tab_out = csv.DictWriter(sys.stdout, fieldnames=keys.keys(), dialect='excel-tab')%0A%09tab_out.writeheader()%0A%0A%09for row in new_json:%0A%09%09tab_out.writerow(row)%0A%0Aif __name__ == '__main__':%0A%09main()
|
|
7707e65ed591b890d91bcb7bf22923b8c17a113a
|
Add tests from Gregor's PR
|
readthedocs/rtd_tests/tests/test_api_permissions.py
|
readthedocs/rtd_tests/tests/test_api_permissions.py
|
Python
| 0 |
@@ -0,0 +1,2972 @@
+from functools import partial%0Afrom mock import Mock%0Afrom unittest import TestCase%0A%0Afrom readthedocs.restapi.permissions import APIRestrictedPermission%0A%0A%0Aclass APIRestrictedPermissionTests(TestCase):%0A def get_request(self, method, is_admin):%0A request = Mock()%0A request.method = method%0A request.user.is_staff = is_admin%0A return request%0A%0A def assertAllow(self, handler, method, is_admin, obj=None):%0A if obj is None:%0A self.assertTrue(handler.has_permission(%0A request=self.get_request(method, is_admin=is_admin),%0A view=None))%0A else:%0A self.assertTrue(handler.has_object_permission(%0A request=self.get_request(method, is_admin=is_admin),%0A view=None,%0A obj=obj))%0A%0A def assertDisallow(self, handler, method, is_admin, obj=None):%0A if obj is None:%0A self.assertFalse(handler.has_permission(%0A request=self.get_request(method, is_admin=is_admin),%0A view=None))%0A else:%0A self.assertFalse(handler.has_object_permission(%0A request=self.get_request(method, is_admin=is_admin),%0A view=None,%0A obj=obj))%0A%0A def test_non_object_permissions(self):%0A handler = APIRestrictedPermission()%0A%0A assertAllow = partial(self.assertAllow, handler, obj=None)%0A assertDisallow = partial(self.assertDisallow, handler, obj=None)%0A%0A assertAllow('GET', is_admin=False)%0A assertAllow('HEAD', is_admin=False)%0A assertAllow('OPTIONS', is_admin=False)%0A assertDisallow('DELETE', is_admin=False)%0A assertDisallow('PATCH', is_admin=False)%0A assertDisallow('POST', is_admin=False)%0A assertDisallow('PUT', is_admin=False)%0A%0A assertAllow('GET', is_admin=True)%0A assertAllow('HEAD', is_admin=True)%0A assertAllow('OPTIONS', is_admin=True)%0A assertAllow('DELETE', is_admin=True)%0A assertAllow('PATCH', is_admin=True)%0A assertAllow('POST', is_admin=True)%0A assertAllow('PUT', is_admin=True)%0A%0A def test_object_permissions(self):%0A handler = APIRestrictedPermission()%0A%0A obj = Mock()%0A%0A assertAllow = partial(self.assertAllow, handler, obj=obj)%0A assertDisallow = partial(self.assertDisallow, handler, obj=obj)%0A%0A assertAllow('GET', is_admin=False)%0A assertAllow('HEAD', is_admin=False)%0A assertAllow('OPTIONS', is_admin=False)%0A assertDisallow('DELETE', is_admin=False)%0A assertDisallow('PATCH', is_admin=False)%0A assertDisallow('POST', is_admin=False)%0A assertDisallow('PUT', is_admin=False)%0A%0A assertAllow('GET', is_admin=True)%0A assertAllow('HEAD', is_admin=True)%0A assertAllow('OPTIONS', is_admin=True)%0A assertAllow('DELETE', is_admin=True)%0A assertAllow('PATCH', is_admin=True)%0A assertAllow('POST', is_admin=True)%0A assertAllow('PUT', is_admin=True)%0A
|
|
44f70a0c8ea9613214ce6305c262a8508b4bc598
|
create add_user.py
|
add_user.py
|
add_user.py
|
Python
| 0.000005 |
@@ -0,0 +1,776 @@
+#!/usr/bin/python%0A%0Aimport bluetooth%0A%0Aprint(%22Scanning for bluetooth devices in discoverable mode...%22)%0Anearby_devices = bluetooth.discover_devices(lookup_names = True)%0A%0Afor i, (addr, name) in enumerate(nearby_devices):%0A print(%22%5B%7B%7D%5D %7B%7D %7B%7D%22.format(i, addr, name))%0A%0Anum = raw_input(%22Enter the number of your device (or type anything else to quit)%5Cn%22)%0A%0Aif num.isdigit() and 0 %3C= int(num) %3C len(nearby_devices):%0A addr, name = nearby_devices%5Bint(num)%5D%0A%0A maybe_name = raw_input(%22Enter a name for this device (or press enter to use '%7B%7D')%5Cn%22.format(name))%0A if maybe_name != '':%0A name = maybe_name%0A%0A with open(%22users.txt%22, %22a%22) as users_file:%0A users_file.write(%22%7B%7D %7B%7D%5Cn%22.format(addr, name))%0A print(%22Successfully added '%7B%7D'%22.format(name))%0A%0Aelse:%0A exit()%0A
|
|
c8c679221e0a36ac6074c0869bfc4b75d9745ae2
|
Create a.py
|
abc066/a.py
|
abc066/a.py
|
Python
| 0.000489 |
@@ -0,0 +1,69 @@
+a, b, c = map(int, input().split())%0A%0Aprint(min(a + b, b + c, a + c))%0A
|
|
365152787cae36c12691e4da52a0575bd56d7d1b
|
Add tests for tril, triu and find
|
tests/cupyx_tests/scipy_tests/sparse_tests/test_extract.py
|
tests/cupyx_tests/scipy_tests/sparse_tests/test_extract.py
|
Python
| 0 |
@@ -0,0 +1,2760 @@
+import unittest%0A%0Aimport numpy%0Atry:%0A import scipy.sparse%0A scipy_available = True%0Aexcept ImportError:%0A scipy_available = False%0A%0Aimport cupy%0Afrom cupy import testing%0Afrom cupyx.scipy import sparse%0A%0A%[email protected](*testing.product(%7B%0A 'shape': %5B(8, 3), (4, 4), (3, 8)%5D,%0A 'a_format': %5B'dense', 'csr', 'csc', 'coo'%5D,%0A 'out_format': %5BNone, 'csr', 'csc'%5D,%0A%7D))%[email protected](scipy_available, 'requires scipy')%0Aclass TestExtract(unittest.TestCase):%0A%0A density = 0.75%0A%0A def _make_matrix(self, dtype):%0A a = testing.shaped_random(self.shape, numpy, dtype=dtype)%0A a%5Ba %3E self.density%5D = 0%0A b = cupy.array(a)%0A if self.a_format == 'csr':%0A a = scipy.sparse.csr_matrix(a)%0A b = sparse.csr_matrix(b)%0A elif self.a_format == 'csc':%0A a = scipy.sparse.csc_matrix(a)%0A b = sparse.csc_matrix(b)%0A elif self.a_format == 'coo':%0A a = scipy.sparse.coo_matrix(a)%0A b = sparse.coo_matrix(b)%0A return a, b%0A%0A @testing.for_dtypes('fdFD')%0A def test_tril(self, dtype):%0A np_a, cp_a = self._make_matrix(dtype)%0A m, n = self.shape%0A for k in range(-m+1, n):%0A np_out = scipy.sparse.tril(np_a, k=k, format=self.out_format)%0A cp_out = sparse.tril(cp_a, k=k, format=self.out_format)%0A assert np_out.format == cp_out.format%0A assert np_out.nnz == cp_out.nnz%0A cupy.testing.assert_allclose(np_out.todense(), cp_out.todense())%0A%0A @testing.for_dtypes('fdFD')%0A def test_triu(self, dtype):%0A np_a, cp_a = self._make_matrix(dtype)%0A m, n = self.shape%0A for k in range(-m+1, n):%0A np_out = scipy.sparse.triu(np_a, k=k, format=self.out_format)%0A cp_out = sparse.triu(cp_a, k=k, format=self.out_format)%0A assert np_out.format == cp_out.format%0A assert np_out.nnz == cp_out.nnz%0A cupy.testing.assert_allclose(np_out.todense(), cp_out.todense())%0A %0A @testing.for_dtypes('fdFD')%0A def test_find(self, dtype):%0A if self.out_format is not None:%0A unittest.SkipTest()%0A np_a, cp_a = self._make_matrix(dtype)%0A np_row, np_col, np_data = scipy.sparse.find(np_a)%0A cp_row, cp_col, cp_data = sparse.find(cp_a)%0A # Note: Check the results by reconstructing the sparse matrix from the%0A # results of find, as SciPy and CuPy differ in the data order.%0A np_out = scipy.sparse.coo_matrix((np_data, (np_row, np_col)),%0A shape=self.shape)%0A cp_out = sparse.coo_matrix((cp_data, (cp_row, cp_col)),%0A shape=self.shape)%0A cupy.testing.assert_allclose(np_out.todense(), cp_out.todense())%0A
|
|
a4d3056bbbe71d73d901c13927264157c9c51842
|
Add lc004_median_of_two_sorted_arrays.py
|
lc004_median_of_two_sorted_arrays.py
|
lc004_median_of_two_sorted_arrays.py
|
Python
| 0.007128 |
@@ -0,0 +1,669 @@
+%22%22%22Leetcode 4. Median of Two Sorted Arrays%0AHard%0A%0AThere are two sorted arrays nums1 and nums2 of size m and n respectively.%0A%0AFind the median of the two sorted arrays. %0AThe overall run time complexity should be O(log (m+n)).%0A%0AYou may assume nums1 and nums2 cannot be both empty.%0A%0AExample 1:%0Anums1 = %5B1, 3%5D%0Anums2 = %5B2%5D%0AThe median is 2.0%0A%0AExample 2:%0Anums1 = %5B1, 2%5D%0Anums2 = %5B3, 4%5D%0AThe median is (2 + 3)/2 = 2.5%0A%22%22%22%0A%0Aclass Solution(object):%0A def findMedianSortedArrays(self, num1, num2):%0A %22%22%22%0A :type nums1: List%5Bint%5D%0A :type nums2: List%5Bint%5D%0A :rtype: float%0A %22%22%22%0A pass%0A%0A%0Adef main():%0A pass%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
8a836213f7466de51c6d3d18d1a5ba74bb28de4a
|
Add hdf5-vol-async package. (#26874)
|
var/spack/repos/builtin/packages/hdf5-vol-async/package.py
|
var/spack/repos/builtin/packages/hdf5-vol-async/package.py
|
Python
| 0 |
@@ -0,0 +1,785 @@
+# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other%0A# Spack Project Developers. See the top-level COPYRIGHT file for details.%0A#%0A# SPDX-License-Identifier: (Apache-2.0 OR MIT)%0A%0Afrom spack import *%0A%0A%0Aclass Hdf5VolAsync(CMakePackage):%0A %22%22%22This package enables asynchronous IO in HDF5.%22%22%22%0A%0A homepage = %22https://sdm.lbl.gov/%22%0A git = %22https://github.com/hpc-io/vol-async%22%0A maintainers = %5B'hyoklee'%5D%0A%0A version('v1.0')%0A depends_on('argobots@main')%0A depends_on('[email protected]+mpi+threadsafe')%0A%0A def cmake_args(self):%0A %22%22%22Populate cmake arguments for HDF5 VOL.%22%22%22%0A args = %5B%0A self.define('BUILD_SHARED_LIBS:BOOL', True),%0A self.define('BUILD_TESTING:BOOL=ON', self.run_tests)%0A %5D%0A return args%0A
|
|
034fe49d29f229e8fafc6b1034fc2685cd896eb2
|
Create create-studio-item
|
my-ACG/create-studio-item/edit.py
|
my-ACG/create-studio-item/edit.py
|
Python
| 0.000002 |
@@ -0,0 +1,1657 @@
+# -*- coding: utf-8 -*-%0Aimport argparse%0Aimport csv%0Aimport os%0Aimport re%0Aimport urllib.parse%0A%0Aos.environ%5B'PYWIKIBOT_DIR'%5D = os.path.dirname(os.path.realpath(__file__))%0Aimport pywikibot%0A%0A%0Asite = pywikibot.Site()%0Asite.login()%0Adatasite = site.data_repository()%0A%0A%0Adef main(studio):%0A data = %7B%0A 'labels': %7B%0A 'zh-tw': %7B%0A 'language': 'zh-tw',%0A 'value': studio%0A %7D,%0A %7D,%0A 'sitelinks': %7B%0A 'zhwiki': %7B%0A 'site': 'zhwiki',%0A 'title': studio,%0A 'badges': %5B%5D,%0A %7D,%0A %7D,%0A # https://www.mediawiki.org/wiki/Wikibase/DataModel/JSON#Snaks%0A 'claims': %7B%0A 'P3': %5B%7B%0A 'mainsnak': %7B%0A 'snaktype': 'value',%0A 'property': 'P3',%0A 'datatype': 'wikibase-item',%0A 'datavalue': %7B%0A 'value': %7B%0A 'entity-type': 'item',%0A 'numeric-id': 65,%0A %7D,%0A 'type': 'wikibase-entityid',%0A %7D,%0A %7D,%0A 'type': 'statement',%0A 'rank': 'normal',%0A %7D%5D,%0A %7D,%0A %7D%0A%0A # claim = pywikibot.page.Claim(datasite, 'P25', datatype='wikibase-item')%0A # item.editEntity(%7B'claims': %5Bclaim.toJSON()%5D%7D)%0A%0A print(data)%0A item = datasite.editEntity(%7B%7D, data, summary=u'%E5%BB%BA%E7%AB%8B%E6%96%B0%E9%A0%85%E7%9B%AE%E4%B8%A6%E9%80%A3%E7%B5%90')%0A print(item%5B'entity'%5D%5B'id'%5D)%0A%0A%0Aif __name__ == %22__main__%22:%0A parser = argparse.ArgumentParser()%0A parser.add_argument('studio')%0A args = parser.parse_args()%0A main(args.studio)%0A
|
|
9b572d4f53b23f3dc51dbfb98d46d0daa68d3569
|
fix pep8 on core admin profile
|
opps/core/admin/profile.py
|
opps/core/admin/profile.py
|
# -*- coding: utf-8 -*-
from django.contrib import admin
from opps.core.models import Profile
class ProfileAdmin(admin.ModelAdmin):
pass
admin.site.register(Profile, ProfileAdmin)
|
Python
| 0 |
@@ -89,16 +89,17 @@
rofile%0A%0A
+%0A
class Pr
|
71d4fee2e7a9c44de0f395883b75e1ca9976fe9e
|
fix query for oaipmh
|
portality/models/oaipmh.py
|
portality/models/oaipmh.py
|
from datetime import datetime
from copy import deepcopy
from portality.models import Journal, Article
class OAIPMHRecord(object):
earliest = {
"query": {
"bool": {
"must": [
{ "term": { "admin.in_doaj": True } }
]
}
},
"size": 1,
"sort" : [
{"last_updated": {"order": "asc"}}
]
}
sets = {
"query": {
"bool": {
"must": [
{ "term": { "admin.in_doaj": True } }
]
}
},
"size": 0,
"aggs": {
"sets": {
"terms": {
"field": "index.schema_subject.exact",
"order": {"_key" : "asc"},
"size": 100000
}
}
}
}
records = {
"query": {
"bool": {
"must": [
{ "term": { "admin.in_doaj": True } }
]
}
},
"from": 0,
"size": 25
}
set_limit = {"term" : { "index.schema_subject.exact" : "<set name>" }}
range_limit = { "range" : { "last_updated" : {"gte" : "<from date>", "lte" : "<until date>"} } }
created_sort = [{"last_updated" : {"order" : "desc"}}, {"id" : "desc"}]
def earliest_datestamp(self):
result = self.query(q=self.earliest)
return result.get("hits", {}).get("hits", [{}])[0].get("_source", {}).get("last_updated")
def identifier_exists(self, identifier):
obj = self.pull(identifier)
return obj is not None
def list_sets(self):
result = self.query(q=self.sets)
sets = [t.get("key") for t in result.get("aggregations", {}).get("sets", {}).get("buckets", [])]
return sets
def list_records(self, from_date=None, until_date=None, oai_set=None, list_size=None, start_after=None):
q = deepcopy(self.records)
if start_after is not None or from_date is not None or until_date is not None or oai_set is not None:
if oai_set is not None:
s = deepcopy(self.set_limit)
s["term"]["index.schema_subject.exact"] = oai_set
q["query"]["bool"]["must"].append(s)
if until_date is not None or from_date is not None or start_after is not None:
d = deepcopy(self.range_limit)
if start_after is not None:
d["range"]["last_updated"]["lte"] = start_after[0]
elif until_date is not None:
d["range"]["last_updated"]["lte"] = until_date
else:
del d["range"]["last_updated"]["lte"]
if from_date is not None:
d["range"]["last_updated"]["gte"] = from_date
else:
del d["range"]["last_updated"]["gte"]
q["query"]["bool"]["must"].append(d)
if list_size is not None:
q["size"] = list_size
if start_after is not None:
q["from"] = start_after[1]
else:
q["from"] = 0
q["sort"] = deepcopy(self.created_sort)
# do the query
# print json.dumps(q)
results = self.query(q=q)
total = results.get("hits", {}).get("total", {}).get('value', 0)
return total, [hit.get("_source") for hit in results.get("hits", {}).get("hits", [])]
class OAIPMHArticle(OAIPMHRecord, Article):
def list_records(self, from_date=None, until_date=None, oai_set=None, list_size=None, start_after=None):
total, results = super(OAIPMHArticle, self).list_records(from_date=from_date,
until_date=until_date, oai_set=oai_set, list_size=list_size, start_after=start_after)
return total, [Article(**r) for r in results]
def pull(self, identifier):
# override the default pull, as we care about whether the item is in_doaj
record = super(OAIPMHArticle, self).pull(identifier)
if record is not None and record.is_in_doaj():
return record
return None
class OAIPMHJournal(OAIPMHRecord, Journal):
def list_records(self, from_date=None, until_date=None, oai_set=None, list_size=None, start_after=None):
total, results = super(OAIPMHJournal, self).list_records(from_date=from_date,
until_date=until_date, oai_set=oai_set, list_size=list_size, start_after=start_after)
return total, [Journal(**r) for r in results]
def pull(self, identifier):
# override the default pull, as we care about whether the item is in_doaj
record = super(OAIPMHJournal, self).pull(identifier)
if record is not None and record.is_in_doaj():
return record
return None
|
Python
| 0.999999 |
@@ -1,34 +1,4 @@
-from datetime import datetime%0A
from
@@ -1306,16 +1306,22 @@
%7D%7D, %7B%22id
+.exact
%22 : %22des
|
419e001591566df909b03ffd0abff12171b62491
|
Create binary_search_iter.py
|
binary_search_iter.py
|
binary_search_iter.py
|
Python
| 0.000041 |
@@ -0,0 +1,526 @@
+#GLOBALS%0A#=======%0AFIRST_IDX = 0%0A%0Adef chop(number, int_list):%0A%09list_size = length(int_list)%0A%09start_idx = FIRST_IDX%0A%09end_idx = list_size-1%0A%09current_idx = end_idx/2%0A%09itr_counter = list_size%0A%09while itr_counter%3E0:%0A%09%09current_value = int_list%5Bcurrent_idx%5D%0A%09%09%09if current_value == number:%0A%09%09%09%09return current_idx%0A%09%09%09else if current_value %3E number:%0A%09%09%09%09end_idx = current_idx - 1%0A%09%09%09else if current_value %3C number:%0A%09%09%09%09start_idx = current_idx+1%0A%0A%09%09%09current_idx = (end_idx + start_idx)/2%0A%09%09%09itr_counter /=2%0A%09%09%09%0A%09if __name__==%22__main__%22:%0A%09%0A
|
|
6c4c26f5383740257b8bca56ce1ea9011053aff6
|
add new package : keepalived (#14463)
|
var/spack/repos/builtin/packages/keepalived/package.py
|
var/spack/repos/builtin/packages/keepalived/package.py
|
Python
| 0 |
@@ -0,0 +1,1453 @@
+# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other%0A# Spack Project Developers. See the top-level COPYRIGHT file for details.%0A#%0A# SPDX-License-Identifier: (Apache-2.0 OR MIT)%0A%0Afrom spack import *%0A%0A%0Aclass Keepalived(AutotoolsPackage):%0A %22%22%22%0A Keepalived implements a set of checkers to dynamically and adaptively%0A maintain and manage loadbalanced server pool according their health%0A %22%22%22%0A%0A homepage = %22http://www.keepalived.org%22%0A url = %22http://www.keepalived.org/software/keepalived-1.2.0.tar.gz%22%0A%0A version('2.0.19', sha256='0e2f8454765bc6a5fa26758bd9cec18aae42882843cdd24848aff0ae65ce4ca7')%0A version('2.0.18', sha256='1423a2b1b8e541211029b9e1e1452e683bbe5f4b0b287eddd609aaf5ff024fd0')%0A version('2.0.17', sha256='8965ffa2ffe243014f9c0245daa65f00a9930cf746edf33525d28a86f97497b4')%0A version('2.0.16', sha256='f0c7dc86147a286913c1c2c918f557735016285d25779d4d2fce5732fcb888df')%0A version('2.0.15', sha256='933ee01bc6346aa573453b998f87510d3cce4aba4537c9642b24e6dbfba5c6f4')%0A version('2.0.14', sha256='1bf586e56ee38b47b82f2a27b27e04d0e5b23f1810db6a8e801bde9d3eb8617b')%0A version('2.0.13', sha256='c7fb38e8a322fb898fb9f6d5d566827a30aa5a4cd1774f474bb4041c85bcbc46')%0A version('2.0.12', sha256='fd50e433d784cfd948de5726752cf89ab7001f587fe10a5110c6c7cbda4b7b5e')%0A version('2.0.11', sha256='a298b0c02a20959cfc365b62c14f45abd50d5e0595b2869f5bce10ec2392fa48')%0A%0A depends_on('openssl', type='build')%0A
|
|
5ef097bc394ef5be9b723ca0732bb842ab82e9e1
|
Include app.wsgi into repository as an example #8
|
website/app.wsgi
|
website/app.wsgi
|
Python
| 0 |
@@ -0,0 +1,652 @@
+import sys%0Afrom pathlib import Path%0A%0Apath = Path(__file__)%0A%0A# when moving virual environment, update following line%0Avenv_location = str(path.parents%5B2%5D)%0A%0A# in Python3 there is no builtin execfile shortcut - let's define one%0Adef execfile(filename):%0A globals = dict( __file__ = filename)%0A exec(open(filename).read(), globals)%0A%0A# add application directory to execution path%0Asys.path.insert(0, str(path.parent))%0Asys.path.insert(0, str(path.parents%5B1%5D))%0A%0A# activate virual environment%0Aactivate_this = venv_location + '/virtual_environment/bin/activate_this.py'%0Aexecfile(activate_this)%0A%0A# import application to serve%0Afrom app import app as application%0A
|
|
8d6676f2e19ab9df01c681b6590c6f4adb0f938c
|
add profile model
|
fbmsgbot/models/profile.py
|
fbmsgbot/models/profile.py
|
Python
| 0.000001 |
@@ -0,0 +1,284 @@
+class Profile():%0A %0A def __init__(self, **kwargs):%0A self.first_name = kwargs%5B'first_name'%5D%0A%09self.last_name = kwargs%5B'last_name'%5D%0A%09self.profile_pic = kwargs%5B'profile_pic'%5D%0A%09self.locale = kwargs%5B'locale'%5D%0A%09self.timezone = kwargs%5B'timezone'%5D%0A%09self.gender = kwargs%5B'gender'%5D%0A%0A
|
|
a57b877313f1dc7aa3a6c5c31b925f5c14ddf791
|
Fix some minor docstring bugs
|
nikola/plugins/command/console.py
|
nikola/plugins/command/console.py
|
# -*- coding: utf-8 -*-
# Copyright © 2012-2016 Chris Warrick, Roberto Alsina and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Start debugging console."""
from __future__ import print_function, unicode_literals
import os
from nikola import __version__
from nikola.plugin_categories import Command
from nikola.utils import get_logger, STDERR_HANDLER, req_missing, Commands
LOGGER = get_logger('console', STDERR_HANDLER)
class CommandConsole(Command):
"""Start debugging console."""
name = "console"
shells = ['ipython', 'bpython', 'plain']
doc_purpose = "start an interactive Python console with access to your site"
doc_description = """\
The site engine is accessible as `site`, the config file as `conf`, and commands are available as `commands`.
If there is no console to use specified (as -b, -i, -p) it tries IPython, then falls back to bpython, and finally falls back to the plain Python console."""
header = "Nikola v" + __version__ + " -- {0} Console (conf = configuration file, site = site engine, commands = nikola commands)"
cmd_options = [
{
'name': 'bpython',
'short': 'b',
'long': 'bpython',
'type': bool,
'default': False,
'help': 'Use bpython',
},
{
'name': 'ipython',
'short': 'i',
'long': 'plain',
'type': bool,
'default': False,
'help': 'Use IPython',
},
{
'name': 'plain',
'short': 'p',
'long': 'plain',
'type': bool,
'default': False,
'help': 'Use the plain Python interpreter',
},
]
def ipython(self, willful=True):
"""IPython shell."""
try:
import IPython
except ImportError as e:
if willful:
req_missing(['IPython'], 'use the IPython console')
raise e # That’s how _execute knows whether to try something else.
else:
site = self.context['site'] # NOQA
conf = self.context['conf'] # NOQA
commands = self.context['commands'] # NOQA
IPython.embed(header=self.header.format('IPython'))
def bpython(self, willful=True):
"""bpython shell."""
try:
import bpython
except ImportError as e:
if willful:
req_missing(['bpython'], 'use the bpython console')
raise e # That’s how _execute knows whether to try something else.
else:
bpython.embed(banner=self.header.format('bpython'), locals_=self.context)
def plain(self, willful=True):
"""Plain Python shell."""
import code
try:
import readline
except ImportError:
pass
else:
import rlcompleter
readline.set_completer(rlcompleter.Completer(self.context).complete)
readline.parse_and_bind("tab:complete")
pythonrc = os.environ.get("PYTHONSTARTUP")
if pythonrc and os.path.isfile(pythonrc):
try:
execfile(pythonrc) # NOQA
except NameError:
pass
code.interact(local=self.context, banner=self.header.format('Python'))
def _execute(self, options, args):
"""Start the console."""
self.site.scan_posts()
# Create nice object with all commands:
self.site.commands = Commands(self.site.doit, self.config, self._doitargs)
self.context = {
'conf': self.site.config,
'site': self.site,
'commands': self.site.commands,
}
if options['bpython']:
self.bpython(True)
elif options['ipython']:
self.ipython(True)
elif options['plain']:
self.plain(True)
else:
for shell in self.shells:
try:
return getattr(self, shell)(False)
except ImportError:
pass
raise ImportError
|
Python
| 0 |
@@ -2782,16 +2782,23 @@
%22%22%22
+Run an
IPython
@@ -3331,16 +3331,22 @@
%22%22%22
+Run a
bpython
@@ -3747,17 +3747,23 @@
%22%22%22
-P
+Run a p
lain Pyt
|
7a3a6720a47f380cf20a06aaa47634675099bf92
|
Django learning site forms: add model forms QuizForm, TrueFalseQuestionForm, MultipleChoiceQuestionForm
|
python/django/learning_site_forms/courses/forms.py
|
python/django/learning_site_forms/courses/forms.py
|
Python
| 0.997456 |
@@ -0,0 +1,611 @@
+from django import forms%0A%0Afrom . import models%0A%0Aclass QuizForm(forms.ModelForm):%0A class Meta:%0A model = models.Quiz%0A fields = %5B%0A 'title',%0A 'description',%0A 'order',%0A 'total_questions',%0A %5D%0A%0A%0Aclass TrueFalseQuestionForm(forms.ModelForm):%0A class Meta:%0A model = models.TrueFalseQuestion%0A fields = %5B'order', 'prompt'%5D%0A%0A%0Aclass MultipleChoiceQuestionForm(forms.ModelForm):%0A class Meta:%0A model = models.MultipleChoiceQuestion%0A fields = %5B'order',%0A 'prompt',%0A 'shuffle_answers'%0A %5D
|
|
8d12f36f7b10ff3c6c6296a20f488c3ec18715c5
|
Convert to Package
|
Integrations/Whois/Whois.py
|
Integrations/Whois/Whois.py
|
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
''' IMPORTS '''
import socket
import socks
import whois
from urlparse import urlparse
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
''' GLOBAL VARS '''
if not demisto.params().get('proxy', False):
del os.environ['HTTP_PROXY']
del os.environ['HTTPS_PROXY']
del os.environ['http_proxy']
del os.environ['https_proxy']
PROXY = demisto.params()['proxy']
# Setting proxy to be used in Socks
if PROXY is True:
proxy_url = os.environ.get('HTTPS_PROXY', None)
if proxy_url:
uri = urlparse(proxy_url)
socks.set_default_proxy(socks.PROXY_TYPE_HTTP, uri.hostname, uri.port)
socket.socket = socks.socksocket
DOMAIN = demisto.args().get('query')
''' HELPER FUNCTIONS '''
# Returns an item in a list at a given index
def list_tool(item, list, number):
if isinstance(item, list):
return str(item[number])
else:
return item
# converts inputs into a string w/o u' prepended
def my_converter(obj):
if isinstance(obj, datetime):
return obj.__str__()
else:
return obj
# Converts a list of time objects into human readable format
def time_list_tool(obj):
tformat = '%m/%d/%Y %H:%M:%S %p'
if obj is not None and isinstance(obj, list):
for string in obj:
my_converter(string)
return string
else:
return obj
'''COMMANDS'''
def whois_command():
try:
whois_result = whois.whois(DOMAIN)
md = {}
try:
for key in whois_result:
value = whois_result[key]
value = my_converter(value)
key = string_to_table_header(key)
md.update({key: value})
except:
demisto.results('No result was found for {}'.format(DOMAIN))
ec = {}
ec.update({
'Domain': {
'Name': str(list_tool(whois_result.domain_name, list, 0)),
'Whois': {
'Domain': str(list_tool(whois_result.domain_name, list, 0)),
'DomainStatus': whois_result.status,
'DNSSec': str(whois_result.dnssec),
'Raw': str(whois_result),
'NameServers': whois_result.name_servers,
'CreationDate': str(time_list_tool(whois_result.creation_date)),
'UpdatedDate': str(time_list_tool(whois_result.updated_date)),
'ExpirationDate': str(time_list_tool(whois_result.expiration_date)),
'Registrar': {
'Name': str(whois_result.registrar),
'AbuseEmail': str(list_tool(whois_result.emails, list, 0))
},
'Registrant': {
'Name': str(whois_result.get('name')),
'Email': str(list_tool(whois_result.emails, list, 1))
}
}
}
})
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['markdown'],
'Contents': str(whois_result),
'HumanReadable': tableToMarkdown('Whois results for {}'.format(DOMAIN), md, removeNull=True),
'EntryContext': createContext(ec, removeNull=True)
})
except OSError as msg:
return_error(msg)
def test_command():
try:
whois_result = whois.whois('google.com')
domain_test = list_tool(whois_result.domain_name, list, 1)
if domain_test == 'google.com':
demisto.results('ok')
except:
demisto.results('error')
''' EXECUTION CODE '''
LOG('command is %s' % (demisto.command(), ))
try:
if demisto.command() == 'test-module':
# This is the call made when pressing the integration test button.
test_command()
elif demisto.command() == 'whois':
whois_command()
except Exception as e:
LOG(e)
LOG.print_log(False)
return_error(e.message)
|
Python
| 0.999999 |
@@ -263,16 +263,31 @@
rning)%0A%0A
+# flake8: noqa%0A
%0A''' GLO
|
37f80e8a8a86612f44fd8e6cd3b15d73bc737404
|
Fix reuse bug?
|
tools/profiling/microbenchmarks/bm_json.py
|
tools/profiling/microbenchmarks/bm_json.py
|
# Copyright 2017, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
_BM_SPECS = {
'BM_UnaryPingPong': {
'tpl': ['fixture', 'client_mutator', 'server_mutator'],
'dyn': ['request_size', 'response_size'],
},
'BM_PumpStreamClientToServer': {
'tpl': ['fixture'],
'dyn': ['request_size'],
},
'BM_PumpStreamServerToClient': {
'tpl': ['fixture'],
'dyn': ['request_size'],
},
'BM_StreamingPingPong': {
'tpl': ['fixture', 'client_mutator', 'server_mutator'],
'dyn': ['request_size', 'request_count'],
},
'BM_StreamingPingPongMsgs': {
'tpl': ['fixture', 'client_mutator', 'server_mutator'],
'dyn': ['request_size'],
},
'BM_PumpStreamServerToClient_Trickle': {
'tpl': [],
'dyn': ['request_size', 'bandwidth_kilobits'],
},
'BM_ErrorStringOnNewError': {
'tpl': ['fixture'],
'dyn': [],
},
'BM_ErrorStringRepeatedly': {
'tpl': ['fixture'],
'dyn': [],
},
'BM_ErrorGetStatus': {
'tpl': ['fixture'],
'dyn': [],
},
'BM_ErrorGetStatusCode': {
'tpl': ['fixture'],
'dyn': [],
},
'BM_ErrorHttpError': {
'tpl': ['fixture'],
'dyn': [],
},
'BM_HasClearGrpcStatus': {
'tpl': ['fixture'],
'dyn': [],
},
'BM_IsolatedFilter': {
'tpl': ['fixture', 'client_mutator'],
'dyn': [],
},
'BM_HpackEncoderEncodeHeader': {
'tpl': ['fixture'],
'dyn': ['end_of_stream', 'request_size'],
},
'BM_HpackParserParseHeader': {
'tpl': ['fixture'],
'dyn': [],
},
'BM_CallCreateDestroy': {
'tpl': ['fixture'],
'dyn': [],
},
'BM_Zalloc': {
'tpl': [],
'dyn': ['request_size'],
},
'BM_PollEmptyPollset_SpeedOfLight': {
'tpl': [],
'dyn': ['request_size', 'request_count'],
},
'BM_StreamCreateSendInitialMetadataDestroy': {
'tpl': ['fixture'],
'dyn': [],
},
'BM_TransportStreamSend': {
'tpl': [],
'dyn': ['request_size'],
},
'BM_TransportStreamRecv': {
'tpl': [],
'dyn': ['request_size'],
},
'BM_StreamingPingPongWithCoalescingApi': {
'tpl': ['fixture', 'client_mutator', 'server_mutator'],
'dyn': ['request_size', 'request_count', 'end_of_stream'],
},
'BM_Base16SomeStuff': {
'tpl': [],
'dyn': ['request_size'],
}
}
def numericalize(s):
if not s: return ''
if s[-1] == 'k':
return float(s[:-1]) * 1024
if s[-1] == 'M':
return float(s[:-1]) * 1024 * 1024
if 0 <= (ord(s[-1]) - ord('0')) <= 9:
return float(s)
assert 'not a number: %s' % s
def parse_name(name):
cpp_name = name
if '<' not in name and '/' not in name and name not in _BM_SPECS:
return {'name': name, 'cpp_name': name}
rest = name
out = {}
tpl_args = []
dyn_args = []
if '<' in rest:
tpl_bit = rest[rest.find('<') + 1 : rest.rfind('>')]
arg = ''
nesting = 0
for c in tpl_bit:
if c == '<':
nesting += 1
arg += c
elif c == '>':
nesting -= 1
arg += c
elif c == ',':
if nesting == 0:
tpl_args.append(arg.strip())
arg = ''
else:
arg += c
else:
arg += c
tpl_args.append(arg.strip())
rest = rest[:rest.find('<')] + rest[rest.rfind('>') + 1:]
if '/' in rest:
s = rest.split('/')
rest = s[0]
dyn_args = s[1:]
name = rest
assert name in _BM_SPECS, '_BM_SPECS needs to be expanded for %s' % name
assert len(dyn_args) == len(_BM_SPECS[name]['dyn'])
assert len(tpl_args) == len(_BM_SPECS[name]['tpl'])
out['name'] = name
out['cpp_name'] = cpp_name
out.update(dict((k, numericalize(v)) for k, v in zip(_BM_SPECS[name]['dyn'], dyn_args)))
out.update(dict(zip(_BM_SPECS[name]['tpl'], tpl_args)))
return out
def expand_json(js, js2 = None):
for bm in js['benchmarks']:
if bm['name'].endswith('_stddev') or bm['name'].endswith('_mean'): continue
context = js['context']
if 'label' in bm:
labels_list = [s.split(':') for s in bm['label'].strip().split(' ') if len(s) and s[0] != '#']
for el in labels_list:
el[0] = el[0].replace('/iter', '_per_iteration')
labels = dict(labels_list)
else:
labels = {}
row = {
'jenkins_build': os.environ.get('BUILD_NUMBER', ''),
'jenkins_job': os.environ.get('JOB_NAME', ''),
}
row.update(context)
row.update(bm)
row.update(parse_name(row['name']))
row.update(labels)
if js2:
for bm2 in js2['benchmarks']:
if bm['name'] == bm2['name'] and 'already_used' not in bm2:
row['cpu_time'] = bm2['cpu_time']
row['real_time'] = bm2['real_time']
row['iterations'] = bm2['iterations']
bm2['already_used'] = True
yield row
|
Python
| 0 |
@@ -6116,16 +6116,32 @@
= True%0A
+ break%0A
yiel
|
519b141349b4d39902416be560b989160d48b141
|
add echo_delay to estimate the delay between two wav files
|
echo_delay.py
|
echo_delay.py
|
Python
| 0.000001 |
@@ -0,0 +1,672 @@
+%0Aimport sys%0Aimport wave%0Aimport numpy as np%0Afrom gcc_phat import gcc_phat%0A%0A%0Aif len(sys.argv) != 3:%0A print('Usage: %7B%7D near.wav far.wav'.format(sys.argv%5B0%5D))%0A sys.exit(1)%0A%0A%0Anear = wave.open(sys.argv%5B1%5D, 'rb')%0Afar = wave.open(sys.argv%5B2%5D, 'rb')%0Arate = near.getframerate()%0A%0AN = rate%0A%0Awindow = np.hanning(N)%0A%0Awhile True:%0A sig = near.readframes(N)%0A if len(sig) != 2 * N:%0A break%0A%0A ref = far.readframes(N)%0A sig_buf = np.fromstring(sig, dtype='int16')%0A ref_buf = np.fromstring(ref, dtype='int16')%0A tau = gcc_phat(sig_buf * window, ref_buf * window, fs=rate, max_tau=1)%0A # tau = gcc_phat(sig_buf, ref_buf, fs=rate, max_tau=1)%0A print(tau * 1000)%0A%0A
|
|
c71924d4baea473a36f0c22f0878fea7a9ff2800
|
Create constants.py
|
a2/constants.py
|
a2/constants.py
|
Python
| 0.000006 |
@@ -0,0 +1,641 @@
+import re%0Aimport time%0A%0A#first link to view the cruise %0Abase_link = 'https://www.princess.com/find/cruiseDetails.do?voyageCode=2801'%0A%0A#element to find%0Abutton_element = 'md-hidden'%0A%0A#gets the current time%0Atime = time.strftime('%25I:%25M:%25S')%0A%0Aforming = 'building request'%0Aseperator = '%E2%80%93%E2%80%93%E2%80%93%E2%80%93%E2%80%93%E2%80%93%E2%80%93%E2%80%93%E2%80%93%E2%80%93%E2%80%93%E2%80%93%E2%80%93%E2%80%93%E2%80%93%E2%80%93%E2%80%93%E2%80%93%E2%80%93%E2%80%93%E2%80%93%E2%80%93%E2%80%93%E2%80%93%E2%80%93%E2%80%93%E2%80%93%E2%80%93%E2%80%93%E2%80%93%E2%80%93%E2%80%93%E2%80%93%E2%80%93%E2%80%93%E2%80%93%E2%80%93%E2%80%93%E2%80%93%E2%80%93%E2%80%93'%0A%0A#xpath%0A# //*%5Bcontains(@class, 'col-pax-item selectable-blue-arrow col-xs-12 col-xs-pad-0 clearfix'), (@button, '')%5D%0A#//button%5B@data-num-pax=%224%22%5D/text()%0A#//button%5B@data-num-pax=%224%22%5D%0A# //*%5Bcontains(@class, 'col-pax-item selectable-blue-arrow col-xs-12 col-xs-pad-0 clearfix')//%5Bcontains(@button%5Bcontains(text(),'4')%5D)%5D%5D%0A
|
|
3dcd012977d4dfea69ec4a51650ac9a4fd375842
|
add missing migration file
|
registration/migrations/0007_auto_20160416_1217.py
|
registration/migrations/0007_auto_20160416_1217.py
|
Python
| 0.000001 |
@@ -0,0 +1,494 @@
+# -*- coding: utf-8 -*-%0A# Generated by Django 1.9.2 on 2016-04-16 03:17%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations, models%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('registration', '0006_auto_20160416_1202'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='registration',%0A name='payment_message',%0A field=models.CharField(blank=True, max_length=255, null=True),%0A ),%0A %5D%0A
|
|
64d675304b2d66d89e55dcff167d1dd20e6b000c
|
add fragment molecule class for monte carlo simulation
|
afm/molecule.py
|
afm/molecule.py
|
Python
| 0 |
@@ -0,0 +1,194 @@
+%0Aclass FragmentMolecule(object):%0A%0A%09def __init__(self, composition):%0A%09%09self.composition = composition%0A%0A%09def __str__(self):%0A%09%09%22%22%22%0A%09%09Return a string representation.%0A%09%09%22%22%22%0A%09%09return self.composition%0A
|
|
f3c2b9087a06b508a278cb8e6f79200caae1ac07
|
Add a tool to encode udot instructions in asm code so we compile on any toolchain.
|
standalone/encode.py
|
standalone/encode.py
|
Python
| 0 |
@@ -0,0 +1,1507 @@
+import sys%0Aimport re%0A%0A%0Adef encode_udot_vector(line):%0A m = re.search(%0A r'%5Cbudot%5B %5D+v(%5B0-9%5D+)%5B %5D*.%5B %5D*4s%5B %5D*,%5B %5D*v(%5B0-9%5D+)%5B %5D*.%5B %5D*16b%5B %5D*,%5B %5D*v(%5B0-9%5D+)%5B %5D*.%5B %5D*16b',%0A line)%0A if not m:%0A return 0, line%0A%0A match = m.group(0)%0A accum = int(m.group(1))%0A lhs = int(m.group(2))%0A rhs = int(m.group(3))%0A assert accum %3E= 0 and accum %3C= 31%0A assert lhs %3E= 0 and lhs %3C= 31%0A assert rhs %3E= 0 and rhs %3C= 31%0A mcode = 0x6e809400 %7C (accum %3C%3C 0) %7C (lhs %3C%3C 5) %7C (rhs %3C%3C 16)%0A return mcode, match%0A%0A%0Adef encode_udot_element(line):%0A m = re.search(%0A r'%5Cbudot%5B %5D+v(%5B0-9%5D+)%5B %5D*.%5B %5D*4s%5B %5D*,%5B %5D*v(%5B0-9%5D+)%5B %5D*.%5B %5D*16b%5B %5D*,%5B %5D*v(%5B0-9%5D+)%5B %5D*.%5B %5D*4b%5B %5D*%5C%5B(%5B0-9%5D)%5C%5D',%0A line)%0A if not m:%0A return 0, line%0A%0A match = m.group(0)%0A accum = int(m.group(1))%0A lhs = int(m.group(2))%0A rhs = int(m.group(3))%0A lanegroup = int(m.group(4))%0A assert accum %3E= 0 and accum %3C= 31%0A assert lhs %3E= 0 and lhs %3C= 31%0A assert rhs %3E= 0 and rhs %3C= 31%0A assert lanegroup %3E= 0 and lanegroup %3C= 3%0A l = 1 if lanegroup & 1 else 0%0A h = 1 if lanegroup & 2 else 0%0A mcode = 0x6f80e000 %7C (accum %3C%3C 0) %7C (lhs %3C%3C 5) %7C (rhs %3C%3C 16) %7C (l %3C%3C 21) %7C (%0A h %3C%3C 11)%0A return mcode, match%0A%0A%0Adef encode(line):%0A mcode, match = encode_udot_vector(line)%0A if mcode:%0A return mcode, match%0A mcode, match = encode_udot_element(line)%0A if mcode:%0A return mcode, match%0A return 0, line%0A%0A%0Afor line in sys.stdin:%0A mcode, match = encode(line)%0A if mcode:%0A line = line.replace(match, '.word 0x%25x // %25s' %25 (mcode, match))%0A sys.stdout.write(line)%0A
|
|
fb8b1d7cb6e98e97fb383ca7457cb1cd237f8184
|
Add usernamer.py
|
examples/username.py
|
examples/username.py
|
Python
| 0.000013 |
@@ -0,0 +1,447 @@
+# Madenning Username Generator%0A# Returns first char of first name and first 7 chars of last name%0A%0A%0Adef usernamer(first_name, last_name):%0A username = first_name%5B0%5D + last_name%5B:7%5D%0A return username.lower()%0A%0A%0Aif __name__ == '__main__':%0A # Testing%0A assert usernamer(%22Joshua%22, %22Wedekind%22) == %22jwedekin%22%0A%0A%0A first_name = input(%22Enter first name: %22)%0A last_name = input(%22Enter last name: %22)%0A%0A%0A print(usernamer(first_name, last_name))%0A%0A
|
|
1417d5345d68ef67ba6e832bbc45b8f0ddd911bc
|
Create testTemplate.py
|
data_structures/linked_list/utils/testTemplate.py
|
data_structures/linked_list/utils/testTemplate.py
|
Python
| 0.000001 |
@@ -0,0 +1,856 @@
+# A test template for Python solutions.%0A%0Aimport sys%0A%0Adef TestMain(sol, log=sys.stdout, doNotLogPassed=True) -%3E bool:%0A %22%22%22%0A @param sol: the function to be tested.%0A @param log: a stream or a file to log the tester output to.%0A @param doNotLogPassed: if True, all successful tests will not be logged.%0A @return: True if all tests in the TESTS array were successful, False otherwise.%0A%0A All tester functions should follow the signature%0A of the TestMain function.%0A %22%22%22%0A %0A def TestPredefined(solution: function, log):%0A raise NotImplementedError()%0A %0A # Please add all tester functions to the TESTS tuple.%0A TESTS = (TestPredefined, )%0A areAllPassed = True%0A%0A for Test in TESTS:%0A if not Test(solution, log):%0A areAllPassed = False%0A%0A return areAllPassed%0A
|
|
3ea318cf5c1b66106bf496d513efdd6e86d0f665
|
add vowpal_wabbit requirement installation
|
robustus/detail/install_vowpal_wabbit.py
|
robustus/detail/install_vowpal_wabbit.py
|
Python
| 0 |
@@ -0,0 +1,3096 @@
+# =============================================================================%0A# COPYRIGHT 2013 Brain Corporation.%0A# License under MIT license (see LICENSE file)%0A# =============================================================================%0A%0Aimport logging%0Aimport os%0Afrom requirement import RequirementException%0Afrom utility import unpack, safe_remove, run_shell, ln %0Aimport shutil%0Aimport subprocess%0A%0A%0Adef install(robustus, requirement_specifier, rob_file, ignore_index):%0A cwd = os.getcwd()%0A os.chdir(robustus.cache)%0A%0A install_dir = os.path.join(robustus.cache, 'vowpal_wabbit-%25s' %25 requirement_specifier.version)%0A%0A # try to download precompiled Vowpal Wabbit from the remote cache first%0A if not os.path.isdir(install_dir) and not ignore_index:%0A wabbit_archive = robustus.download_compiled_archive('vowpal_wabbit', requirement_specifier.version)%0A if wabbit_archive is not None:%0A unpack(wabbit_archive)%0A logging.info('Initializing compiled vowpal_wabbit')%0A # install into wheelhouse%0A if not os.path.exists(install_dir):%0A raise RequirementException(%22Failed to unpack precompiled vowpal_wabbit archive%22)%0A%0A if not os.path.isdir(install_dir) and not ignore_index:%0A archive_name = '%25s.tar.gz' %25 requirement_specifier.version # e.g. %227.7.tar.gz%22%0A if os.path.exists(archive_name):%0A safe_remove(archive_name)%0A # move sources to a folder in order to use a clean name for installation%0A src_dir = 'vowpal_wabbit-%25s' %25 requirement_specifier.version%0A if os.path.exists(src_dir):%0A safe_remove(src_dir)%0A run_shell(%5B'wget', 'https://github.com/JohnLangford/vowpal_wabbit/archive/%25s' %25 (archive_name,)%5D,%0A verbose=robustus.settings%5B'verbosity'%5D %3E= 1)%0A run_shell(%5B'tar', 'zxvf', archive_name%5D,%0A verbose=robustus.settings%5B'verbosity'%5D %3E= 1)%0A%0A if os.path.exists(src_dir+'_src'):%0A safe_remove(src_dir+'_src')%0A%0A shutil.move(src_dir, src_dir+'_src')%0A src_dir += '_src'%0A%0A os.chdir(src_dir)%0A if os.path.exists(install_dir):%0A safe_remove(install_dir)%0A os.mkdir(install_dir)%0A%0A retcode = run_shell(%5B'make'%5D, verbose=robustus.settings%5B'verbosity'%5D %3E= 1)%0A%0A if retcode:%0A raise RequirementException('Failed to compile Vowpal Wabbit')%0A %0A retcode = run_shell('make install', shell=True)%0A if retcode:%0A raise RequirementException('Failed install Vowpal Wabbit')%0A%0A os.chdir(robustus.cache)%0A shutil.rmtree(src_dir)%0A%0A venv_install_folder = os.path.join(robustus.env, 'vowpal_wabbit')%0A if os.path.exists(venv_install_folder):%0A safe_remove(venv_install_folder) %0A shutil.copytree(install_dir, venv_install_folder)%0A executable_path = os.path.join(install_dir, 'bin', 'vw')%0A ln(executable_path, os.path.join(robustus.env, 'bin', 'vw'), force=True)%0A os.chdir(cwd)%0A%0A # now install python part%0A robustus.install_through_wheeling(requirement_specifier, rob_file, ignore_index)%0A
|
|
30567284410b9bb7154b8d39e5dfe7bc4bb1b269
|
Add migration for on_delete SET_NULL
|
herald/migrations/0006_auto_20170825_1813.py
|
herald/migrations/0006_auto_20170825_1813.py
|
Python
| 0.000027 |
@@ -0,0 +1,610 @@
+# -*- coding: utf-8 -*-%0A# Generated by Django 1.9.8 on 2017-08-25 23:13%0Afrom __future__ import unicode_literals%0A%0Afrom django.conf import settings%0Afrom django.db import migrations, models%0Aimport django.db.models.deletion%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('herald', '0005_merge_20170407_1316'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='sentnotification',%0A name='user',%0A field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL),%0A ),%0A %5D%0A
|
|
1999295556ba404c7542d2001d7fdca80de54b5f
|
update api
|
functions/bcftools/main.py
|
functions/bcftools/main.py
|
Python
| 0 |
@@ -0,0 +1,971 @@
+%22%22%22%0ALambda example with external dependency%0A%22%22%22%0A%0Aimport logging%0Afrom subprocess import Popen, PIPE%0Aimport json%0A%0Alogger = logging.getLogger()%0Alogger.setLevel(logging.INFO)%0A%0Adef return_msg(out, err, status = 200):%0A return %7B%0A 'statusCode': status,%0A 'body': json.dumps(%7B%22out%22: out, %22err%22: err%7D),%0A 'headers': %7B%0A 'Content-Type': 'application/json',%0A %7D%0A %7D%0A%0A%0Adef handle(event, context):%0A logger.info(%22%25s ------ %25s%22, event, context)%0A if 'body' not in event:%0A return return_msg(None, %22Error: must specify VCF and region%22, 400)%0A body = event%5B'body'%5D%0A if 'vcf' not in body:%0A return return_msg(None, %22Error: must specify VCF and region%22, 400)%0A%0A %0A logger.info(%22%25s%22, event%5B'body'%5D)%0A out, err = Popen(%5B%22./bcftools%22%5D, stdout = PIPE, stderr = PIPE).communicate()%0A logger.info(out + %22 out%22)%0A logger.info(err + %22 err%22)%0A%0A return return_msg(out, err, 200)%0A
|
|
f3c4bac262c6d09730b3f0c4a24639fde8b4d923
|
Add wsgi compatible example gunicorn application
|
gunicorn-app.py
|
gunicorn-app.py
|
Python
| 0 |
@@ -0,0 +1,1198 @@
+from __future__ import unicode_literals%0A%0Aimport multiprocessing%0A%0Aimport gunicorn.app.base%0A%0Afrom gunicorn.six import iteritems%0A%0A%0Adef number_of_workers():%0A return (multiprocessing.cpu_count() * 2) + 1%0A%0A%0Adef handler_app(environ, start_response):%0A response_body = b'Works fine'%0A status = '200 OK'%0A%0A response_headers = %5B%0A ('Content-Type', 'text/plain'),%0A %5D%0A%0A start_response(status, response_headers)%0A%0A return %5Bresponse_body%5D%0A%0A%0Aclass StandaloneApplication(gunicorn.app.base.BaseApplication):%0A%0A def __init__(self, app, options=None):%0A self.options = options or %7B%7D%0A self.application = app%0A super(StandaloneApplication, self).__init__()%0A%0A def load_config(self):%0A config = dict(%5B(key, value) for key, value in iteritems(self.options)%0A if key in self.cfg.settings and value is not None%5D)%0A for key, value in iteritems(config):%0A self.cfg.set(key.lower(), value)%0A%0A def load(self):%0A return self.application%0A%0A%0Aif __name__ == '__main__':%0A options = %7B%0A 'bind': '%25s:%25s' %25 ('127.0.0.1', '8080'),%0A 'workers': number_of_workers(),%0A %7D%0A StandaloneApplication(handler_app, options).run()%0A
|
|
8d8522c95492f034db2a43e95a6c9cd3fb60c798
|
Create glove2word2vec.py
|
glove2word2vec.py
|
glove2word2vec.py
|
Python
| 0.000735 |
@@ -0,0 +1,2707 @@
+#!/usr/bin/env python%0A# -*- coding: utf-8 -*-%0A#%0A# Copyright (C) 2016 Manas Ranjan Kar %[email protected]%3E%0A# Licensed under the MIT License https://opensource.org/licenses/MIT%0A%0A%22%22%22%0ACLI USAGE: python glove2word2vec.py %3CGloVe vector file%3E %3COutput model file%3E%0A%0AConvert GloVe vectors into Gensim compatible format to instantiate from an existing file on disk in the word2vec C format;%0A%0Amodel = gensim.models.Word2Vec.load_word2vec_format('/tmp/vectors.txt', binary=False) # C text format%0A%0Aword2vec embeddings start with a line with the number of lines (tokens?) and the number of dimensions of the file. This allows gensim to allocate memory %0Aaccordingly for querying the model. Larger dimensions mean larger memory is held captive. Accordingly, this line has to be inserted into the GloVe %0Aembeddings file.%0A%0A%22%22%22%0Aimport re%0Aimport sys%0Aimport gensim%0Aimport smart_open%0A%0A%0Adef glove2word2vec(glove_vector_file,output_model_file):%0A%0A %0A def get_info(glove_file_name):%0A %22%22%22 %0A Function to calculate the number of lines and dimensions of the GloVe vectors to make it Gensim compatible%0A %22%22%22%0A num_lines = sum(1 for line in smart_open.smart_open(glove_vector_file))%0A if 'twitter' in glove_file_name:%0A dims= re.findall('%5Cd+',glove_vector_file.split('.')%5B3%5D)%0A dims=''.join(dims)%0A else:%0A dims=re.findall('%5Cd+',glove_vector_file.split('.')%5B2%5D)%0A dims=''.join(dims)%0A return num_lines,dims%0A %0A def prepend_line(infile, outfile, line):%0A %22%22%22 %0A Function to prepend lines using smart_open%0A %22%22%22%0A with smart_open.smart_open(infile, 'rb') as old:%0A with smart_open.smart_open(outfile, 'wb') as new:%0A new.write(str(line) + %22%5Cn%22)%0A for line in old:%0A new.write(line)%0A return outfile%0A %0A %0A num_lines,dims=get_info(glove_vector_file)%0A gensim_first_line = %22%7B%7D %7B%7D%22.format(num_lines, dims)%0A %0A print '%25s lines with %25s dimensions' %25(num_lines,dims)%0A %0A model_file=prepend_line(glove_vector_file,output_model_file,gensim_first_line)%0A %0A # Demo: Loads the newly created glove_model.txt into gensim API. %0A model=gensim.models.Word2Vec.load_word2vec_format(model_file,binary=False) #GloVe Model %0A print 'Most similar to king are: ', model.most_similar(positive=%5B'king'%5D, topn=10)%0A print 'Similarity score between woman and man is: ', model.similarity('woman', 'man')%0A print 'Model %25s successfully created !!'%25output_model_file%0A %0A return model_file%0A%0Aif __name__ == %22__main__%22:%0A%0A glove_vector_file=sys.argv%5B1%5D%0A output_model_file=sys.argv%5B2%5D%0A glove2word2vec(glove_vector_file,output_model_file)%0A
|
|
c718cf1d483b2570b886269cf990458b195500b5
|
Remove Access-Control-Allow-Origin after all
|
gratipay/utils/cache_static.py
|
gratipay/utils/cache_static.py
|
"""
Handles caching of static resources.
"""
from base64 import b64encode
from hashlib import md5
from aspen import Response
ETAGS = {}
def asset_etag(path):
if path.endswith('.spt'):
return ''
if path in ETAGS:
h = ETAGS[path]
else:
with open(path) as f:
h = ETAGS[path] = b64encode(md5(f.read()).digest(), '-_').replace('=', '~')
return h
# algorithm functions
def get_etag_for_file(dispatch_result):
return {'etag': asset_etag(dispatch_result.match)}
def try_to_serve_304(website, dispatch_result, request, etag):
"""Try to serve a 304 for static resources.
"""
if not etag:
# This is a request for a dynamic resource.
return
qs_etag = request.line.uri.querystring.get('etag')
if qs_etag and qs_etag != etag:
# Don't serve one version of a file as if it were another.
raise Response(410)
headers_etag = request.headers.get('If-None-Match')
if not headers_etag:
# This client doesn't want a 304.
return
if headers_etag != etag:
# Cache miss, the client sent an old or invalid etag.
return
# Huzzah!
# =======
# We can serve a 304! :D
raise Response(304)
def add_caching_to_response(website, response, request=None, etag=None):
"""Set caching headers for static resources.
"""
if etag is None:
return
assert request is not None # sanity check
if response.code not in (200, 304):
return
# https://developers.google.com/speed/docs/best-practices/caching
response.headers['Vary'] = 'accept-encoding'
response.headers['Etag'] = etag
if 'Access-Control-Allow-Origin' not in response.headers:
response.headers['Access-Control-Allow-Origin'] = 'https://gratipay.com'
if request.line.uri.querystring.get('etag'):
# We can cache "indefinitely" when the querystring contains the etag.
response.headers['Cache-Control'] = 'public, max-age=31536000'
else:
# Otherwise we cache for 5 seconds
response.headers['Cache-Control'] = 'public, max-age=5'
|
Python
| 0 |
@@ -1662,151 +1662,8 @@
etag
-%0A if 'Access-Control-Allow-Origin' not in response.headers:%0A response.headers%5B'Access-Control-Allow-Origin'%5D = 'https://gratipay.com'
%0A%0A
|
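For reference, the etag scheme in `asset_etag` above is a URL-safe base64 of the file's md5 digest with the `=` padding swapped for `~`. A minimal Python 3 sketch of the same computation (bytes handling differs slightly from the Python 2 original):

from base64 import b64encode
from hashlib import md5


def asset_etag(path):
    """URL-safe base64 of the md5 digest, with '=' padding replaced by '~'."""
    with open(path, 'rb') as f:
        digest = md5(f.read()).digest()
    return b64encode(digest, b'-_').replace(b'=', b'~').decode('ascii')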
2f6bfddbff166115e59db7763a62258a06b4e789
|
Apply orphaned migration
|
project/apps/api/migrations/0010_remove_chart_song.py
|
project/apps/api/migrations/0010_remove_chart_song.py
|
Python
| 0 |
@@ -0,0 +1,345 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import models, migrations%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('api', '0009_auto_20150722_1041'),%0A %5D%0A%0A operations = %5B%0A migrations.RemoveField(%0A model_name='chart',%0A name='song',%0A ),%0A %5D%0A
|
|
0f787b6b8a6a069edb0f654cdf9000a8dba3277c
|
Version Change
|
geocoder/__init__.py
|
geocoder/__init__.py
|
#!/usr/bin/python
# coding: utf8
from __future__ import absolute_import
"""
Geocoder
~~~~~~~~
Simple and consistent geocoding library written in Python.
Many online providers such as Google & Bing have geocoding services,
but these providers do not include Python libraries and return
different JSON responses from each other.
Consistent JSON responses from various providers.
>>> g = geocoder.google('New York City')
>>> g.latlng
[40.7127837, -74.0059413]
>>> g.state
'New York'
>>> g.json
...
"""
__title__ = 'geocoder'
__author__ = 'Denis Carriere'
__author_email__ = '[email protected]'
__version__ = '1.6.3'
__license__ = 'MIT'
__copyright__ = 'Copyright (c) 2013-2015 Denis Carriere'
# CORE
from geocoder.api import get, yahoo, bing, geonames, mapquest, google, mapbox # noqa
from geocoder.api import nokia, osm, tomtom, geolytica, arcgis, opencage # noqa
from geocoder.api import maxmind, ipinfo, freegeoip, ottawa, here, baidu, w3w, yandex # noqa
# EXTRAS
from geocoder.api import timezone, elevation, ip, canadapost, reverse, distance, location # noqa
# CLI
from geocoder.cli import cli # noqa
|
Python
| 0.000001 |
@@ -643,17 +643,17 @@
= '1.6.
-3
+4
'%0A__lice
|
c0c7eeded364e02509fd66f95acd30e9928c663c
|
Do not respect context
|
taskwiki/taskwiki.py
|
taskwiki/taskwiki.py
|
import sys
import vim # pylint: disable=F0401
from tasklib.task import TaskWarrior, Task
# Insert the taskwiki on the python path
sys.path.insert(0, vim.eval("s:plugin_path") + '/taskwiki')
import cache
import util
import vwtask
"""
How this plugin works:
1.) On startup, it reads all the tasks and syncs info TW -> Vimwiki file. Tasks are identified by their
uuid.
2.) When saving, the opposite sync is performed (Vimwiki -> TW direction).
a) if a task is marked as a subtask by indentation, a dependency is created between it and its parent
"""
tw = TaskWarrior()
cache = cache.TaskCache(tw)
class WholeBuffer(object):
@staticmethod
def update_from_tw():
"""
Updates all the incomplete tasks in the vimwiki file if the info from TW is different.
"""
cache.load_vwtasks()
cache.load_tasks()
cache.update_vwtasks_from_tasks()
cache.update_vwtasks_in_buffer()
cache.evaluate_viewports()
@staticmethod
def update_to_tw():
"""
Updates all tasks that differ from their TaskWarrior representation.
"""
cache.reset()
cache.load_vwtasks()
cache.load_tasks()
cache.save_tasks()
cache.update_vwtasks_in_buffer()
cache.evaluate_viewports()
class Splits(object):
@staticmethod
def projects():
output = tw.execute_command(['projects'])
util.show_in_split(output, name="projects", vertical=True)
@staticmethod
def summary():
output = util.tw_execute_colorful(tw, ['summary'])
util.show_in_split(output, name="summary", vertical=True)
@staticmethod
def burndown():
output = util.tw_execute_colorful(tw, ['burndown'], maxwidth=True)
util.show_in_split(output, name="burndown")
class SelectedTasks(object):
def __init__(self):
self.tw = tw
# Reset cache, otherwise old line content may be used
cache.reset()
# Load the current tasks
range_tasks = [cache[i] for i in util.selected_line_numbers()]
self.tasks = [t for t in range_tasks if t is not None]
if not self.tasks:
print("No tasks selected.")
def info(self):
for vimwikitask in self.tasks:
info = self.tw.execute_command([vimwikitask['uuid'], 'info'])
util.show_in_split(info)
break # Show only one task
def link(self):
path = util.get_absolute_filepath()
for vimwikitask in self.tasks:
vimwikitask.task.add_annotation("wiki: {0}".format(path))
print("Task \"{0}\" linked.".format(vimwikitask['description']))
def delete(self):
# Delete the tasks in TaskWarrior
# Multiple VimwikiTasks might refer to the same task, so make sure
# we do not delete one task twice
for task in set(vimwikitask.task for vimwikitask in self.tasks):
task.delete()
# Remove the lines in the buffer
for vimwikitask in self.tasks:
cache.remove_line(vimwikitask['line_number'])
print("Task \"{0}\" deleted.".format(vimwikitask['description']))
def modify(self, modstring):
# If no modstring was passed as argument, ask the user interactively
if not modstring:
modstring = util.get_input("Enter modifications: ")
# The same task might appear twice in the range; make sure we do not
# pass the same uuid twice
unique_tasks = set(vimwikitask.task['uuid'] for vimwikitask in self.tasks)
uuids = ','.join(unique_tasks)
# Generate the arguments from the modstring
args = util.tw_modstring_to_args(modstring)
# Modify all tasks at once
output = self.tw.execute_command([uuids, 'mod'] + args)
# Update the touched tasks in buffer, if needed
cache.load_tasks()
cache.update_vwtasks_from_tasks()
cache.update_vwtasks_in_buffer()
# Output the feedback from TW
if output:
print(output[-1])
if __name__ == '__main__':
WholeBuffer.update_from_tw()
|
Python
| 0.999521 |
@@ -598,16 +598,87 @@
he(tw)%0A%0A
+# Make sure context is not respected%0Atw.config.update(%7B'context':''%7D)%0A%0A
%0Aclass W
|
2a41ad4f6bce3b554287831ed594afbc0ddc5959
|
Move matching tasks detection to a separate property
|
taskwiki/viewport.py
|
taskwiki/viewport.py
|
import itertools
import re
import vim # pylint: disable=F0401
import vwtask
import regexp
import util
class ViewPort(object):
"""
Represents viewport with a given filter.
A ViewPort is a vimwiki heading which contains (albeit usually hidden
by the vim's concealing feature) the definition of TaskWarrior filter.
ViewPort then displays all the tasks that match the given filter below it.
=== Work related tasks | pro:Work ===
* [ ] Talk with the boss
* [ ] Publish a new blogpost
* [ ] Pick a topic
* [ ] Make sure the hosting is working
"""
def __init__(self, line_number, cache, taskfilter, defaults):
"""
Constructs a ViewPort out of a given line.
"""
self.cache = cache
self.tw = cache.tw
self.line_number = line_number
self.taskfilter = taskfilter
self.defaults = defaults
self.tasks = set()
@classmethod
def from_line(cls, number, cache):
match = re.search(regexp.GENERIC_VIEWPORT, vim.current.buffer[number])
if not match:
return None
taskfilter = util.tw_modstring_to_args(match.group('filter') or '')
defaults = util.tw_modstring_to_kwargs(
match.group('filter') + ' ' + (match.group('defaults') or ''))
self = cls(number, cache, taskfilter, defaults)
return self
@classmethod
def find_closest(cls, cache):
current_line = util.get_current_line_number()
# Search lines in order: first all above, than all below
line_numbers = itertools.chain(
reversed(range(0, current_line + 1)),
range(current_line + 1, len(vim.current.buffer))
)
for i in line_numbers:
port = cls.from_line(i, cache)
if port:
return port
@property
def raw_filter(self):
return ' '.join(self.taskfilter)
def load_tasks(self):
# Load all tasks below the viewport
for i in range(self.line_number + 1, len(vim.current.buffer)):
line = vim.current.buffer[i]
match = re.search(regexp.GENERIC_TASK, line)
if match:
self.tasks.add(self.cache[i])
else:
# If we didn't find a valid task, terminate the viewport
break
def sync_with_taskwarrior(self):
# This is called at the point where all the tasks in the vim
# buffer are already synced. This should load the tasks from TW
# matching the filter, and add the tasks that are new. Optionally,
# remove the tasks that no longer belong there.
# Split the filter into CLI tokens and filter by the expression
# By default, do not list deleted tasks
args = ["-DELETED"] + self.taskfilter
matching_tasks = set(
task for task in self.tw.tasks.filter(*args)
)
to_add = matching_tasks - set(t.task for t in self.tasks)
to_del = set(t.task for t in self.tasks) - matching_tasks
# Remove tasks that no longer match the filter
for task in to_del:
# Find matching vimwikitasks in the self.tasks set
# There might be more if the viewport contained multiple
# representations of the same task
matching_vimwikitasks= [t for t in self.tasks
if t.uuid == task['uuid']]
# Remove the tasks from viewport's set and from buffer
for vimwikitask in matching_vimwikitasks:
self.tasks.remove(vimwikitask)
self.cache.remove_line(vimwikitask['line_number'])
# Add the tasks that match the filter and are not listed
added_tasks = 0
for task in to_add:
added_tasks += 1
added_at = self.line_number + len(self.tasks) + added_tasks
# Add the task object to cache
self.cache[task['uuid']] = task
# Create the VimwikiTask
vimwikitask = vwtask.VimwikiTask.from_task(self.cache, task)
vimwikitask['line_number'] = added_at
# Save it to cache
self.cache[added_at] = vimwikitask
# Update the buffer
self.cache.insert_line(str(vimwikitask), added_at)
|
Python
| 0.000005 |
@@ -1945,16 +1945,314 @@
ilter)%0A%0A
+ @property%0A def matching_tasks(self):%0A # Split the filter into CLI tokens and filter by the expression%0A # By default, do not list deleted tasks%0A args = %5B%22-DELETED%22%5D + self.taskfilter%0A return set(%0A task for task in self.tw.tasks.filter(*args)%0A )%0A%0A
def
@@ -2988,262 +2988,44 @@
-# Split the filter into CLI tokens and filter by the expression%0A # By default, do not list deleted tasks%0A args = %5B%22-DELETED%22%5D + self.taskfilter%0A matching_tasks = set(%0A task for task in self.tw.tasks.filter(*args)%0A )
+matching_tasks = self.matching_tasks
%0A%0A
|
08772aa328b12d358b5047dcfa6f43120d4ffd25
|
remove extra whitespace
|
gmusicapi/session.py
|
gmusicapi/session.py
|
# -*- coding: utf-8 -*-
"""
Sessions handle the details of authentication and transporting requests.
"""
from __future__ import print_function, division, absolute_import, unicode_literals
from future import standard_library
standard_library.install_aliases()
from builtins import * # noqa
from contextlib import closing
import gpsoauth
import httplib2 # included with oauth2client
import mechanicalsoup
import oauth2client
import requests
from gmusicapi.exceptions import (
AlreadyLoggedIn, NotLoggedIn, CallFailure
)
from gmusicapi.protocol import webclient
from gmusicapi.utils import utils
log = utils.DynamicClientLogger(__name__)
class _Base(object):
def __init__(self, rsession_setup=None):
"""
:param rsession_setup: a callable that will be called with
the backing requests.Session to delegate config to callers.
"""
self._rsession = requests.Session()
if rsession_setup is None:
rsession_setup = lambda x: x # noqa
self._rsession_setup = rsession_setup
self._rsession_setup(self._rsession)
self.is_authenticated = False
def _send_with_auth(self, req_kwargs, desired_auth, rsession):
raise NotImplementedError
def _send_without_auth(self, req_kwargs, rsession):
return rsession.request(**req_kwargs)
def login(self, *args, **kwargs):
# subclasses extend / use super()
if self.is_authenticated:
raise AlreadyLoggedIn
def logout(self):
"""
Reset the session to an unauthenticated, default state.
"""
self._rsession.close()
self._rsession = requests.Session()
self._rsession_setup(self._rsession)
self.is_authenticated = False
def send(self, req_kwargs, desired_auth, rsession=None):
"""Send a request from a Call using this session's auth.
:param req_kwargs: kwargs for requests.Session.request
:param desired_auth: protocol.shared.AuthTypes to attach
:param rsession: (optional) a requests.Session to use
(default ``self._rsession`` - this is exposed for test purposes)
"""
res = None
if not any(desired_auth):
if rsession is None:
# use a throwaway session to ensure it's clean
with closing(requests.Session()) as new_session:
self._rsession_setup(new_session)
res = self._send_without_auth(req_kwargs, new_session)
else:
res = self._send_without_auth(req_kwargs, rsession)
else:
if not self.is_authenticated:
raise NotLoggedIn
if rsession is None:
rsession = self._rsession
res = self._send_with_auth(req_kwargs, desired_auth, rsession)
return res
class Webclient(_Base):
def login(self, email, password, *args, **kwargs):
"""
Perform serviceloginauth then retrieve webclient cookies.
:param email:
:param password:
"""
super(Webclient, self).login()
# Google's login form has a bunch of hidden fields I'd rather not deal with manually.
browser = mechanicalsoup.Browser(soup_config={"features": "html.parser"})
login_page = browser.get('https://accounts.google.com/ServiceLoginAuth',
params={'service': 'sj',
'continue': 'https://play.google.com/music/listen'})
form_candidates = login_page.soup.select("form")
if len(form_candidates) > 1:
log.error("Google login form dom has changed; there are %s candidate forms:\n%s",
len(form_candidates), form_candidates)
return False
form = form_candidates[0]
form.select("#Email")[0]['value'] = email
response = browser.submit(form, 'https://accounts.google.com/AccountLoginInfo')
try:
response.raise_for_status()
except requests.HTTPError:
log.exception("submitting login form failed")
return False
form_candidates = response.soup.select("form")
if len(form_candidates) > 1:
log.error("Google login form dom has changed; there are %s candidate forms:\n%s",
len(form_candidates), form_candidates)
return False
form = form_candidates[0]
form.select("#Passwd")[0]['value'] = password
response = browser.submit(form, 'https://accounts.google.com/ServiceLoginAuth')
try:
response.raise_for_status()
except requests.HTTPError:
log.exception("submitting login form failed")
return False
# We can't use in without .keys(), since international users will see a
# CookieConflictError.
if 'SID' not in list(browser.session.cookies.keys()):
# Invalid auth.
return False
self._rsession.cookies.update(browser.session.cookies)
self.is_authenticated = True
# Get webclient cookies.
# They're stored automatically by requests on the webclient session.
try:
webclient.Init.perform(self, True)
except CallFailure:
log.exception("unable to initialize webclient cookies")
self.logout()
return self.is_authenticated
def _send_with_auth(self, req_kwargs, desired_auth, rsession):
if desired_auth.xt:
req_kwargs.setdefault('params', {})
req_kwargs['params'].update({'u': 0, 'xt': rsession.cookies['xt']})
return rsession.request(**req_kwargs)
class Mobileclient(_Base):
def __init__(self, *args, **kwargs):
super(Mobileclient, self).__init__(*args, **kwargs)
self._master_token = None
self._authtoken = None
def login(self, email, password, android_id, *args, **kwargs):
"""
Get a master token, then use it to get a skyjam OAuth token.
:param email:
:param password:
:param android_id:
"""
super(Mobileclient, self).login(email, password, android_id, *args, **kwargs)
res = gpsoauth.perform_master_login(email, password, android_id)
if 'Token' not in res:
return False
self._master_token = res['Token']
res = gpsoauth.perform_oauth(
email, self._master_token, android_id,
service='sj', app='com.google.android.music',
client_sig='38918a453d07199354f8b19af05ec6562ced5788')
if 'Auth' not in res:
return False
self._authtoken = res['Auth']
self.is_authenticated = True
return True
def _send_with_auth(self, req_kwargs, desired_auth, rsession):
if desired_auth.oauth:
req_kwargs.setdefault('headers', {})
# does this expire?
req_kwargs['headers']['Authorization'] = \
'GoogleLogin auth=' + self._authtoken
return rsession.request(**req_kwargs)
class Musicmanager(_Base):
def __init__(self, *args, **kwargs):
super(Musicmanager, self).__init__(*args, **kwargs)
self._oauth_creds = None
def login(self, oauth_credentials, *args, **kwargs):
"""Store an already-acquired oauth2client.Credentials."""
super(Musicmanager, self).login()
try:
# refresh the token right away to check auth validity
oauth_credentials.refresh(httplib2.Http())
except oauth2client.client.Error:
log.exception("error when refreshing oauth credentials")
if oauth_credentials.access_token_expired:
log.info("could not refresh oauth credentials")
return False
self._oauth_creds = oauth_credentials
self.is_authenticated = True
return self.is_authenticated
def _send_with_auth(self, req_kwargs, desired_auth, rsession):
if desired_auth.oauth:
if self._oauth_creds.access_token_expired:
self._oauth_creds.refresh(httplib2.Http())
req_kwargs['headers'] = req_kwargs.get('headers', {})
req_kwargs['headers']['Authorization'] = \
'Bearer ' + self._oauth_creds.access_token
return rsession.request(**req_kwargs)
|
Python
| 0.99999 |
@@ -3966,24 +3966,16 @@
nInfo')%0A
-
%0A
|
b3ab8fa855a08f0d63885b6df206715d1f36a817
|
Add DNS-over-HTTPS example script
|
mrequests/examples/dns-over-https.py
|
mrequests/examples/dns-over-https.py
|
Python
| 0 |
@@ -0,0 +1,962 @@
+import mrequests%0Afrom urlencode import urlencode%0A%0ADOH_IP = %221.1.1.1%22%0ADOH_SERVER = b%22cloudflare-dns.com%22%0ADOH_PATH = %22/dns-query%22%0A%0A%0Adef gethostbyname(name):%0A params = urlencode(%7B%0A %22name%22: name,%0A %22type%22: %22A%22%0A %7D)%0A headers = %7B%0A b%22accept%22: b%22application/dns-json%22,%0A b%22user-agent%22: b%22mrequests.py%22,%0A b%22Host%22: DOH_SERVER%0A %7D%0A req = mrequests.get(%0A %22https://%7B%7D%7B%7D?%7B%7D%22.format(DOH_IP, DOH_PATH, params),%0A headers=headers%0A )%0A# ~ print(req.status_code)%0A if req.status == 200:%0A reply = req.json()%0A else:%0A reply = %7B%7D%0A%0A req.close()%0A%0A if reply.get(%22Status%22) == 0:%0A return %5Bitem%5B%22data%22%5D for item in reply.get(%22Answer%22, %5B%5D)%5D%0A%0A%0Aif __name__ == '__main__':%0A import sys%0A #name = sys.argv%5B1%5D%0A name = %22httpbin.org%22%0A res = gethostbyname(name)%0A if res:%0A print(%22 %22.join(res))%0A else:%0A print(%22Could not resolve host name '%7B%7D'.%22.format(name), file=sys.stderr)%0A
|
|
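The script above targets MicroPython's `mrequests`; the same Cloudflare JSON API works from CPython with `requests`. A minimal illustrative sketch, not part of the original record:

import requests


def gethostbyname(name):
    """Resolve `name` to A records via Cloudflare's DNS-over-HTTPS JSON API."""
    resp = requests.get(
        'https://cloudflare-dns.com/dns-query',
        params={'name': name, 'type': 'A'},
        headers={'accept': 'application/dns-json'},
        timeout=10,
    )
    resp.raise_for_status()
    reply = resp.json()
    if reply.get('Status') != 0:
        return []
    return [item['data'] for item in reply.get('Answer', [])]


print(gethostbyname('httpbin.org'))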
4f765997c740f1f9b2dc985e7f3b0a467e8c311a
|
add code.
|
image_to_yymmdd_dir_by_EXIF.py
|
image_to_yymmdd_dir_by_EXIF.py
|
Python
| 0.000001 |
@@ -0,0 +1,1041 @@
+# -*- coding: utf-8 -*- %0A%0Afrom PIL import Image%0Aimport os%0Aimport shutil%0A%0Auser_name = os.getlogin()%0A# image/hoge.jpg, image/fuga.png, etc...%0Asrc_dir = %22/Users/%22 + user_name + %22/Desktop/image/%22%0A# create dst_dir/yyyymmdd/%0Adst_dir = %22/Users/%22 + user_name + %22/Desktop/dst_dir/%22%0A%0Aif os.path.exists(dst_dir) == False:%0A os.mkdir(dst_dir)%0A%0Afor root, dirs, files in os.walk(src_dir):%0A for filename in files:%0A try:%0A image_info = Image.open(src_dir + filename)%0A # 36867 : EXIF DateTimeOriginal%0A date = image_info._getexif()%5B36867%5D%0A yyyy, mm, dd = date%5B:4%5D, date%5B5:7%5D, date%5B8:10%5D%0A yyyymmdd_dir = os.path.join(dst_dir, yyyy + mm + dd)%0A if os.path.exists(yyyymmdd_dir) == False:%0A os.mkdir(yyyymmdd_dir)%0A dst = os.path.join(yyyymmdd_dir, filename)%0A if os.path.exists(dst) == False:%0A shutil.copy2(src_dir + filename, dst)%0A except Exception as e:%0A # .DS_Store must Die%0A print filename + ' is fail.'%0A
|
|
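The diff above keys on raw EXIF tag 36867, which is `DateTimeOriginal` in the form `YYYY:MM:DD HH:MM:SS`. A small Python 3 sketch of just the date-extraction step with Pillow; the sample path is hypothetical:

from PIL import Image


def exif_date_dir(path):
    """Return 'yyyymmdd' from EXIF DateTimeOriginal (tag 36867), or None."""
    exif = Image.open(path)._getexif() or {}
    date = exif.get(36867)  # 'YYYY:MM:DD HH:MM:SS'
    if not date:
        return None
    return date[:4] + date[5:7] + date[8:10]


print(exif_date_dir('photo.jpg'))  # e.g. '20131224'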
f7a1998f67a02530604e4b727c7600704e4eb341
|
update pelu to K2
|
keras_contrib/layers/advanced_activations.py
|
keras_contrib/layers/advanced_activations.py
|
from .. import initializers
from keras.engine import Layer
from .. import backend as K
import numpy as np
class PELU(Layer):
"""Parametric Exponential Linear Unit.
It follows:
`f(x) = alphas * (exp(x / betas) - 1) for x < 0`,
`f(x) = (alphas / betas) * x for x >= 0`,
where `alphas` & `betas` are learned arrays with the same shape as x.
# Input shape
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
# Output shape
Same shape as the input.
# Arguments
alphas_initializer: initialization function for the alpha variable weights.
betas_initializer: initialization function for the beta variable weights.
weights: initial weights, as a list of a single Numpy array.
shared_axes: the axes along which to share learnable
parameters for the activation function.
For example, if the incoming feature maps
are from a 2D convolution
with output shape `(batch, height, width, channels)`,
and you wish to share parameters across space
so that each filter only has one set of parameters,
set `shared_axes=[1, 2]`.
# References
- [PARAMETRIC EXPONENTIAL LINEAR UNIT FOR DEEP CONVOLUTIONAL NEURAL NETWORKS](https://arxiv.org/abs/1605.09332v3)
"""
def __init__(self, alphas_initializer='one', betas_initializer='one', weights=None, shared_axes=None, **kwargs):
self.supports_masking = True
self.alphas_initializer = initializers.get(alphas_initializer)
self.betas_initializer = initializers.get(betas_initializer)
self.initial_weights = weights
if not isinstance(shared_axes, (list, tuple)):
self.shared_axes = [shared_axes]
else:
self.shared_axes = list(shared_axes)
super(PELU, self).__initializer__(**kwargs)
def build(self, input_shape):
param_shape = list(input_shape[1:])
self.param_broadcast = [False] * len(param_shape)
if self.shared_axes[0] is not None:
for i in self.shared_axes:
param_shape[i - 1] = 1
self.param_broadcast[i - 1] = True
# Initialised as ones to emulate the default ELU
self.alphas = self.add_weight(param_shape,
name='alpha',
initializer=self.alphas_initializer)
self.betas = self.add_weight(param_shape, name='betas', initializer=self.betas_initializer)
self.trainable_weights = [self.alphas, self.betas]
if self.initial_weights is not None:
self.set_weights(self.initial_weights)
del self.initial_weights
def call(self, x, mask=None):
if K.backend() == 'theano':
pos = K.relu(x) * (K.pattern_broadcast(self.alphas, self.param_broadcast) /
K.pattern_broadcast(self.betas, self.param_broadcast))
neg = (K.pattern_broadcast(self.alphas, self.param_broadcast) *
(K.exp((-K.relu(-x)) / K.pattern_broadcast(self.betas, self.param_broadcast)) - 1))
else:
pos = K.relu(x) * self.alphas / self.betas
neg = self.alphas * (K.exp((-K.relu(-x)) / self.betas) - 1)
return neg + pos
def get_config(self):
config = {'alphas_initializer': initializers.serialize(self.alphas_initializer),
'betas_initializer': initializers.serialize(self.betas_initializer)}
base_config = super(PELU, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
|
Python
| 0 |
@@ -1963,23 +1963,16 @@
).__init
-ializer
__(**kwa
|
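Numerically, the layer above computes `f(x) = (alphas / betas) * x` for `x >= 0` and `alphas * (exp(x / betas) - 1)` for `x < 0`. A NumPy sketch of the forward pass, independent of the Keras layer machinery:

import numpy as np


def pelu(x, alpha=1.0, beta=1.0):
    """Parametric ELU: (alpha/beta)*x for x >= 0, alpha*(exp(x/beta)-1) for x < 0."""
    x = np.asarray(x, dtype=float)
    pos = np.maximum(x, 0.0) * alpha / beta
    neg = alpha * (np.exp(-np.maximum(-x, 0.0) / beta) - 1.0)
    return pos + neg


print(pelu([-2.0, 0.0, 2.0], alpha=1.0, beta=1.0))  # ELU-like when alpha == beta == 1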
920dbe007501ea99b95c41f94fb8f4a48c40717a
|
Add SensorsCollector, which collects data from libsensors via PySensors
|
src/collectors/SensorsCollector/SensorsCollector.py
|
src/collectors/SensorsCollector/SensorsCollector.py
|
Python
| 0 |
@@ -0,0 +1,1112 @@
+import diamond.collector%0A%0Aimport sensors%0A%0Aclass SensorsCollector(diamond.collector.Collector):%0A %22%22%22%0A This class collects data from libsensors. It should work against libsensors 2.x and 3.x, pending%0A support within the PySensors Ctypes binding: http://pypi.python.org/pypi/PySensors/%0A%0A Requires: 'sensors' to be installed, configured, and the relevant kernel modules to be loaded.%0A Requires: PySensors requires Python 2.6+%0A%0A If you're having issues, check your version of 'sensors'. This collector written against:%0A sensors version 3.1.2 with libsensors version 3.1.2%0A %22%22%22%0A%0A def get_default_config(self):%0A %22%22%22%0A Returns default collector settings.%0A %22%22%22%0A return %7B%0A 'enabled': 'True',%0A 'path': 'sensors',%0A 'fahrenheit': 'True'%0A %7D%0A%0A def collect(self):%0A sensors.init()%0A try:%0A for chip in sensors.iter_detected_chips():%0A for feature in chip:%0A self.publish(%22.%22.join(%5Bstr(chip), feature.label%5D), feature.get_value())%0A finally:%0A sensors.cleanup()%0A
|
|
66f32607d9d140be2a8e71270862074c53121a68
|
Create dataUIwgt.py
|
pyside/pyside_basics/jamming/dataUIwgt.py
|
pyside/pyside_basics/jamming/dataUIwgt.py
|
Python
| 0.000002 |
@@ -0,0 +1,1810 @@
+from PySide import QtGui%0A%0A%0Aclass Data(object):%0A def __init__(self):%0A self.requiredNames = %22A B C D E%22.split(' ')%0A self.availableActions = %22Set Select Delete%22.split(' ')%0A %0A def Set(self, name):%0A print %22setting %22, name%0A %0A def Select(self, name):%0A print %22selecting %22, name%0A%0A def Delete(self, name):%0A print %22deleting %22, name%0A%0Aclass ActionButton(QtGui.QPushButton):%0A delegateActionSignal = QtCore.Signal((str, str))%0A%0A def __init__(self, itemName, actionName, parent=None):%0A super(ActionButton, self).__init__(parent)%0A self.itemName = itemName%0A self.actionName = actionName%0A self.clicked.connect(self._delegate)%0A self.setText(self.actionName)%0A %0A def _delegate(self):%0A self.delegateActionSignal.emit(self.itemName, self.actionName)%0A%0A%0A# def delegated(itemName, actionName):%0A# print itemName, actionName%0A# %0A# self = ActionButton('A', 'Set')%0A# self.delegateActionSignal.connect(delegated)%0A# self.show()%0A%0Aclass DataUIWidget(QtGui.QWidget):%0A def __init__(self, data, parent=None):%0A super(DataUIWidget, self).__init__(parent)%0A self.data = data%0A self._setupUI()%0A%0A def handleAction(self, itemName, actionName):%0A print itemName, actionName%0A%0A def _setupUI(self):%0A layout = QtGui.QGridLayout()%0A self.setLayout(layout)%0A %0A for index, name in enumerate(self.data.requiredNames):%0A lbl = QtGui.QLabel(name)%0A layout.addWidget(lbl, index, 0)%0A for ind, actName in enumerate(self.data.availableActions, 1):%0A btn = ActionButton(name, actName)%0A btn.delegateActionSignal.connect(self.handleAction)%0A layout.addWidget(btn, index, ind)%0A%0Adata = Data()%0Aself = DataUIWidget(data)%0Aself.show()%0A
|
|
ae948c95ea0087f33f13ef3463dc022eda0301a2
|
Add a solution for the MadLibs lab
|
python/labs/make-a-short-story/mystory.py
|
python/labs/make-a-short-story/mystory.py
|
Python
| 0.000008 |
@@ -0,0 +1,722 @@
+# Create a function for adjectives so I don't repeat myself in prompts.%0Adef get_adjective():%0A return raw_input(%22Give me an adjective: %22)%0A%0Adef get_noun():%0A return raw_input(%22Give me a noun: %22)%0A%0Adef get_verb():%0A return raw_input(%22Give me a verb: %22)%0A%0Aadjective1 = get_adjective()%0Anoun1 = get_noun()%0Averb1 = get_verb()%0Aadjective2 = get_adjective()%0Anoun2 = get_noun()%0Averb2 = get_verb()%0A%0A# Use parentheses so Python will %22know%22 the string has multiple lines%0Aprint (%22At CSSI we were all %22 + adjective1 + %22 when a %22 + noun1 +%0A %22 fell through the ceiling. See-Mong tried to %22 + verb1 + %22 it but it %22 +%0A %22was too %22 + adjective2 + %22. Instead, Zack gave it a %22 + noun2 + %22 which %22 +%0A %22caused it to %22 + verb2 + %22.%22)%0A
|
|
3cf1eb01540a126ef6a38219f89a41a0f05ad63f
|
Format fixing
|
constants.py
|
constants.py
|
Python
| 0.000001 |
@@ -0,0 +1,1292 @@
+UNITS = %22SI%22%0A%0AUNIT_LENGTH = 1%0AUNIT_MASS = 1%0AUNIT_TIME = 1%0A%0ADEFAULT_GRAVITATIONAL_CONSTANT = 6.673e-11 # m3 kg-1 s-2 %0ADEFAULT_SPEED_OF_LIGHT = 299792458 # m s-1%0ADEFAULT_SOLAR_MASS = 1.98892e30 # kg%0ADEFAULT_PARSEC = 3.08568025e16 # m%0ADEFAULT_YEAR = 31556926 # s%0A%0ADEFAULT_h = 0.73%0A%0AG = GRAVITATIONAL_CONSTANT = DEFAULT_GRAVITATIONAL_CONSTANT%0Ac = SPEED_OF_LIGHT = DEFAULT_SPEED_OF_LIGHT%0A%0Adef set_units(units):%0A global UNITS%0A global c, SPEED_OF_LIGHT, G, GRAVITATIONAL_CONSTANT%0A %0A if units==%22SI%22:%0A UNIT_LENGTH = 1%0A UNIT_MASS = 1%0A UNIT_TIME = 1%0A%0A elif units==%22GALACTIC%22:%0A UNIT_LENGTH = (1e6 * DEFAULT_PARSEC / DEFAULT_h) # 1.0 Mpc h%5E-1%0A UNIT_MASS = (1e10 * DEFAULT_SOLAR_MASS / DEFAULT_h) # 10%5E10 M_solar h%5E-1%0A UNIT_TIME = (1e3 * DEFAULT_PARSEC / DEFAULT_h) # 977.8 Gyr h%5E-1%0A %0A elif units==%22CGI%22:%0A UNIT_LENGTH = 0.01%0A UNIT_MASS = 0.001%0A UNIT_TIME = 1%0A%0A UNITS = units%0A G = GRAVITATIONAL_CONSTANT = DEFAULT_GRAVITATIONAL_CONSTANT * UNIT_MASS * UNIT_TIME**2 / UNIT_LENGTH**3%0A c = SPEED_OF_LIGHT = DEFAULT_SPEED_OF_LIGHT * UNIT_TIME / UNIT_LENGTH;%0A %0Aset_units(%22SI%22)
|
|
476f2493576c55c0f412165e3c3ce8225599ba0a
|
Copy caller_checker.py
|
server/src/voodoo/gen2/caller_checker.py
|
server/src/voodoo/gen2/caller_checker.py
|
Python
| 0.000004 |
@@ -0,0 +1,927 @@
+#!/usr/bin/python%0A# -*- coding: utf-8 -*-%0A#%0A# Copyright (C) 2005 onwards University of Deusto%0A# All rights reserved.%0A#%0A# This software is licensed as described in the file COPYING, which%0A# you should have received as part of this distribution.%0A#%0A# This software consists of contributions made by many individuals,%0A# listed below:%0A#%0A# Author: Pablo Ordu%C3%B1a %[email protected]%3E%0A#%0A%0AALL = 'All servers'%0A%0Adef caller_check(servers = ALL):%0A def func_wrapper(func):%0A def wrapped_func(*args, **kargs):%0A # TODO%0A# try:%0A# servers%5B0%5D%0A# except TypeError:%0A# all_servers = (servers,)%0A# else:%0A# all_servers = servers%0A #TODO: work with all_servers%0A return func(*args,**kargs)%0A wrapped_func.__name__ = func.__name__%0A wrapped_func.__doc__ = func.__doc__%0A return wrapped_func%0A return func_wrapper%0A%0A
|
|
77b34390345208a6e0bc5ad30cdce62e42ca0c56
|
Add simple command to list speakers and tickets
|
wafer/management/commands/pycon_speaker_tickets.py
|
wafer/management/commands/pycon_speaker_tickets.py
|
Python
| 0 |
@@ -0,0 +1,1609 @@
+import sys%0Aimport csv%0Afrom optparse import make_option%0A%0Afrom django.core.management.base import BaseCommand%0A%0Afrom django.contrib.auth.models import User%0Afrom wafer.talks.models import ACCEPTED%0A%0A%0Aclass Command(BaseCommand):%0A help = %22List speakers and associated tickets.%22%0A%0A option_list = BaseCommand.option_list + tuple(%5B%0A make_option('--speakers', action=%22store_true%22, default=False,%0A help='List speakers and tickets (for accepted talks)'),%0A make_option('--allspeakers', action=%22store_true%22, default=False,%0A help='List speakers and tickets (for all talks)'),%0A %5D)%0A%0A def _speaker_tickets(self, options):%0A people = User.objects.filter(talks__isnull=False).distinct()%0A%0A csv_file = csv.writer(sys.stdout)%0A for person in people:%0A # We query talks to filter out the speakers from ordinary%0A # accounts%0A if options%5B'allspeakers'%5D:%0A titles = %5Bx.title for x in person.talks.all()%5D%0A else:%0A titles = %5Bx.title for x in%0A person.talks.filter(status=ACCEPTED)%5D%0A if not titles:%0A continue%0A tickets = person.ticket.all()%0A if tickets:%0A ticket = '%25d' %25 tickets%5B0%5D.barcode%0A else:%0A ticket = 'NO TICKET PURCHASED'%0A row = %5Bx.encode(%22utf-8%22) for x in (person.get_full_name(),%0A person.email,%0A ticket)%5D%0A csv_file.writerow(row)%0A%0A def handle(self, *args, **options):%0A self._speaker_tickets(options)%0A
|
|
c2e882855ea56c265ef46646ec5e20f78d0ad064
|
add migrations for missing phaselogs after fixing bulk project status updates
|
bluebottle/projects/migrations/0028_auto_20170619_1555.py
|
bluebottle/projects/migrations/0028_auto_20170619_1555.py
|
Python
| 0 |
@@ -0,0 +1,1279 @@
+# -*- coding: utf-8 -*-%0A# Generated by Django 1.10.7 on 2017-06-19 13:55%0Afrom __future__ import unicode_literals%0Aimport datetime%0Afrom django.db import migrations%0A%0Adef fix_phaselog_for_incorrect_project_statuses(apps, schema_editor):%0A %22%22%22%0A #BB-9886 : Fix to add a new project phase status logs for projects whose status does not correspond to the last%0A project phase status log. We have to fake a timestamp as we dont know when the status was really updated.%0A %22%22%22%0A Project = apps.get_model('projects', 'Project')%0A ProjectPhaseLog = apps.get_model('projects', 'ProjectPhaseLog')%0A%0A for project in Project.objects.all():%0A last_project_phase_log = ProjectPhaseLog.objects.filter(project=project).order_by('start').last()%0A if project.status != last_project_phase_log.status:%0A start = last_project_phase_log.start + datetime.timedelta(minutes = 1)%0A log = ProjectPhaseLog.objects.create(project=project, status=project.status, start=start)%0A log.save()%0A%0Adef dummy(apps, schema_editor):%0A pass%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('projects', '0027_auto_20170602_2240'),%0A %5D%0A%0A operations = %5B%0A migrations.RunPython(fix_phaselog_for_incorrect_project_statuses, dummy),%0A %5D%0A
|
|
3bf484de25cc77b0fc8f04e77f3993f4c1e939a8
|
Break on self.quitting line
|
tests/test_shells/postproc.py
|
tests/test_shells/postproc.py
|
#!/usr/bin/env python
# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import os
import socket
import sys
import codecs
import platform
import re
test_type = sys.argv[1]
test_client = sys.argv[2]
shell = sys.argv[3]
fname = os.path.join('tests', 'shell', '.'.join((shell, test_type, test_client, 'full.log')))
new_fname = os.path.join('tests', 'shell', '.'.join((shell, test_type, test_client, 'log')))
pid_fname = os.path.join('tests', 'shell', '3rd', 'pid')
is_pypy = platform.python_implementation() == 'PyPy'
try:
with open(pid_fname, 'r') as P:
pid = P.read().strip()
except IOError:
pid = None
hostname = socket.gethostname()
user = os.environ['USER']
REFS_RE = re.compile(r'^\[\d+ refs\]\n')
IPYPY_DEANSI_RE = re.compile(r'\033(?:\[(?:\?\d+[lh]|[^a-zA-Z]+[a-ln-zA-Z])|[=>])')
start_str = 'cd tests/shell/3rd'
if shell == 'pdb':
start_str = 'class Foo(object):'
with codecs.open(fname, 'r', encoding='utf-8') as R:
with codecs.open(new_fname, 'w', encoding='utf-8') as W:
found_cd = False
i = -1
for line in (R if shell != 'fish' else R.read().split('\n')):
i += 1
if not found_cd:
found_cd = (start_str in line)
continue
if 'true is the last line' in line:
break
line = line.translate({
ord('\r'): None
})
if REFS_RE.match(line):
continue
line = line.replace(hostname, 'HOSTNAME')
line = line.replace(user, 'USER')
if pid is not None:
line = line.replace(pid, 'PID')
if shell == 'fish':
res = ''
try:
while line.index('\033[0;'):
start = line.index('\033[0;')
end = line.index('\033[0m', start)
res += line[start:end + 4] + '\n'
line = line[end + 4:]
except ValueError:
pass
line = res
elif shell == 'tcsh':
try:
start = line.index('\033[0;')
end = line.index(' ', start)
line = line[start:end] + '\033[0m\n'
except ValueError:
line = ''
elif shell == 'mksh':
# Output is different in Travis: on my machine I see the full
# command, in Travis it is truncated just after `true`.
if line.startswith('[1] + Terminated'):
line = '[1] + Terminated bash -c ...\n'
elif shell == 'dash':
# Position of this line is not stable: it may go both before and
# after the next line
if line.startswith('[1] + Terminated'):
continue
elif shell == 'ipython' and is_pypy:
try:
end_idx = line.rindex('\033[0m')
try:
idx = line[:end_idx].rindex('\033[1;1H')
except ValueError:
idx = line[:end_idx].rindex('\033[?25h')
line = line[idx + len('\033[1;1H'):]
except ValueError:
pass
try:
data_end_idx = line.rindex('\033[1;1H')
line = line[:data_end_idx] + '\n'
except ValueError:
pass
if line == '\033[1;1H\n':
continue
was_empty = line == '\n'
line = IPYPY_DEANSI_RE.subn('', line)[0]
if line == '\n' and not was_empty:
line = ''
elif shell == 'rc':
if line == 'read() failed: Connection reset by peer\n':
line = ''
elif shell == 'pdb':
if is_pypy:
if line == '\033[?1h\033=\033[?25l\033[1A\n':
line = ''
line = IPYPY_DEANSI_RE.subn('', line)[0]
if line == '\n':
line = ''
if line.startswith(('>',)):
line = ''
elif line == '-> self.quitting = 1\n':
line = '-> self.quitting = True\n'
elif line == '\n':
line = ''
W.write(line)
|
Python
| 0 |
@@ -3421,16 +3421,71 @@
ne = ''%0A
+%09%09%09%09if line == '-%3E self.quitting = True%5Cn':%0A%09%09%09%09%09break%0A
%09%09%09W.wri
|
9cb5658c53a2202931e314ced3ee66714301a087
|
Create _im_rot_manual_detect.py
|
resources/_py_in/_im_rot_manual_detect.py
|
resources/_py_in/_im_rot_manual_detect.py
|
Python
| 0.000064 |
@@ -0,0 +1,2557 @@
+# PYTHON%0A# MANISH DEVGAN%0A# https://github.com/gabru-md%0A%0A# Program helps in detecting faces which are%0A# tilted right or left! The detection is done by %0A# rotating the image and the trying to detect the %0A# potential faces in it!%0A%0A#BEGIN%0A%0A# importing %0A%0Aimport cv2%0Aimport numpy as np%0Aimport os%0Aimport sys%0A%0A%0A# function to rotate the image to a specific angle begins%0Adef rotate(img,angle):%0A image = np.copy(img)%0A (h, w) = image.shape%5B:2%5D%0A (cX, cY) = (w // 2, h // 2)%0A%0A # grab the rotation matrix (applying the negative of the%0A # angle to rotate clockwise), then grab the sine and cosine%0A # (i.e., the rotation components of the matrix)%0A M = cv2.getRotationMatrix2D((cX, cY), angle, 1.0)%0A cos = np.abs(M%5B0, 0%5D)%0A sin = np.abs(M%5B0, 1%5D)%0A%0A # compute the new bounding dimensions of the image%0A nW = int((h * sin) + (w * cos))%0A nH = int((h * cos) + (w * sin))%0A%0A # adjust the rotation matrix to take into account translation%0A M%5B0, 2%5D += (nW / 2) - cX%0A M%5B1, 2%5D += (nH / 2) - cY%0A%0A # perform the actual rotation and return the image%0A _im = cv2.warpAffine(image, M, (nW, nH))%0A # a new vairable is taken instead of the old one as it will then form 2 different copies%0A # instead of forming a reference of the object or altering the object itself%0A%0A # now show the rotated image!%0A%0A return _im%0A# function ends %0A%0A# reading image which is to be rotated%0A# this image will then be further looked in for faces at different angles%0Aimage = cv2.imread('te.jpg')%0A%0AcascPath = %22haarcascade_frontalface_default.xml%22%0A%0Aos.chdir('C:%5CUsers%5CManish%5CDesktop')%0A%0A# range is taken from 0 to 360 %0A# therefore we have range(360+1)%0A%0Afor i in range(361):%0A # new object of image type or numpy.ndarray is created and named _im%0A # this will have our rotated image%0A _im = rotate(image,i)%0A %0A # converting our _im to grayscale to detect potential faces in it!%0A _gray = cv2.cvtColor(_im,cv2.COLOR_BGR2GRAY)%0A %0A # declaring a classifier based on the cascade specified%0A # in this case it is : 'haarcascade_frontalface_default.xml'%0A faces = faceCascade.detectMultiScale(%0A _gray,%0A scaleFactor = 1.2,%0A minNeighbors=1,%0A minSize=(15,15),%0A flags = cv2.cv.CV_HAAR_SCALE_IMAGE%0A )%0A %0A # drawing a box around the potential faces that have been identified%0A %0A for (x,y,w,h) in faces:%0A cv2.rectangle(_im,(x+int(w*0.18),y+int(h*0.15)),(x+int(w*0.80),y+int(h*0.90)),(0,255,0),2)%0A %0A # showing the rotated image to the user!%0A cv2.imshow('Rotated Image',_im)%0A if cv2.waitKey(0) == 27:%0A break%0A%0A#END%0A
|
|
36ada2dc33ccb3cb1803f67a112e3559efd7e821
|
Add file to initialize item endpoint - Add item_fields
|
app/api/item.py
|
app/api/item.py
|
Python
| 0 |
@@ -0,0 +1,440 @@
+%22%22%22 Routes for bucket_item Functionality%22%22%22%0A# from flask import g%0A# from flask import Blueprint, request, jsonify%0Afrom flask_restplus import fields%0A# from app.models.bucketlist import Bucketlist%0A# from app import api%0A%0Aitem_fields = %7B%0A 'id': fields.Integer,%0A 'name': fields.String,%0A 'date_created': fields.DateTime(attribute='created_at'),%0A 'date_modified': fields.DateTime(attribute='modified_at'),%0A 'done': fields.Boolean%0A%7D%0A
|
|
6c34347dc0bac58ca4e8e25f355f6ad0f7295ccd
|
Find/replace with regular expressions
|
ocradmin/plugins/util_nodes.py
|
ocradmin/plugins/util_nodes.py
|
"""
Nodes to perform random things.
"""
from nodetree import node, writable_node, manager
from ocradmin.plugins import stages, generic_nodes
NAME = "Utils"
from HTMLParser import HTMLParser
class HTMLContentHandler(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self._data = []
self._ctag = None
self._cattrs = None
def data(self):
return "".join(self._data)
def content_data(self, data, tag, attrs):
"""ABC method. Does nothing by default."""
return data
def parsefile(self, filename):
with open(filename, "r") as f:
for line in f.readlines():
self.feed(line)
return self.data()
def parse(self, string):
self._data = []
self.feed(string)
return self.data()
def handle_decl(self, decl):
self._data.append("<!%s>" % decl)
def handle_comment(self, comment):
self._data.append("<!-- %s -->" % comment)
def handle_starttag(self, tag, attrs):
"""Simple add the tag to the data stack."""
self._ctag = tag
self._cattrs = attrs
self._data.append(
"<%s %s>" % (tag, " ".join(["%s='%s'" % a for a in attrs])))
def handle_data(self, data):
self._data.append(self.content_data(
data, self._ctag, self._cattrs))
def handle_endtag(self, tag):
self._data.append("</%s>" % tag)
class FindReplaceNode(node.Node, generic_nodes.TextWriterMixin):
"""
Find and replace stuff in input with output.
"""
stage = stages.UTILS
name = "Utils::FindReplace"
description = "Find and replace string in HOCR documents"
arity = 1
intypes = [generic_nodes.SafeUnicode]
outtype = generic_nodes.SafeUnicode
_parameters = [
dict(name="find", value=""),
dict(name="replace", value=""),
]
def content_data(self, data, tag, attrs):
"""Replace all content data."""
find = self._params.get("find")
repl = self._params.get("replace")
if not (find and repl):
return data
return data.replace(find, repl)
def _eval(self):
"""
Run find/replace on input
"""
xml = self.eval_input(0)
parser = HTMLContentHandler()
parser.content_data = self.content_data
return parser.parse(xml)
class Manager(manager.StandardManager):
"""
Handle Utils nodes.
"""
@classmethod
def get_node(self, name, **kwargs):
if name.find("::") != -1:
name = name.split("::")[-1]
if name == "FindReplace":
return FindReplaceNode(**kwargs)
@classmethod
def get_nodes(cls, *oftypes):
return super(Manager, cls).get_nodes(
*oftypes, globals=globals())
if __name__ == "__main__":
for n in Manager.get_nodes():
print n
|
Python
| 0.999564 |
@@ -34,16 +34,26 @@
s.%0A%22%22%22%0A%0A
+import re%0A
from nod
@@ -1913,85 +1913,103 @@
def
-content_data(self, data, tag, attrs):%0A %22%22%22Replace all content data.%22%22%22
+__init__(self, *args, **kwargs):%0A super(FindReplaceNode, self).__init__(*args, **kwargs)
%0A
@@ -2017,138 +2017,400 @@
+self._
find
+re
=
-self._params.get(%22find%22)%0A repl = self._params.get(%22replace%22)%0A if not (find and repl):%0A return
+None%0A self._replace = None%0A%0A def _validate(self):%0A super(FindReplaceNode, self)._validate()%0A try:%0A re.compile(self._params.get(%22find%22))%0A except Exception, err:%0A raise node.ValidationError(self, %22find: regular expression error: %25s%22 %25 err)%0A%0A def content_data(self, data, tag, attrs):%0A %22%22%22Replace all content
data
+.%22%22%22
%0A
@@ -2425,31 +2425,44 @@
urn
-data.replace(find, repl
+self._findre.sub(self._replace, data
)%0A%0A
@@ -2572,16 +2572,269 @@
nput(0)%0A
+ find = self._params.get(%22find%22, %22%22)%0A replace = self._params.get(%22replace%22, %22%22)%0A if find.strip() == %22%22 or replace.strip() == %22%22:%0A return xml%0A self._findre = re.compile(find)%0A self._replace = replace %0A
|
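The technique above — regex-substituting only the text nodes while echoing tags back verbatim — ports directly to Python 3's `html.parser`. A minimal sketch:

import re
from html.parser import HTMLParser


class TextSubber(HTMLParser):
    """Apply a regex substitution to text content only, leaving tags intact."""

    def __init__(self, pattern, replacement):
        super().__init__(convert_charrefs=False)
        self._findre = re.compile(pattern)
        self._replace = replacement
        self._out = []

    def handle_starttag(self, tag, attrs):
        self._out.append(self.get_starttag_text())

    def handle_endtag(self, tag):
        self._out.append('</%s>' % tag)

    def handle_data(self, data):
        self._out.append(self._findre.sub(self._replace, data))

    def result(self):
        return ''.join(self._out)


parser = TextSubber(r'teh', 'the')
parser.feed('<p>teh quick <b>teh</b> fox</p>')
print(parser.result())  # <p>the quick <b>the</b> fox</p>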
bf53f738bb5408622b08eedb9b0b0c6f80487a0c
|
Create 0603_verbs_vehicles.py
|
2019/0603_verbs_vehicles.py
|
2019/0603_verbs_vehicles.py
|
Python
| 0.000036 |
@@ -0,0 +1,847 @@
+#!/usr/bin/env python3%0A# -*- coding: utf-8 -*-%0A%22%22%22%0ANPR 2019-06-02%0Ahttps://www.npr.org/2019/06/02/728600551/sunday-puzzle-lets-go-toe-to-toe?utm_medium=RSS&utm_campaign=sundaypuzzle%0A%0AThink of a verb in its present and past tense forms. %0ADrop the first letter of each word. %0AThe result will name two vehicles. What are they?%0A%22%22%22%0A%0Aimport requests%0Aimport sys%0Asys.path.append('..')%0Aimport nprcommontools as nct%0A%0A# URL with verb forms%0AURL = 'https://cdn.jsdelivr.net/gh/kulakowka/english-verbs-conjugation@master/src/services/ConjugationService/verbs.json'%0Ar = requests.get(URL)%0Aj = r.json()%0A%0AVEHICLES = frozenset(nct.get_category_members('vehicle'))%0A%0A#%25%25%0Afor d in j:%0A verb = d%5B0%5D%0A past = d%5B1%5D%0A if past is not None:%0A v1 = verb%5B1:%5D%0A p1 = past%5B1:%5D%0A if v1 in VEHICLES and p1 in VEHICLES:%0A print(verb, past, v1, p1)%0A
|
|
60f54674cc7bb619d5275dbd49e346ecee276ff2
|
fix reload module
|
importloader.py
|
importloader.py
|
Python
| 0.000001 |
@@ -0,0 +1,348 @@
+%EF%BB%BF#!/usr/bin/python%0A# -*- coding: UTF-8 -*-%0A%0Adef load(name):%0A%09try:%0A%09%09obj = __import__(name)%0A%09%09reload(obj)%0A%09%09return obj%0A%09except:%0A%09%09pass%0A%0A%09try:%0A%09%09import importlib%0A%09%09obj = importlib.__import__(name)%0A%09%09importlib.reload(obj)%0A%09%09return obj%0A%09except:%0A%09%09pass%0A%0Adef loads(namelist):%0A%09for name in namelist:%0A%09%09obj = load(name)%0A%09%09if obj is not None:%0A%09%09%09return obj%0A
|
|
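On Python 3 the builtin `reload` is gone and `importlib.reload` only accepts an already-imported module object, which is why the helper above tries both paths. A Python 3-only equivalent is shorter:

import importlib
import sys


def load(name):
    """Import module `name`, reloading it if it was already imported."""
    if name in sys.modules:
        return importlib.reload(sys.modules[name])
    return importlib.import_module(name)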
152db7b696b949c67b5121d42fba28ec31eceb47
|
Create everyeno_keys.py
|
everyeno_keys.py
|
everyeno_keys.py
|
Python
| 0 |
@@ -0,0 +1,257 @@
+tumblr_consumer_key = ''%0Atumblr_consumer_secret = ''%0Atumblr_token_key = ''%0Atumblr_token_secret = ''%0A%0Agoogle_developerKey = ''%0A%0Atwitter_consumer_key = ''%0Atwitter_consumer_secret = ''%0Atwitter_token_key = ''%0Atwitter_token_secret = ''%0A%0Adiscogs_user_token = ''%0A
|
|
768b6fd5f4af994ca9af1470cfcc7fa7eb216a8f
|
Add a binding.gyp file.
|
binding.gyp
|
binding.gyp
|
Python
| 0 |
@@ -0,0 +1,254 @@
+%7B%0A 'targets': %5B%0A %7B%0A 'target_name': 'validation',%0A 'cflags': %5B '-O3' %5D,%0A 'sources': %5B 'src/validation.cc' %5D%0A %7D,%0A %7B%0A 'target_name': 'bufferutil',%0A 'cflags': %5B '-O3' %5D,%0A 'sources': %5B 'src/bufferutil.cc' %5D%0A %7D%0A %5D%0A%7D%0A
|
|
e9b8330d71e48702198117652768ba6791bc1401
|
adds hello world
|
app/helloworld.py
|
app/helloworld.py
|
Python
| 0.999588 |
@@ -0,0 +1,972 @@
+#!/usr/local/bin/python3.7%0A%0Afrom pprint import pprint%0Afrom bson.objectid import ObjectId%0Afrom pymongo import MongoClient%0Aimport datetime%0A%0Aclient = MongoClient('mongodb://localhost:27017/')%0Adb = client.test_database%0Acollection = db.test_collection%0Apost = %7B%22author%22: %22Mike%22,%0A %22text%22: %22My first blog post!%22,%0A %22tags%22: %5B%22mongodb%22, %22python%22, %22pymongo%22%5D,%0A %22date%22: datetime.datetime.utcnow()%7D%0Aposts = db.posts%0Apost_id = posts.insert_one(post).inserted_id%0Apprint(f%22post_id: %7Bpost_id%7D%22)%0Apprint(f%22list_collection_names: %7Bdb.list_collection_names()%7D%22)%0Apprint(posts.find_one())%0Apprint(posts.find_one(%7B%22author%22: %22Mike%22%7D))%0Apprint(posts.find_one(%7B%22author%22: %22Eliot%22%7D))%0Apprint(posts.find_one(%7B%22_id%22: post_id%7D))%0A%0A%0A# The web framework gets post_id from the URL and passes it as a string%0Adef get(post_id):%0A # Convert from string to ObjectId:%0A document = client.db.collection.find_one(%7B'_id': ObjectId(post_id)%7D)%0Apprint(f%22getting post_id '%7Bpost_id%7D' with get(): %7Bget(post_id)%7D%22)%0A
|
|
3bbf06964452683d986db401556183f575d15a55
|
Add script for inserting project into DB
|
insert-project.py
|
insert-project.py
|
Python
| 0.000001 |
@@ -0,0 +1,1507 @@
+#!/usr/bin/env python3%0Aimport pymongo%0Aimport subprocess%0Aimport re%0Afrom datetime import datetime%0Aimport argparse%0Afrom json import load as load_json%0Aimport sys%0A%0A%0Adef _info(msg):%0A sys.stdout.write(msg + '%5Cn')%0A sys.stdout.flush()%0A%0A%0Acl_parser = argparse.ArgumentParser(description='Insert a project into Meteor%5C's local MongoDB')%0Acl_parser.add_argument('input', help='JSON input file')%0Acl_parser.add_argument('--site', default=None, help='Specify Meteor site (default: localhost)')%0Aargs = cl_parser.parse_args()%0A%0Awith open(args.input) as input_file:%0A json = load_json(input_file)%0A%0Acommand = %5B'meteor', 'mongo', '-U'%5D%0Aif args.site:%0A command.append(args.site)%0A_info('Getting Mongo URL...')%0Amongo_url = subprocess.check_output(command).decode().strip()%0Amongo_url, db_name = mongo_url.rsplit('/', 1)%0A_info('Connecting to MongoDB: %7B%7D (DB: %7B%7D)'.format(mongo_url, db_name))%0Aclient = pymongo.MongoClient(mongo_url)%0Adb = client%5Bdb_name%5D%0A%0Aproject = %7B%0A 'created': datetime.utcnow(),%0A 'owner': json%5B'owner'%5D,%0A 'projectId': json%5B'id'%5D,%0A 'tags': json%5B'tags'%5D,%0A 'text': json%5B'description'%5D,%0A 'title': json%5B'title'%5D,%0A 'instructions': json%5B'instructions'%5D,%0A 'pictures': json%5B'pictures'%5D,%0A 'files': json%5B'files'%5D,%0A 'license': json%5B'license'%5D,%0A%7D%0Adb.projects.update(%7B'owner': project%5B'owner'%5D, 'projectId': project%5B'projectId'%5D%7D, project,%0A upsert=True)%0A_info('Successfully inserted project %5C'%7B%7D/%7B%7D%5C' (%7B%7D)'.format(%0A project%5B'owner'%5D,%0A project%5B'projectId'%5D,%0A project%5B'title'%5D,%0A))%0A
|
|
28e483c32d3e946f0f9159fe7459531f284d50aa
|
Add shared counter support to cache.
|
app/molcounter.py
|
app/molcounter.py
|
Python
| 0 |
@@ -0,0 +1,2714 @@
+from google.appengine.api import memcache %0Afrom google.appengine.ext import db%0Aimport random%0Aimport collections%0Aimport logging%0A%0Aclass GeneralCounterShardConfig(db.Model):%0A %22%22%22Tracks the number of shards for each named counter.%22%22%22%0A name = db.StringProperty(required=True)%0A num_shards = db.IntegerProperty(required=True, default=20)%0A%0A%0Aclass GeneralCounterShard(db.Model):%0A %22%22%22Shards for each named counter%22%22%22%0A name = db.StringProperty(required=True)%0A count = db.IntegerProperty(required=True, default=0)%0A %0A %0Adef get_top_names(top_count, all_results):%0A logging.info('%25s from request' %25 top_count)%0A d = collections.defaultdict(list)%0A for counter in GeneralCounterShard.all():%0A d%5Bcounter.name.split('-')%5B-1%5D%5D.append(counter.count)%0A results = %7B%7D%0A for name, counts in d.iteritems():%0A results%5Bname%5D = reduce(lambda x,y: x+y, counts)%0A top = %7B%7D%0A x = collections.defaultdict(list)%0A for name, count in results.iteritems():%0A x%5Bcount%5D.append(name)%0A keys = x.keys()%0A keys.sort()%0A keys.reverse()%0A tc = top_count%0A for k in keys:%0A if top_count %3E 0:%0A logging.info(top_count)%0A top%5Breduce(lambda x,y: '%25s,%25s' %25 (x,y), x%5Bk%5D)%5D = k%0A top_count -= 1%0A else:%0A break%0A logging.info(top)%0A if all_results:%0A return %7B'top-%25s' %25 tc: top, 'results': results%7D%0A else:%0A return %7B'top-%25s' %25 tc: top%7D%0A%0Adef get_count(name):%0A %22%22%22Retrieve the value for a given sharded counter.%0A %0A Parameters:%0A name - The name of the counter %0A %22%22%22%0A total = memcache.get(name)%0A if total is None:%0A total = 0%0A for counter in GeneralCounterShard.all().filter('name = ', name):%0A total += counter.count%0A memcache.add(name, total, 60)%0A return total%0A %0Adef increment(name):%0A %22%22%22Increment the value for a given sharded counter.%0A %0A Parameters:%0A name - The name of the counter %0A %22%22%22%0A config = GeneralCounterShardConfig.get_or_insert(name, name=name)%0A def txn():%0A index = random.randint(0, config.num_shards - 1)%0A shard_name = name + str(index)%0A counter = GeneralCounterShard.get_by_key_name(shard_name)%0A if counter is None:%0A counter = GeneralCounterShard(key_name=shard_name, name=name)%0A counter.count += 1%0A counter.put()%0A db.run_in_transaction(txn)%0A # does nothing if the key does not exist%0A memcache.incr(name)%0A %0Adef increase_shards(name, num): %0A %22%22%22Increase the number of shards for a given sharded counter.%0A Will never decrease the number of shards.%0A %0A Parameters:%0A name - The name of the counter%0A num - How many shards to use%0A %0A %22%22%22%0A config = GeneralCounterShardConfig.get_or_insert(name, name=name)%0A def txn():%0A if config.num_shards %3C num:%0A config.num_shards = num%0A config.put() %0A db.run_in_transaction(txn)%0A%0A%0A
|
|
dbed291584150ef3d219c487f32b47a8f4907195
|
question 1.7
|
crack_1_7.py
|
crack_1_7.py
|
Python
| 0.999967 |
@@ -0,0 +1,394 @@
+test = %5B%5B0,1,2,3%5D,%0A%09%5B1,0,2,3%5D,%0A%09%5B1,2,0,3%5D,%0A%09%5B1,2,3,0%5D%5D%0Araw = %5B%5D%0Acol = %5B%5D%0Alength = len(test)%0Afor x in xrange(length):%0A%09for y in xrange(length):%0A%09%09if test%5Bx%5D%5By%5D == 0:%0A%09%09%09raw.append(x)%0A%09%09%09col.append(y)%0A%0Afor x in raw:%0A%09for y in xrange(length):%0A%09%09test%5Bx%5D%5By%5D = 0%0A%0Afor y in col:%0A%09for x in xrange(length):%0A%09%09test%5Bx%5D%5By%5D = 0%0A%0Afor x in xrange(length):%0A%09for y in xrange(length):%0A%09%09print test%5Bx%5D%5By%5D,%0A%09print%0A
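The diff answers the classic "zero matrix" question (if an element is 0, set its entire row and column to 0) by recording coordinates first and clearing second. A slightly more idiomatic sketch of the same algorithm, using sets so each row or column is cleared once even when it contains several zeros, and working for non-square matrices as well:

def zero_matrix(m):
    # Record every row and column index that contains a zero...
    rows = {i for i, row in enumerate(m) for x in row if x == 0}
    cols = {j for row in m for j, x in enumerate(row) if x == 0}
    # ...then clear them in a single pass.
    for i, row in enumerate(m):
        for j in range(len(row)):
            if i in rows or j in cols:
                row[j] = 0
    return m

test = [[0, 1, 2, 3],
        [1, 0, 2, 3],
        [1, 2, 0, 3],
        [1, 2, 3, 0]]
for row in zero_matrix(test):
    print(row)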
|
|
305b53b4c0ab4be7e2e2c4f9d7d4754915d3eea6
|
Fix bug in perturb_params
|
dadi/Misc.py
|
dadi/Misc.py
|
"""
Miscellaneous utility functions. Including ms simulation.
"""
import os,sys,time
import numpy
import scipy.linalg
#: Storage for times at which each stream was flushed.
__times_last_flushed = {}
def delayed_flush(stream=sys.stdout, delay=1):
"""
Flush a stream, ensuring that it is only flushed every 'delay' *minutes*.
Note that upon the first call to this method, the stream is not flushed.
stream: The stream to flush. For this to work with simple 'print'
statements, the stream should be sys.stdout.
delay: Minimum time *in minutes* between flushes.
This function is useful to prevent I/O overload on the cluster.
"""
global __times_last_flushed
curr_time = time.time()
# If this is the first time this method has been called with this stream,
# we need to fill in the times_last_flushed dict. setdefault will do this
# without overwriting any entry that may be there already.
if stream not in __times_last_flushed:
__times_last_flushed[stream] = curr_time
last_flushed = __times_last_flushed[stream]
# Note that time.time() returns values in seconds, hence the factor of 60.
if (curr_time - last_flushed) >= delay*60:
stream.flush()
__times_last_flushed[stream] = curr_time
def ensure_1arg_func(var):
"""
Ensure that var is actually a one-argument function.
This is primarily used to convert arguments that are constants into
trivial functions of time for use in integrations where parameters are
allowed to change over time.
"""
if numpy.isscalar(var):
# If a constant was passed in, use lambda to make it a nice
# simple function.
var_f = lambda t: var
else:
var_f = var
if not callable(var_f):
raise ValueError('Argument is not a constant or a function.')
try:
var_f(0.0)
except TypeError:
raise ValueError('Argument is not a constant or a one-argument '
'function.')
return var_f
def ms_command(theta, ns, core, iter, recomb=0, rsites=None):
"""
Generate ms command for simulation from core.
theta: Assumed theta
ns: Sample sizes
core: Core of ms command that specifies demography.
iter: Iterations to run ms
recomb: Assumed recombination rate
rsites: Sites for recombination. If None, default is 10*theta.
"""
ms_command = "ms %(total_chrom)i %(iter)i -t %(theta)f -I %(numpops)i "\
"%(sample_sizes)s %(core)s"
if recomb:
ms_command = ms_command + " -r %(recomb)f %(rsites)i"
if not rsites:
rsites = theta*10
sub_dict = {'total_chrom': numpy.sum(ns), 'iter': iter, 'theta': theta,
'numpops': len(ns), 'sample_sizes': ' '.join(map(str, ns)),
'core': core, 'recomb': recomb, 'rsites': rsites}
return ms_command % sub_dict
def perturb_params(params, fold=1, lower_bound=None, upper_bound=None):
"""
Generate a perturbed set of parameters.
    Each element of params is randomly perturbed <fold> factors of 2 up or down.
fold: Number of factors of 2 to perturb by
lower_bound: If not None, the resulting parameter set is adjusted to have
                 all values greater than lower_bound.
upper_bound: If not None, the resulting parameter set is adjusted to have
                 all values less than upper_bound.
"""
pnew = params * 2**(fold * 2*numpy.random.random(len(params))-1)
if lower_bound:
pnew = numpy.maximum(pnew, 1.01*numpy.asarray(lower_bound))
if upper_bound:
pnew = numpy.minimum(pnew, 0.99*numpy.asarray(upper_bound))
return pnew
def make_fux_table(fid, ts, Q, tri_freq):
"""
Make file of 1-fux for use in ancestral misidentification correction.
fid: Filename to output to.
ts: Expected number of substitutions per site between ingroup and outgroup.
Q: Trinucleotide transition rate matrix. This should be a 64x64 matrix, in
which entries are ordered using the code CGTA -> 0,1,2,3. For example,
ACT -> 3*16+0*4+3*1=51. The transition rate from ACT to AGT is then
entry 51,55.
tri_freq: Dictionary in which each entry maps a trinucleotide to its
stationary frequency. e.g. {'AAA': 0.01, 'AAC':0.012...}
"""
code = 'CGTA'
# Ensure that the rows of Q sum to zero.
for ii,row in enumerate(Q):
s = row.sum() - row[ii]
row[ii] = -s
eQhalf = scipy.linalg.matfuncs.expm(Q * ts/2.)
    newfile = False
    if not hasattr(fid, 'write'):
        newfile = True
        fid = file(fid, 'w')
outlines = []
for ii,first in enumerate(code):
for jj,second in enumerate(code):
for kk,third in enumerate(code):
for ll,outgroup in enumerate(code):
# These are the indices into Q and eQ
uind = 16*ii+4*jj+1*kk
xind = 16*ii+4*ll+1*kk
## Note that the Q terms factor out in our final
## calculation.
#Qux = Q[uind,xind]
#denomu = Q[uind].sum() - Q[uind,uind]
PMuUu, PMuUx = 0,0
# Equation 2 in HWB. We have to generalize slightly to
# calculate PMuUx.
for aa,alpha in enumerate(code):
aind = 16*ii+4*aa+1*kk
pia = tri_freq[first+alpha+third]
Pau = eQhalf[aind,uind]
Pax = eQhalf[aind,xind]
PMuUu += pia * Pau*Pau
PMuUx += pia * Pau*Pax
# This is fux. For a given SNP with actual ancestral state
# u, this is the probability that the outgroup will have u.
# Eqn 3 in HWB.
res = PMuUx/(PMuUu + PMuUx)
if outgroup == second:
res = 0
outlines.append('%c%c%c %c %.6f' % (first,second,third,
outgroup, res))
fid.write(os.linesep.join(outlines))
if newfile:
fid.close()
|
Python
| 0.000001 |
@@ -3446,16 +3446,17 @@
(fold *
+(
2*numpy.
@@ -3484,16 +3484,17 @@
ams))-1)
+)
%0A if
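The one-character fix above is an operator-precedence repair. The docstring promises a perturbation of up to <fold> factors of 2 in either direction, i.e. an exponent in (-fold, fold), but the old expression fold * 2*r - 1 parses as (fold*2*r) - 1, giving [-1, 2*fold - 1): correct only when fold == 1 and biased upward otherwise. The corrected fold * (2*r - 1) restores the symmetric range. A quick numeric check (the values are illustrative, not from dadi's tests):

import numpy

r = numpy.linspace(0, 1, 5)   # stand-in for numpy.random.random(len(params))

fold = 1.0
print(fold * 2 * r - 1)       # [-1.  -0.5  0.   0.5  1. ]  -- matches only here
print(fold * (2 * r - 1))     # [-1.  -0.5  0.   0.5  1. ]

fold = 3.0
print(fold * 2 * r - 1)       # [-1.   0.5  2.   3.5  5. ]  -- skewed upward
print(fold * (2 * r - 1))     # [-3.  -1.5  0.   1.5  3. ]  -- symmetric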
|
666c1c0f45c51c92e8e23c2dc144853063363abe
|
Make events plugin work with i18n_subsites
|
events/events.py
|
events/events.py
|
# -*- coding: utf-8 -*-
"""
events plugin for Pelican
=========================
This plugin looks for and parses an "events" directory and generates
blog posts with a user-defined event date. (typically in the future)
It also generates an ICalendar v2.0 calendar file.
https://en.wikipedia.org/wiki/ICalendar
Author: Federico Ceratto <[email protected]>
Released under AGPLv3+ license, see LICENSE
"""
from datetime import datetime, timedelta
from pelican import signals, utils
import icalendar
import logging
import os.path
import pytz
log = logging.getLogger(__name__)
TIME_MULTIPLIERS = {
'w': 'weeks',
'd': 'days',
'h': 'hours',
'm': 'minutes',
's': 'seconds'
}
events = []
def parse_tstamp(ev, field_name):
"""Parse a timestamp string in format "YYYY-MM-DD HH:MM"
:returns: datetime
"""
try:
return datetime.strptime(ev[field_name], '%Y-%m-%d %H:%M')
    except Exception as e:
log.error("Unable to parse the '%s' field in the event named '%s': %s" \
% (field_name, ev['title'], e))
raise
def parse_timedelta(ev):
"""Parse a timedelta string in format [<num><multiplier> ]*
e.g. 2h 30m
:returns: timedelta
"""
chunks = ev['event-duration'].split()
tdargs = {}
for c in chunks:
try:
m = TIME_MULTIPLIERS[c[-1]]
val = int(c[:-1])
tdargs[m] = val
except KeyError:
log.error("""Unknown time multiplier '%s' value in the \
'event-duration' field in the '%s' event. Supported multipliers \
are: '%s'.""" % (c, ev['title'], ' '.join(TIME_MULTIPLIERS)))
raise RuntimeError("Unknown time multiplier '%s'" % c)
except ValueError:
log.error("""Unable to parse '%s' value in the 'event-duration' \
field in the '%s' event.""" % (c, ev['title']))
raise ValueError("Unable to parse '%s'" % c)
return timedelta(**tdargs)
def parse_article(generator, metadata):
"""Collect articles metadata to be used for building the event calendar
:returns: None
"""
if 'event-start' not in metadata:
return
dtstart = parse_tstamp(metadata, 'event-start')
if 'event-end' in metadata:
dtend = parse_tstamp(metadata, 'event-end')
elif 'event-duration' in metadata:
dtdelta = parse_timedelta(metadata)
dtend = dtstart + dtdelta
else:
        msg = "Either 'event-end' or 'event-duration' must be" + \
              " specified in the event named '%s'" % metadata['title']
log.error(msg)
raise ValueError(msg)
events.append((dtstart, dtend, metadata))
def generate_ical_file(generator):
"""Generate an iCalendar file
"""
global events
ics_fname = generator.settings['PLUGIN_EVENTS']['ics_fname']
if not ics_fname:
return
ics_fname = os.path.join(generator.settings['OUTPUT_PATH'], ics_fname)
log.debug("Generating calendar at %s with %d events" % (ics_fname, len(events)))
tz = generator.settings.get('TIMEZONE', 'UTC')
tz = pytz.timezone(tz)
ical = icalendar.Calendar()
ical.add('prodid', '-//My calendar product//mxm.dk//')
ical.add('version', '2.0')
for e in events:
dtstart, dtend, metadata = e
ie = icalendar.Event(
summary=metadata['summary'],
dtstart=dtstart,
dtend=dtend,
dtstamp=metadata['date'],
priority=5,
uid=metadata['title'] + metadata['summary'],
)
if 'event-location' in metadata:
ie.add('location', metadata['event-location'])
ical.add_component(ie)
with open(ics_fname, 'wb') as f:
f.write(ical.to_ical())
def generate_events_list(generator):
"""Populate the event_list variable to be used in jinja templates"""
generator.context['events_list'] = sorted(events, reverse=True)
def register():
signals.article_generator_context.connect(parse_article)
signals.article_generator_finalized.connect(generate_ical_file)
signals.article_generator_finalized.connect(generate_events_list)
|
Python
| 0 |
@@ -3213,60 +3213,804 @@
-for e in events:%0A dtstart, dtend, metadata =
+multiLanguageSupportNecessary = %22i18n_subsites%22 in generator.settings%5B%22PLUGINS%22%5D%0A if multiLanguageSupportNecessary:%0A currentLang = os.path.basename(os.path.normpath(generator.settings%5B'OUTPUT_PATH'%5D))%0A sortedUniqueEvents = sorted(events)%0A last = sortedUniqueEvents%5B-1%5D%0A for i in range(len(sortedUniqueEvents)-2, -1,-1):%0A if last == sortedUniqueEvents%5Bi%5D:%0A del sortedUniqueEvents%5Bi%5D%0A else:%0A last = sortedUniqueEvents%5Bi%5D%0A else:%0A sortedUniqueEvents = events%0A%0A for e in sortedUniqueEvents:%0A dtstart, dtend, metadata = e%0A if multiLanguageSupportNecessary and currentLang != metadata%5B'lang'%5D:%0A log.debug(%22%25s is not equal to %25s%22 %25(currentLang, metadata%5B'lang'%5D))%0A continu
e%0A%0A
@@ -4395,16 +4395,134 @@
nt(ie)%0A%0A
+ if not os.path.exists(generator.settings%5B'OUTPUT_PATH'%5D):%0A os.makedirs(generator.settings%5B'OUTPUT_PATH'%5D)%0A%0A
with
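Two things happen in this diff: with i18n_subsites enabled every article is parsed once per language, so the module-level events list accumulates duplicates that must be collapsed, and each generated subsite should only list events whose 'lang' matches its output directory. The sort-then-scan deduplication is needed because each event tuple carries a metadata dict, which is unhashable, so set() is not an option. A compact equivalent sketch (like the original, it relies on the tuples being sortable, which holds under the Python 2 this plugin targets):

from itertools import groupby

def unique_sorted_events(events):
    # Sorting brings equal tuples together; groupby then collapses each run.
    return [event for event, _ in groupby(sorted(events))]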
|
2c6700d7a16ec7e76847f3664655aaf6c8f171eb
|
Create test_servo5v.py
|
test/test_servo5v.py
|
test/test_servo5v.py
|
Python
| 0.000003 |
@@ -0,0 +1,206 @@
+from gadgets.motors.servos import Servo5V%0Aimport time%0Aimport random%0A%0Aservo = Servo5V(pin_number=12,freq=100)%0Acount = 0%0Awhile count %3C 185:%0A%09time.sleep(0.1)%0A%09servo.write(count)%0A%09count += 5%0Aservo.cleanup()%09%0A%09%0A
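The test sweeps the servo from 0 to 180 degrees in 5-degree steps with a 100 ms pause between positions (the random import is unused). A sketch of the same sweep factored into a reusable helper; Servo5V and its write()/cleanup() methods come from the gadgets library used above, while the parameter defaults are invented:

import time
from gadgets.motors.servos import Servo5V

def sweep(servo, start=0, stop=180, step=5, pause=0.1):
    # Step through the range, pausing so the servo can reach each position.
    for angle in range(start, stop + 1, step):
        time.sleep(pause)
        servo.write(angle)

servo = Servo5V(pin_number=12, freq=100)
try:
    sweep(servo)
finally:
    servo.cleanup()   # always release the pin, even if the sweep fails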
|
|
7c9c95795dbbc5f64b532720f5749b58361c222b
|
add collector for http://www.dshield.org/
|
collectors/dshield.py
|
collectors/dshield.py
|
Python
| 0 |
@@ -0,0 +1,1982 @@
+#!/usr/bin/python%0A# -*- coding: utf-8 -*-%0A%0Aimport socket%0Aimport requests%0Aimport ipwhois%0Afrom pprint import pprint%0A%0A%0Adef get_url(url):%0A try:%0A res = requests.get(url)%0A except requests.exceptions.ConnectionError:%0A raise requests.exceptions.ConnectionError(%22DNS lookup failures%22)%0A else:%0A if res.status_code != 200:%0A raise requests.exceptions.ConnectionError(%0A %22the %7B%7D, answer with %7B%7D error%22.format(url, res.status_code))%0A%0A return res%0A%0A%0Adef get_ip(name):%0A attempts = 5%0A ip = %22undefined%22%0A while attempts:%0A try:%0A data = socket.gethostbyname_ex(name)%0A ip = data%5B2%5D%5B0%5D%0A break%0A except (socket.herror, socket.gaierror):%0A attempts -= 1%0A%0A return ip%0A%0A%0Adef get_who_is_and_country(ip):%0A try:%0A ip_obj = ipwhois.IPWhois(ip)%0A who_is = ip_obj.lookup(retry_count=5)%0A return str(who_is), who_is%5B'asn_country_code'%5D%0A except ipwhois.exceptions.IPDefinedError:%0A return %22Private-Use Networks%22, %22undefined%22%0A except ipwhois.exceptions.WhoisLookupError:%0A return %22undefined%22, %22undefined%22%0A%0A%0Adef gather():%0A attack_type = 'undefined'%0A base_url = %22http://www.dshield.org/feeds/suspiciousdomains_High.txt%22%0A%0A res = get_url(base_url)%0A for line in res.iter_lines():%0A if line%5B:1%5D == %22#%22 or line in (%22Site%22, %22%22):%0A continue%0A%0A host = line%0A if host%5B-1%5D == %22%5Ct%22:%0A host = line%5B:-1%5D%0A ip_address = get_ip(host)%0A if ip_address == %22undefined%22:%0A who_is, country = %22undefined%22, %22undefined%22%0A else:%0A who_is, country = get_who_is_and_country(ip_address)%0A%0A doc = %7B%0A 'IP': ip_address,%0A 'SourceInfo': base_url,%0A 'Type': attack_type,%0A 'Country': country,%0A 'Domain': host,%0A 'URL': host,%0A 'WhoIsInfo': who_is,%0A %7D%0A%0A pprint(doc)%0A%0A%0Aif __name__ == '__main__':%0A gather()%0A
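The collector retries DNS lookups by hand but calls requests.get with no timeout or retry, so a single slow response can stall the whole gather. A sketch of hardening get_url with requests' built-in retry adapter; the retry count, backoff factor, and status list here are invented defaults, not values from the collector:

import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

def make_session(retries=5, backoff=0.5):
    session = requests.Session()
    retry = Retry(total=retries, backoff_factor=backoff,
                  status_forcelist=(500, 502, 503, 504))
    session.mount('http://', HTTPAdapter(max_retries=retry))
    session.mount('https://', HTTPAdapter(max_retries=retry))
    return session

session = make_session()
res = session.get('http://www.dshield.org/feeds/suspiciousdomains_High.txt',
                  timeout=10)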
|
|
8fe73523b7141f93d8523e56a7c6a5cc2ed82051
|
Test case for iodrivesnmp class
|
src/collectors/iodrivesnmp/test/testiodrivesnmp.py
|
src/collectors/iodrivesnmp/test/testiodrivesnmp.py
|
Python
| 0 |
@@ -0,0 +1,668 @@
+#!/usr/bin/python%0A# coding=utf-8%0A################################################################################%0A%0Afrom test import CollectorTestCase%0Afrom test import get_collector_config%0A%0Afrom iodrivesnmp import IODriveSNMPCollector%0A%0A%0Aclass TestIODriveSNMPCollector(CollectorTestCase):%0A def setUp(self, allowed_names=None):%0A if not allowed_names:%0A allowed_names = %5B%5D%0A config = get_collector_config('IODriveSNMPCollector', %7B%0A 'allowed_names': allowed_names,%0A 'interval': 1%0A %7D)%0A self.collector = IODriveSNMPCollector(config, None)%0A%0A def test_import(self):%0A self.assertTrue(IODriveSNMPCollector)%0A
|