column        type           min       max
commit        stringlengths  40        40
subject       stringlengths  1         3.25k
old_file      stringlengths  4         311
new_file      stringlengths  4         311
old_contents  stringlengths  0         26.3k
lang          stringclasses  3 values
proba         float64        0         1
diff          stringlengths  0         7.82k
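Newlines and other special characters inside each diff value are percent-encoded: %0A is a newline, %5B/%5D are square brackets, %22 is a double quote, and %25 is a literal percent sign. A minimal Python sketch of recovering readable diff text with the standard library — the variable names are illustrative only; the example string is the diff of the TensorFlow row below:

from urllib.parse import unquote

# The `diff` value of the tensorflow/python/compat/compat.py row, as stored.
encoded_diff = "@@ -1335,17 +1335,18 @@ 22, 10, -9 +10 )%0A_FORWA"

# unquote() restores %0A to newlines, %5B/%5D to brackets, %25 to '%'.
print(unquote(encoded_diff))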
2cd1e7fcdf53c312c3db8e6f1d257084a87cccbb
Add migration to update action implementation hashes.
recipe-server/normandy/recipes/migrations/0045_update_action_hashes.py
recipe-server/normandy/recipes/migrations/0045_update_action_hashes.py
Python
0
@@ -0,0 +1,1050 @@ +# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Aimport hashlib%0Afrom base64 import b64encode, urlsafe_b64encode%0A%0Afrom django.db import migrations%0A%0A%0Adef make_hashes_urlsafe_sri(apps, schema_editor):%0A Action = apps.get_model('recipes', 'Action')%0A%0A for action in Action.objects.all():%0A data = action.implementation.encode()%0A digest = hashlib.sha384(data).digest()%0A data_hash = urlsafe_b64encode(digest)%0A action.implementation_hash = 'sha384-' + data_hash.decode()%0A action.save()%0A%0A%0Adef make_hashes_sha1(apps, schema_editor):%0A Action = apps.get_model('recipes', 'Action')%0A%0A for action in Action.objects.all():%0A data = action.implementation.encode()%0A data_hash = hashlib.sha1(data).hexdigest()%0A action.implementation_hash = data_hash%0A action.save()%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('recipes', '0044_auto_20170801_0010'),%0A %5D%0A%0A operations = %5B%0A migrations.RunPython(make_hashes_urlsafe_sri, make_hashes_sha1),%0A %5D%0A
fa349a967f9f1149dc9aae1bab168f7be7436320
Use HTTP/1.1 for memory cache http server.
tools/telemetry/telemetry/core/memory_cache_http_server.py
tools/telemetry/telemetry/core/memory_cache_http_server.py
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import BaseHTTPServer
import mimetypes
import os
import SimpleHTTPServer
import SocketServer
import sys
import zlib


class MemoryCacheHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):

  def do_GET(self):
    """Serve a GET request."""
    resource = self.SendHead()
    if resource:
      self.wfile.write(resource['response'])

  def do_HEAD(self):
    """Serve a HEAD request."""
    self.SendHead()

  def SendHead(self):
    path = self.translate_path(self.path)
    ctype = self.guess_type(path)
    if path not in self.server.resource_map:
      self.send_error(404, 'File not found')
      return None
    resource = self.server.resource_map[path]
    self.send_response(200)
    self.send_header('Content-Type', ctype)
    self.send_header('Content-Length', str(resource['content-length']))
    self.send_header('Last-Modified',
                     self.date_time_string(resource['last-modified']))
    if resource['zipped']:
      self.send_header('Content-Encoding', 'deflate')
    self.end_headers()
    return resource


class MemoryCacheHTTPServer(SocketServer.ThreadingMixIn,
                            BaseHTTPServer.HTTPServer):
  # Increase the request queue size. The default value, 5, is set in
  # SocketServer.TCPServer (the parent of BaseHTTPServer.HTTPServer).
  # Since we're intercepting many domains through this single server,
  # it is quite possible to get more than 5 concurrent requests.
  request_queue_size = 128

  def __init__(self, host_port, handler, directories):
    BaseHTTPServer.HTTPServer.__init__(self, host_port, handler)
    self.resource_map = {}
    for path in directories:
      self.LoadResourceMap(path)

  def LoadResourceMap(self, cwd):
    """Loads all files in cwd into the in-memory resource map."""
    for root, dirs, files in os.walk(cwd):
      # Skip hidden files and folders (like .svn and .git).
      files = [f for f in files if f[0] != '.']
      dirs[:] = [d for d in dirs if d[0] != '.']

      for f in files:
        file_path = os.path.join(root, f)
        if not os.path.exists(file_path):  # Allow for '.#' files
          continue
        with open(file_path, 'rb') as fd:
          response = fd.read()
          fs = os.fstat(fd.fileno())
          content_type = mimetypes.guess_type(file_path)[0]
          zipped = False
          if content_type in ['text/html', 'text/css',
                              'application/javascript']:
            zipped = True
            response = zlib.compress(response, 9)
          self.resource_map[file_path] = {
              'content-length': len(response),
              'last-modified': fs.st_mtime,
              'response': response,
              'zipped': zipped
              }


def Main():
  assert len(sys.argv) > 2, 'usage: %prog <port> [<path1>, <path2>, ...]'
  port = int(sys.argv[1])
  directories = sys.argv[2:]
  server_address = ('127.0.0.1', port)
  MemoryCacheHTTPRequestHandler.protocol_version = 'HTTP/1.0'
  httpd = MemoryCacheHTTPServer(server_address, MemoryCacheHTTPRequestHandler,
                                directories)
  httpd.serve_forever()


if __name__ == '__main__':
  Main()
Python
0.000076
@@ -3115,17 +3115,17 @@ 'HTTP/1. -0 +1 '%0A http
1253cf2773b510f88b4391e22f0e98b4ef3cdf52
Create serializers.py
templates/root/main/serializers.py
templates/root/main/serializers.py
Python
0.000002
@@ -0,0 +1,595 @@ +from django.contrib.auth.models import User%0Afrom rest_framework import serializers%0Afrom %3C%25= appName %25%3E.models import Sample%0A%0Aclass SampleSerializer(serializers.HyperlinkedModelSerializer):%0A%09owner = serializers.ReadOnlyField(source='owner.username')%0A%09%0A%09class Meta:%0A%09%09model = Sample%0A%09%09fields = ('id', 'created', 'name', 'img_name', 'url', 'owner', 'info')%0A%09%09%0Aclass UserSerializer(serializers.HyperlinkedModelSerializer):%0A%09clownfish = serializers.HyperlinkedRelatedField(many=True, view_name='sample-detail', read_only=True)%0A%0A%09class Meta:%0A%09%09model = User%0A%09%09fields = ('url', 'username', 'sample')%0A%09%09%0A
c6015e049ab1ce059298af9147851f9a6a1c1e46
Replace NotImplemented singleton with NotImplementedError exception
src/ggrc_workflows/services/workflow_cycle_calculator/one_time_cycle_calculator.py
src/ggrc_workflows/services/workflow_cycle_calculator/one_time_cycle_calculator.py
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: [email protected]
# Maintained By: [email protected]

import datetime

from ggrc_workflows.services.workflow_cycle_calculator import cycle_calculator


class OneTimeCycleCalculator(cycle_calculator.CycleCalculator):
  """CycleCalculator implementation for one-time workflows

  Because one-time workflows have concrete start and end dates already
  specified for tasks, we don't have to implement relative_day_to_date
  function and we can return all values in their raw format (we don't
  need to adjust for holidays).
  """

  def __init__(self, workflow, base_date=None):
    super(OneTimeCycleCalculator, self).__init__(workflow)

  def relative_day_to_date(self, relative_day, relative_month=None,
                           base_date=None):
    raise NotImplemented("Relative days are not applicable "
                         "for one-time workflows.")

  def sort_tasks(self):
    self.tasks.sort(key=lambda t: self._date_normalizer(t.start_date))

  @staticmethod
  def get_relative_start(task):
    raise NotImplemented("Relative days are not applicable "
                         "for one-time workflows.")

  @staticmethod
  def get_relative_end(task):
    raise NotImplemented("Relative days are not applicable "
                         "for one-time workflows.")

  @staticmethod
  def task_date_range(task, base_date=None):
    return task.start_date, task.end_date

  @staticmethod
  def _date_normalizer(d):
    if type(d) is datetime.datetime:
      return d.date()
    return d

  def workflow_date_range(self):
    tasks_start_dates = [
        self._date_normalizer(task.start_date) for task in self.tasks]
    tasks_end_dates = [
        self._date_normalizer(task.end_date) for task in self.tasks]
    return min(tasks_start_dates), max(tasks_end_dates)

  def next_cycle_start_date(self, base_date=None):
    return None
Python
0.999768
@@ -943,32 +943,37 @@ e NotImplemented +Error (%22Relative days @@ -985,32 +985,37 @@ ot applicable %22%0A + @@ -1211,32 +1211,37 @@ e NotImplemented +Error (%22Relative days @@ -1253,32 +1253,37 @@ ot applicable %22%0A + @@ -1389,16 +1389,21 @@ lemented +Error (%22Relati @@ -1423,32 +1423,37 @@ ot applicable %22%0A +
c775da3c1f73798ecaa1e1c46402bb03d21468cb
Update forward compatibility horizon to 2022-10-10
tensorflow/python/compat/compat.py
tensorflow/python/compat/compat.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for API compatibility between TensorFlow release versions.

See [Version
Compatibility](https://tensorflow.org/guide/version_compat#backward_forward)
"""

import datetime
import os

from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export

# This value changes every day with an automatic CL. It can be modified in code
# via `forward_compatibility_horizon()` or with the environment variable
# TF_FORWARD_COMPATIBILITY_DELTA_DAYS, which is added to the compatibility date.
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2022, 10, 9)
_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME = "TF_FORWARD_COMPATIBILITY_DELTA_DAYS"
_FORWARD_COMPATIBILITY_DATE_NUMBER = None


def _date_to_date_number(year, month, day):
  return (year << 9) | (month << 5) | day


def _update_forward_compatibility_date_number(date_to_override=None):
  """Update the base date to compare in forward_compatible function."""
  global _FORWARD_COMPATIBILITY_DATE_NUMBER

  if date_to_override:
    date = date_to_override
  else:
    date = _FORWARD_COMPATIBILITY_HORIZON
    delta_days = os.getenv(_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME)
    if delta_days:
      date += datetime.timedelta(days=int(delta_days))

  if date < _FORWARD_COMPATIBILITY_HORIZON:
    logging.warning("Trying to set the forward compatibility date to the past"
                    " date %s. This will be ignored by TensorFlow." % (date))
    return
  _FORWARD_COMPATIBILITY_DATE_NUMBER = _date_to_date_number(
      date.year, date.month, date.day)


_update_forward_compatibility_date_number()


@tf_export("compat.forward_compatible")
def forward_compatible(year, month, day):
  """Return true if the forward compatibility window has expired.

  See [Version
  compatibility](https://tensorflow.org/guide/version_compat#backward_forward).

  Forward-compatibility refers to scenarios where the producer of a TensorFlow
  model (a GraphDef or SavedModel) is compiled against a version of the
  TensorFlow library newer than what the consumer was compiled against. The
  "producer" is typically a Python program that constructs and trains a model
  while the "consumer" is typically another program that loads and serves the
  model.

  TensorFlow has been supporting a 3 week forward-compatibility window for
  programs compiled from source at HEAD.

  For example, consider the case where a new operation `MyNewAwesomeAdd` is
  created with the intent of replacing the implementation of an existing Python
  wrapper - `tf.add`.  The Python wrapper implementation should change from
  something like:

  ```python
  def add(inputs, name=None):
    return gen_math_ops.add(inputs, name)
  ```

  to:

  ```python
  from tensorflow.python.compat import compat

  def add(inputs, name=None):
    if compat.forward_compatible(year, month, day):
      # Can use the awesome new implementation.
      return gen_math_ops.my_new_awesome_add(inputs, name)
    # To maintain forward compatibility, use the old implementation.
    return gen_math_ops.add(inputs, name)
  ```

  Where `year`, `month`, and `day` specify the date beyond which binaries
  that consume a model are expected to have been updated to include the
  new operations. This date is typically at least 3 weeks beyond the date
  the code that adds the new operation is committed.

  Args:
    year:  A year (e.g., 2018). Must be an `int`.
    month: A month (1 <= month <= 12) in year. Must be an `int`.
    day:   A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
      `int`.

  Returns:
    True if the caller can expect that serialized TensorFlow graphs produced
    can be consumed by programs that are compiled with the TensorFlow library
    source code after (year, month, day).
  """
  return _FORWARD_COMPATIBILITY_DATE_NUMBER > _date_to_date_number(
      year, month, day)


@tf_export("compat.forward_compatibility_horizon")
@tf_contextlib.contextmanager
def forward_compatibility_horizon(year, month, day):
  """Context manager for testing forward compatibility of generated graphs.

  See [Version
  compatibility](https://www.tensorflow.org/guide/versions#backward_and_partial_forward_compatibility).

  To ensure forward compatibility of generated graphs (see `forward_compatible`)
  with older binaries, new features can be gated with:

  ```python
  if compat.forward_compatible(year=2018, month=08, date=01):
    generate_graph_with_new_features()
  else:
    generate_graph_so_older_binaries_can_consume_it()
  ```

  However, when adding new features, one may want to unittest it before
  the forward compatibility window expires. This context manager enables
  such tests. For example:

  ```python
  from tensorflow.python.compat import compat

  def testMyNewFeature(self):
    with compat.forward_compatibility_horizon(2018, 08, 02):
       # Test that generate_graph_with_new_features() has an effect
  ```

  Args:
    year:  A year (e.g., 2018). Must be an `int`.
    month: A month (1 <= month <= 12) in year. Must be an `int`.
    day:   A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
      `int`.

  Yields:
    Nothing.
  """
  try:
    _update_forward_compatibility_date_number(datetime.date(year, month, day))
    yield
  finally:
    _update_forward_compatibility_date_number()
Python
0
@@ -1335,17 +1335,18 @@ 22, 10, -9 +10 )%0A_FORWA
5dc2f523473f4921c3b7f1915966c0ac22b09474
Create package and metadatas
mots_vides/__init__.py
mots_vides/__init__.py
Python
0
@@ -0,0 +1,166 @@ +%22%22%22%0AMots-vides%0A%22%22%22%0A__version__ = '2015.1.21.dev0'%0A%0A__author__ = 'Fantomas42'%0A__email__ = '[email protected]'%0A%0A__url__ = 'https://github.com/Fantomas42/mots-vides'%0A
02e9602a5723aa3cbe9395290e4c18e439065007
Remove redundant code
numpy/distutils/tests/test_fcompiler.py
numpy/distutils/tests/test_fcompiler.py
from __future__ import division, absolute_import, print_function

from numpy.testing import assert_
import numpy.distutils.fcompiler

customizable_flags = [
    ('f77', 'F77FLAGS'),
    ('f90', 'F90FLAGS'),
    ('free', 'FREEFLAGS'),
    ('arch', 'FARCH'),
    ('debug', 'FDEBUG'),
    ('flags', 'FFLAGS'),
    ('linker_so', 'LDFLAGS'),
]


def test_fcompiler_flags(monkeypatch):
    monkeypatch.setenv('NPY_DISTUTILS_APPEND_FLAGS', '0')
    fc = numpy.distutils.fcompiler.new_fcompiler(compiler='none')
    flag_vars = fc.flag_vars.clone(lambda *args, **kwargs: None)

    for opt, envvar in customizable_flags:
        new_flag = '-dummy-{}-flag'.format(opt)
        prev_flags = getattr(flag_vars, opt)

        monkeypatch.setenv(envvar, new_flag)
        new_flags = getattr(flag_vars, opt)
        monkeypatch.delenv(envvar)
        assert_(new_flags == [new_flag])

    monkeypatch.setenv('NPY_DISTUTILS_APPEND_FLAGS', '1')
    fc = numpy.distutils.fcompiler.new_fcompiler(compiler='none')
    flag_vars = fc.flag_vars.clone(lambda *args, **kwargs: None)

    for opt, envvar in customizable_flags:
        new_flag = '-dummy-{}-flag'.format(opt)
        prev_flags = getattr(flag_vars, opt)

        monkeypatch.setenv(envvar, new_flag)
        new_flags = getattr(flag_vars, opt)
        monkeypatch.delenv(envvar)

        if prev_flags is None:
            assert_(new_flags == [new_flag])
        else:
            assert_(new_flags == prev_flags + [new_flag])
Python
0.999999
@@ -927,139 +927,8 @@ '1') -%0A fc = numpy.distutils.fcompiler.new_fcompiler(compiler='none')%0A flag_vars = fc.flag_vars.clone(lambda *args, **kwargs: None) %0A%0A
b9bb7e36977b757a63015ac3af8b538f0c67f16c
add manage.py
manage.py
manage.py
Python
0.000001
@@ -0,0 +1,460 @@ +from argparse import ArgumentParser%0A%0A%0Adef apply_migrates(args):%0A print('migrate')%0A%0A%0Adef make_parser():%0A parser = ArgumentParser()%0A subparsers = parser.add_subparsers()%0A%0A migrate = subparsers.add_parser('migrate')%0A migrate.set_defaults(func=apply_migrates)%0A%0A return parser%0A%0A%0Aif __name__ == '__main__':%0A parser = make_parser()%0A%0A args = parser.parse_args()%0A%0A if vars(args):%0A args.func(args)%0A else:%0A parser.print_help()%0A
8fa7120606e206d08acbad198e253ea428eef584
Add tests for inline list compilation
tests/compiler/test_inline_list_compilation.py
tests/compiler/test_inline_list_compilation.py
Python
0
@@ -0,0 +1,1107 @@ +import pytest%0A%0Afrom tests.compiler import compile_snippet, internal_call, STATIC_START, LOCAL_START%0Afrom thinglang.compiler.errors import NoMatchingOverload, InvalidReference%0Afrom thinglang.compiler.opcodes import OpcodePopLocal, OpcodePushStatic%0A%0A%0Adef test_inline_list_compilation():%0A%0A assert compile_snippet('list%3Cnumber%3E numbers = %5B1, 2, 3%5D') == %5B%0A OpcodePushStatic(STATIC_START), # Push the values%0A OpcodePushStatic(STATIC_START + 1),%0A OpcodePushStatic(STATIC_START + 2),%0A%0A internal_call('list.__constructor__'), # Create the list%0A%0A internal_call('list.append'), # Compile 3 append calls%0A internal_call('list.append'),%0A internal_call('list.append'),%0A%0A OpcodePopLocal(LOCAL_START)%0A %5D%0A%0A%0Adef test_inline_list_type_homogeneity():%0A with pytest.raises(NoMatchingOverload):%0A assert compile_snippet('list%3Cnumber%3E numbers = %5B1, Container(), 3%5D')%0A%0A%0Adef test_inline_list_declaration_type_match():%0A with pytest.raises(InvalidReference):%0A assert compile_snippet('list%3Cnumber%3E numbers = %5BContainer(), Container(), Container()%5D')%0A%0A%0A
142cb17be1c024839cd972071b2f9665c87ed5f1
Update downloadable clang to r338452
third_party/clang_toolchain/download_clang.bzl
third_party/clang_toolchain/download_clang.bzl
""" Helpers to download a recent clang release.""" def _get_platform_folder(os_name): os_name = os_name.lower() if os_name.startswith("windows"): return "Win" if os_name.startswith("mac os"): return "Mac" if not os_name.startswith("linux"): fail("Unknown platform") return "Linux_x64" def _download_chromium_clang( repo_ctx, platform_folder, package_version, sha256, out_folder): cds_url = "https://commondatastorage.googleapis.com/chromium-browser-clang" cds_file = "clang-%s.tgz" % package_version cds_full_url = "{0}/{1}/{2}".format(cds_url, platform_folder, cds_file) repo_ctx.download_and_extract(cds_full_url, output = out_folder, sha256 = sha256) def download_clang(repo_ctx, out_folder): """ Download a fresh clang release and put it into out_folder. Clang itself will be located in 'out_folder/bin/clang'. We currently download one of the latest releases of clang by the Chromium project (see https://chromium.googlesource.com/chromium/src/+/master/docs/clang.md). Args: repo_ctx: An instance of repository_context object. out_folder: A folder to extract the compiler into. """ # TODO(ibiryukov): we currently download and extract some extra tools in the # clang release (e.g., sanitizers). We should probably remove the ones # we don't need and document the ones we want provide in addition to clang. # Latest CLANG_REVISION and CLANG_SUB_REVISION of the Chromiums's release # can be found in https://chromium.googlesource.com/chromium/src/tools/clang/+/master/scripts/update.py CLANG_REVISION = "336424" CLANG_SUB_REVISION = 1 package_version = "%s-%s" % (CLANG_REVISION, CLANG_SUB_REVISION) checksums = { "Linux_x64": "2ea97e047470da648f5d078af008bce6891287592382cee3d53a1187d996da94", "Mac": "c6e28909cce63ee35e0d51284d9f0f6e8838f7fb8b7a0dc9536c2ea900552df0", "Win": "1299fda7c4378bfb81337f7e5f351c8a1f953f51e0744e2170454b8d722f3db7", } platform_folder = _get_platform_folder(repo_ctx.os.name) _download_chromium_clang( repo_ctx, platform_folder, package_version, checksums[platform_folder], out_folder, )
Python
0
@@ -1676,12 +1676,12 @@ %2233 -6424 +8452 %22%0A @@ -1821,237 +1821,237 @@ : %222 -ea97e047470da648f5d078af008bce6891287592382cee3d53a1187d996da94%22,%0A %22Mac%22: %22c6e28909cce63ee35e0d51284d9f0f6e8838f7fb8b7a0dc9536c2ea900552df0%22,%0A %22Win%22: %221299fda7c4378bfb81337f7e5f351c8a1f953f51e0744e2170454b8d722f3db7 +13ba23a0a9855ede5041f66661caa9c5c59a573ec60b82a31839f9a97f397bf%22,%0A %22Mac%22: %224267774201f8cb50c25e081375e87038d58db80064a20a0d9d7fe57ea4357ece%22,%0A %22Win%22: %22a8a5d5b25443c099e2c20d1a0cdce2f1d17e2dba84de66a6dc6a239ce3e78c34 %22,%0A
3ddf0f0fead6018b5c313253a0df2165452cfb6e
Add shared babel init code
src/eduid_common/api/translation.py
src/eduid_common/api/translation.py
Python
0
@@ -0,0 +1,549 @@ +# -*- coding: utf-8 -*-%0A%0Afrom flask import request%0Afrom flask_babel import Babel%0A%0A__author__ = 'lundberg'%0A%0A%0Adef init_babel(app):%0A babel = Babel(app)%0A app.babel = babel%0A%0A @babel.localeselector%0A def get_locale():%0A # if a user is logged in, use the locale from the user settings%0A # XXX: TODO%0A # otherwise try to guess the language from the user accept%0A # header the browser transmits. The best match wins.%0A return request.accept_languages.best_match(app.config.get('SUPPORTED_LANGUAGES'))%0A%0A return app%0A
30bca45e1ac9fc6953728950695135b491403215
Add test for logical constant folding.
tests/basics/logic_constfolding.py
tests/basics/logic_constfolding.py
Python
0
@@ -0,0 +1,442 @@ +# tests logical constant folding in parser%0A%0Adef f_true():%0A print('f_true')%0A return True%0A%0Adef f_false():%0A print('f_false')%0A return False%0A%0Aprint(0 or False)%0Aprint(1 or foo)%0Aprint(f_false() or 1 or foo)%0Aprint(f_false() or 1 or f_true())%0A%0Aprint(0 and foo)%0Aprint(1 and True)%0Aprint(f_true() and 0 and foo)%0Aprint(f_true() and 1 and f_false())%0A%0Aprint(not 0)%0Aprint(not False)%0Aprint(not 1)%0Aprint(not True)%0Aprint(not not 0)%0Aprint(not not 1)%0A
5b3863c90d4bc07bbc170fc213b4a4c46b3ddc01
Test setting selinux context on lost+found (#1038146)
tests/formats_test/selinux_test.py
tests/formats_test/selinux_test.py
Python
0
@@ -0,0 +1,2024 @@ +#!/usr/bin/python%0Aimport os%0Aimport selinux%0Aimport tempfile%0Aimport unittest%0A%0Afrom devicelibs_test import baseclass%0Afrom blivet.formats import device_formats%0Aimport blivet.formats.fs as fs%0A%0Aclass SELinuxContextTestCase(baseclass.DevicelibsTestCase):%0A %22%22%22Testing SELinux contexts.%0A %22%22%22%0A%0A @unittest.skipUnless(os.geteuid() == 0, %22requires root privileges%22)%0A def testMountingExt2FS(self):%0A _LOOP_DEV0 = self._loopMap%5Bself._LOOP_DEVICES%5B0%5D%5D%0A%0A an_fs = fs.Ext2FS(device=_LOOP_DEV0, label=%22test%22)%0A self.assertIsNone(an_fs.create())%0A%0A mountpoint = tempfile.mkdtemp(%22test.selinux%22)%0A an_fs.mount(mountpoint=mountpoint)%0A%0A root_selinux_context = selinux.getfilecon(mountpoint)%0A%0A lost_and_found = os.path.join(mountpoint, %22lost+found%22)%0A self.assertTrue(os.path.exists(lost_and_found))%0A%0A lost_and_found_selinux_context = selinux.getfilecon(lost_and_found)%0A%0A an_fs.unmount()%0A os.rmdir(mountpoint)%0A%0A self.assertEqual(root_selinux_context%5B1%5D, 'system_u:object_r:file_t:s0')%0A%0A self.assertEqual(lost_and_found_selinux_context%5B1%5D,%0A 'system_u:object_r:lost_found_t:s0')%0A%0A @unittest.skipUnless(os.geteuid() == 0, %22requires root privileges%22)%0A def testMountingXFS(self):%0A _LOOP_DEV0 = self._loopMap%5Bself._LOOP_DEVICES%5B0%5D%5D%0A%0A an_fs = fs.XFS(device=_LOOP_DEV0, label=%22test%22)%0A self.assertIsNone(an_fs.create())%0A%0A mountpoint = tempfile.mkdtemp(%22test.selinux%22)%0A an_fs.mount(mountpoint=mountpoint)%0A%0A root_selinux_context = selinux.getfilecon(mountpoint)%0A%0A lost_and_found = os.path.join(mountpoint, %22lost+found%22)%0A self.assertFalse(os.path.exists(lost_and_found))%0A%0A an_fs.unmount()%0A os.rmdir(mountpoint)%0A%0A self.assertEqual(root_selinux_context%5B1%5D, 'system_u:object_r:file_t:s0')%0A%0Adef suite():%0A suite1 = unittest.TestLoader().loadTestsFromTestCase(SELinuxContextTestCase)%0A return unittest.TestSuite(%5Bsuite1%5D)%0A%0A%0Aif __name__ == %22__main__%22:%0A unittest.main()%0A
68d620d56625c4c1bd30a30f31840d9bd440b29e
Add find_objects test module
tests/plantcv/test_find_objects.py
tests/plantcv/test_find_objects.py
Python
0.000002
@@ -0,0 +1,806 @@ +import cv2%0Aimport numpy as np%0Afrom plantcv.plantcv import find_objects%0A%0A%0Adef test_find_objects(test_data):%0A # Read in test data%0A img = cv2.imread(test_data.small_rgb_img)%0A mask = cv2.imread(test_data.small_bin_img, -1)%0A cnt, _ = test_data.load_contours(test_data.small_contours_file)%0A contours, _ = find_objects(img=img, mask=mask)%0A # Assert contours match test data%0A assert np.all(cnt) == np.all(contours)%0A%0A%0Adef test_find_objects_grayscale_input(test_data):%0A # Read in test data%0A img = cv2.imread(test_data.small_gray_img, -1)%0A mask = cv2.imread(test_data.small_bin_img, -1)%0A cnt, _ = test_data.load_contours(test_data.small_contours_file)%0A contours, _ = find_objects(img=img, mask=mask)%0A # Assert contours match test data%0A assert np.all(cnt) == np.all(contours)%0A
36033be962fcc3e97d14dd06b42bcd3be52a97c5
Add floting_point.py
parser/sample/floting_point.py
parser/sample/floting_point.py
Python
0.000001
@@ -0,0 +1,1872 @@ +import logging%0A%0Afrom lex_tokens import LexToken%0Afrom ply.yacc import yacc%0A%0A%0Aclass FloatingPointParser(object):%0A class FloatingPointSyntaxError(Exception): pass%0A%0A def __init__(self, debug=False):%0A if debug:%0A self._log = logging.getLogger('PhysicalDivideCharParser')%0A else:%0A self._log = yacc.NullLogger()%0A%0A self._lex = LexToken(debug)%0A self.tokens = self._lex.tokens%0A%0A self._parser = yacc.yacc(module=self, debug=debug, debuglog=self._log)%0A%0A def p_floating_point(self, p):%0A 'expression : floating'%0A p%5B0%5D = p%5B1%5D%0A%0A def p_floating_1(self, p):%0A 'floating : single_num DOT single_num'%0A p%5B0%5D = p%5B1%5D + p%5B2%5D + p%5B3%5D%0A%0A def p_floating_2(self, p):%0A 'floating : single_num dot_char single_num'%0A p%5B0%5D = p%5B1%5D + p%5B2%5D + p%5B3%5D%0A%0A def p_floating_3(self, p):%0A 'floating : single_num'%0A p%5B0%5D = p%5B1%5D%0A%0A def p_divid_dot(self, p):%0A 'dot_char : DOT'%0A p%5B0%5D = p%5B1%5D%0A%0A def p_sign1(self, p):%0A 'single_num : NUMBER'%0A p%5B0%5D = str(p%5B1%5D)%0A%0A def p_sign2(self, p):%0A 'single_num : MINUS NUMBER'%0A p%5B0%5D = p%5B1%5D + str(p%5B2%5D)%0A%0A def p_error(self, p):%0A if p is None: # End-of-file%0A raise self.FloatingPointSyntaxError('Parsing error (%25s)' %25 self.__expr_text)%0A err_msg = 'token type: %7B%7D, value: %7B%7D'.format(p.type, p.value)%0A raise self.FloatingPointSyntaxError(err_msg)%0A%0A def parse(self, s):%0A self.__expr_text = s%0A try:%0A return self._parser.parse(s, lexer=self._lex.lexer())%0A except self.FloatingPointSyntaxError:%0A print %22NOT Matched%22%0A return None%0A%0A%0Aif __name__ == '__main__':%0A header_parser = FloatingPointParser()%0A%0A data = '5.6'%0A data = '- 5.6'%0A data = 'VERSION 5.6 ;'%0A data = '5'%0A data = '-5'%0A print header_parser.parse(data)%0A%0A%0A
be17cf90b06a118d579c0211dd3bc2d45433fb2d
Write unit tests for _handle_long_response
tests/test_handle_long_response.py
tests/test_handle_long_response.py
Python
0.000064
@@ -0,0 +1,1679 @@ +import context%0A%0Aclass TestHandleLongResponse(context.slouch.testing.CommandTestCase):%0A bot_class = context.TimerBot%0A config = %7B'start_fmt': '%7B:%25Y%7D', 'stop_fmt': '%7B.days%7D'%7D%0A normal_text = %22@genericmention: this is generic mention message contains a URL %3Chttp://foo.com/%3E%5Cn@genericmention: this generic mention message contains a :fast_parrot: and :nyancat_big:%5Cn%22%0A over_limit_text = normal_text * 50 # 8550 chars%0A%0A%0A def test_handle_long_message_api(self):%0A _res = %7B%0A 'type': 'message',%0A 'text': self.normal_text,%0A 'channel': None,%0A %7D%0A responses = self.bot._handle_long_response(_res)%0A self.assertEqual(len(responses), 1)%0A self.assertEqual(responses, %5B%7B%0A 'type': 'message',%0A 'text': self.normal_text,%0A 'channel': None%0A %7D%5D)%0A%0A def test_handle_long_message_over_limit_api(self):%0A%0A _res = %7B%0A 'type': 'message',%0A 'text': self.over_limit_text,%0A 'channel': None,%0A %7D%0A responses = self.bot._handle_long_response(_res)%0A self.assertEqual(%5Blen(r%5B'text'%5D) for r in responses%5D, %5B3932, 3933, 685%5D)%0A self.assertEqual(len(responses), 3)%0A%0A def test_handle_long_message_rtm(self):%0A responses = self.bot._handle_long_response(self.normal_text)%0A self.assertEqual(responses, %5Bself.normal_text%5D)%0A self.assertEqual(len(responses), 1)%0A%0A def test_handle_long_message_over_limit_rtm(self):%0A responses = self.bot._handle_long_response(self.over_limit_text)%0A%0A self.assertEqual(%5Blen(r) for r in responses%5D, %5B3932, 3933, 685%5D)%0A self.assertEqual(len(responses), 3)%0A%0A%0A
feafe480d651ee6b58a1631f4eb4533f63ea6ad4
Add user tests
tests/api/test_user.py
tests/api/test_user.py
Python
0.000001
@@ -0,0 +1,1981 @@ +from unittest import mock%0A%0Afrom groupy.api import user%0Afrom .base import get_fake_response%0Afrom .base import TestCase%0A%0A%0Aclass UserTests(TestCase):%0A def setUp(self):%0A self.m_session = mock.Mock()%0A self.m_session.get.return_value = get_fake_response(data=%7B'id': 'foo'%7D)%0A self.user = user.User(self.m_session)%0A%0A def test_id_is_foo(self):%0A self.assertEqual(self.user.me%5B'id'%5D, 'foo')%0A%0A @mock.patch('groupy.api.user.blocks')%0A def test_blocks_uses_id(self, m_blocks):%0A self.user.blocks%0A (__, id_), __ = m_blocks.Blocks.call_args%0A self.assertEqual(id_, 'foo')%0A%0A def test_update(self):%0A data = %7B'bar': 'foo'%7D%0A self.m_session.post.return_value = get_fake_response(data=data)%0A result = self.user.update(foo='bar')%0A self.assertEqual(result, data)%0A%0A%0Aclass SmsModeTests(TestCase):%0A def setUp(self):%0A self.m_session = mock.Mock()%0A self.sms_mode = user.SmsMode(self.m_session)%0A self.m_session.post.return_value = mock.Mock(ok=True)%0A%0A%0Aclass EnableSmsModeTests(SmsModeTests):%0A def setUp(self):%0A super().setUp()%0A self.result = self.sms_mode.enable(duration=42)%0A%0A def test_result_is_True(self):%0A self.assertTrue(self.result)%0A%0A def test_payload_is_correct(self):%0A self.assert_kwargs(self.m_session.post, json=%7B'duration': 42%7D)%0A%0A%0Aclass EnableSmsModeWithRegistrationTests(SmsModeTests):%0A def setUp(self):%0A super().setUp()%0A self.result = self.sms_mode.enable(duration=42, registration_id=420)%0A%0A def test_result_is_True(self):%0A self.assertTrue(self.result)%0A%0A def test_payload_is_correct(self):%0A payload = %7B'duration': 42, 'registration_id': 420%7D%0A self.assert_kwargs(self.m_session.post, json=payload)%0A%0A%0Aclass DisableSmsModeTests(SmsModeTests):%0A def setUp(self):%0A super().setUp()%0A self.result = self.sms_mode.disable()%0A%0A def test_result_is_True(self):%0A self.assertTrue(self.result)%0A
063899021158fe872745b335595b3094db9834d8
Add a test for 'version.
pycket/test/test_version.py
pycket/test/test_version.py
Python
0.000055
@@ -0,0 +1,251 @@ +#! /usr/bin/env python%0A# -*- coding: utf-8 -*-%0A#%0A# Test the version here.%0A#%0A%0Aimport pytest%0Afrom pycket.test.testhelper import check_equal%0A%0AEXPECTED_VERSION='6.1.1.8'%0A%0A%0Adef test_version():%0A check_equal('(version)', '%22%25s%22' %25 EXPECTED_VERSION)%0A%0A# EOF%0A
e940963a6372a4de1a4a28eff1854716f47471e5
Add deploy script
conda-recipe/deploy.py
conda-recipe/deploy.py
Python
0.000001
@@ -0,0 +1,2429 @@ +#!/usr/bin/env python%0A%0A%0A%22%22%22%0ADeploy dbcollection to pypi and conda.%0A%22%22%22%0A%0A%0Aimport os%0Aimport shutil%0Aimport subprocess%0A%0A%0A# PyPi%0Aprint('PyPi: Upload sdist...')%0Amsg1 = subprocess.run(%5B%22python%22, 'setup.py', 'sdist', 'upload'%5D, stdout=subprocess.PIPE)%0Aprint('PyPi: Upload bdist_wheel...')%0Amsg2 = subprocess.run(%5B%22python%22, 'setup.py', 'bdist_wheel', 'upload'%5D, stdout=subprocess.PIPE)%0A%0A# Conda%0Apython_versions = %5B%222.7%22, %223.5%22, %223.6%22%5D%0Afor i, pyver in enumerate(python_versions):%0A print('%5CnAnaconda: Start build %7B%7D/%7B%7D'.format(i+1, len(python_versions)))%0A print(' %3E Python version: %7B%7D'.format(pyver))%0A%0A temp_output_dir = 'output_build'%0A print(' %3E Saving artifacts to dir: %7B%7D'.format(temp_output_dir))%0A if os.path.exists(temp_output_dir):%0A shutil.rmtree(temp_output_dir, ignore_errors=True)%0A%0A # build conda%0A print(' %3E Build conda recipe...')%0A cmd = %5B%22conda%22, 'build', '--python=%7B%7D'.format(pyver), '--no-anaconda-upload', 'conda-recipe'%5D%0A msg = subprocess.run(cmd, stdout=subprocess.PIPE)%0A%0A # parse string message%0A print(' %3E Parse conda artifact file name + path...')%0A msg_s = str(msg)%0A str_ini = %22If you want to upload package(s) to anaconda.org later, type:%5C%5Cn%5C%5Cnanaconda upload %22%0A str_end = %22%5C%5Cn%5C%5Cn# To have conda build upload to anaconda.org automatically%22%0A ini_id = msg_s.find(str_ini) + len(str_ini)%0A end_id = msg_s.find(str_end)%0A artifact_fname = msg_s%5Bini_id:end_id%5D%0A print(' %3E Artifact name: %7B%7D'.format(artifact_fname))%0A%0A%0A # convert to all platforms%0A print(' %3E Convert artifact to all platforms...')%0A msg = subprocess.run(%5B%22conda%22, 'convert', %22-p%22, %22all%22, artifact_fname, %22-o%22, temp_output_dir%5D, stdout=subprocess.PIPE)%0A%0A # upload to anaconda%0A print(' %3E Upload all artifact to all platforms...')%0A print(' -- Uploading artifact: %7B%7D'.format(artifact_fname))%0A msg_upload = subprocess.run(%5B%22anaconda%22, %22upload%22, artifact_fname%5D, stdout=subprocess.PIPE)%0A for root, dirs, files in os.walk(temp_output_dir):%0A if any(files):%0A for fname in files:%0A if fname.endswith('.tar.bz2'):%0A print(' -- Uploading artifact: %7B%7D '.format(root + '/' + fname))%0A msg = subprocess.run(%5B%22anaconda%22, 'upload', root + '/' + fname%5D, stdout=subprocess.PIPE)%0A%0A%0Aprint('%5CnRemoving temp dir: %7B%7D'.format(temp_output_dir))%0Aif os.path.exists(temp_output_dir):%0A shutil.rmtree(temp_output_dir, ignore_errors=True)
b52ba28a8315a0cdeda7593d087607f582f77f18
Create __init__.py
model/__init__.py
model/__init__.py
Python
0.000429
@@ -0,0 +1,20 @@ +__version__='0.0.0'%0A
721720b1f4d63f1368714f764794c8d406e4982d
Add to_data test
tests/test_firebase.py
tests/test_firebase.py
Python
0.000004
@@ -0,0 +1,168 @@ +import pytest%0Aimport linkatos.firebase as fb%0A%0A%0Adef test_to_data():%0A url = 'https://foo.com'%0A data = %7B'url': 'https://foo.com'%7D%0A assert fb.to_data(url) == data%0A
35e6e986ee41c0d951d6c56451abb49086ace30e
Enable text search on secondaries. PYTHON-530
pymongo/read_preferences.py
pymongo/read_preferences.py
# Copyright 2012 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License",
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Utilities for choosing which member of a replica set to read from."""

import random

from pymongo.errors import ConfigurationError


class ReadPreference:
    """An enum that defines the read preference modes supported by PyMongo.
    Used in three cases:

    :class:`~pymongo.mongo_client.MongoClient` connected to a single host:

    * `PRIMARY`: Queries are allowed if the host is standalone or the replica
      set primary.
    * All other modes allow queries to standalone servers, to the primary, or
      to secondaries.

    :class:`~pymongo.mongo_client.MongoClient` connected to a mongos, with a
    sharded cluster of replica sets:

    * `PRIMARY`: Queries are sent to the primary of a shard.
    * `PRIMARY_PREFERRED`: Queries are sent to the primary if available,
      otherwise a secondary.
    * `SECONDARY`: Queries are distributed among shard secondaries. An error
      is raised if no secondaries are available.
    * `SECONDARY_PREFERRED`: Queries are distributed among shard secondaries,
      or the primary if no secondary is available.
    * `NEAREST`: Queries are distributed among all members of a shard.

    :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient`:

    * `PRIMARY`: Queries are sent to the primary of the replica set.
    * `PRIMARY_PREFERRED`: Queries are sent to the primary if available,
      otherwise a secondary.
    * `SECONDARY`: Queries are distributed among secondaries. An error is
      raised if no secondaries are available.
    * `SECONDARY_PREFERRED`: Queries are distributed among secondaries,
      or the primary if no secondary is available.
    * `NEAREST`: Queries are distributed among all members.
    """

    PRIMARY = 0
    PRIMARY_PREFERRED = 1
    SECONDARY = 2
    SECONDARY_ONLY = 2
    SECONDARY_PREFERRED = 3
    NEAREST = 4


# For formatting error messages
modes = {
    ReadPreference.PRIMARY: 'PRIMARY',
    ReadPreference.PRIMARY_PREFERRED: 'PRIMARY_PREFERRED',
    ReadPreference.SECONDARY: 'SECONDARY',
    ReadPreference.SECONDARY_PREFERRED: 'SECONDARY_PREFERRED',
    ReadPreference.NEAREST: 'NEAREST',
}

_mongos_modes = [
    'primary',
    'primaryPreferred',
    'secondary',
    'secondaryPreferred',
    'nearest',
]


def mongos_mode(mode):
    return _mongos_modes[mode]


def mongos_enum(enum):
    return _mongos_modes.index(enum)


def select_primary(members):
    for member in members:
        if member.is_primary:
            if member.up:
                return member
            else:
                return None

    return None


def select_member_with_tags(members, tags, secondary_only, latency):
    candidates = []

    for candidate in members:
        if not candidate.up:
            continue

        if secondary_only and candidate.is_primary:
            continue

        if not (candidate.is_primary or candidate.is_secondary):
            # In RECOVERING or similar state
            continue

        if candidate.matches_tags(tags):
            candidates.append(candidate)

    if not candidates:
        return None

    # ping_time is in seconds
    fastest = min([candidate.get_avg_ping_time() for candidate in candidates])
    near_candidates = [
        candidate for candidate in candidates
        if candidate.get_avg_ping_time() - fastest < latency / 1000.]

    return random.choice(near_candidates)


def select_member(
    members,
    mode=ReadPreference.PRIMARY,
    tag_sets=None,
    latency=15
):
    """Return a Member or None.
    """
    if tag_sets is None:
        tag_sets = [{}]

    # For brevity
    PRIMARY = ReadPreference.PRIMARY
    PRIMARY_PREFERRED = ReadPreference.PRIMARY_PREFERRED
    SECONDARY = ReadPreference.SECONDARY
    SECONDARY_PREFERRED = ReadPreference.SECONDARY_PREFERRED
    NEAREST = ReadPreference.NEAREST

    if mode == PRIMARY:
        if tag_sets != [{}]:
            raise ConfigurationError("PRIMARY cannot be combined with tags")
        return select_primary(members)

    elif mode == PRIMARY_PREFERRED:
        # Recurse.
        candidate_primary = select_member(members, PRIMARY, [{}], latency)
        if candidate_primary:
            return candidate_primary
        else:
            return select_member(members, SECONDARY, tag_sets, latency)

    elif mode == SECONDARY:
        for tags in tag_sets:
            candidate = select_member_with_tags(members, tags, True, latency)
            if candidate:
                return candidate

        return None

    elif mode == SECONDARY_PREFERRED:
        # Recurse.
        candidate_secondary = select_member(
            members, SECONDARY, tag_sets, latency)
        if candidate_secondary:
            return candidate_secondary
        else:
            return select_member(members, PRIMARY, [{}], latency)

    elif mode == NEAREST:
        for tags in tag_sets:
            candidate = select_member_with_tags(members, tags, False, latency)
            if candidate:
                return candidate

        # Ran out of tags.
        return None

    else:
        raise ConfigurationError("Invalid mode %s" % repr(mode))


"""Commands that may be sent to replica-set secondaries, depending on
   ReadPreference and tags. All other commands are always run on the primary.
"""
secondary_ok_commands = frozenset([
    "group", "aggregate", "collstats", "dbstats", "count", "distinct",
    "geonear", "geosearch", "geowalk", "mapreduce", "getnonce",
    "authenticate",
])


class MovingAverage(object):
    def __init__(self, samples):
        """Immutable structure to track a 5-sample moving average.
        """
        self.samples = samples[-5:]
        assert self.samples
        self.average = sum(self.samples) / float(len(self.samples))

    def clone_with(self, sample):
        """Get a copy of this instance plus a new sample"""
        return MovingAverage(self.samples + [sample])

    def get(self):
        return self.average
Python
0.000003
@@ -6072,16 +6072,28 @@ icate%22,%0A + %22text%22,%0A %5D)%0A%0A%0Acla
4b2df28a979312875d5a72a1713f535b0e34a1e6
fix config mocking.
solr/test_solr.py
solr/test_solr.py
# (C) Datadog, Inc. 2010-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)

# stdlib
import threading
import time
from types import ListType
import unittest

# 3p
from nose.plugins.attrib import attr

# project
from aggregator import MetricsAggregator
from dogstatsd import Server
from jmxfetch import JMXFetch
from shared.test.common import Fixtures

STATSD_PORT = 8127


class DummyReporter(threading.Thread):
    def __init__(self, metrics_aggregator):
        threading.Thread.__init__(self)
        self.finished = threading.Event()
        self.metrics_aggregator = metrics_aggregator
        self.interval = 10
        self.metrics = None
        self.finished = False
        self.start()

    def run(self):
        while not self.finished:
            time.sleep(self.interval)
            self.flush()

    def flush(self):
        metrics = self.metrics_aggregator.flush()
        if metrics:
            self.metrics = metrics


@attr(requires='solr')
class JMXTestCase(unittest.TestCase):
    def setUp(self):
        aggregator = MetricsAggregator("test_host")
        self.server = Server(aggregator, "localhost", STATSD_PORT)
        self.reporter = DummyReporter(aggregator)
        self.t1 = threading.Thread(target=self.server.start)
        self.t1.start()

        confd_path = Fixtures.directory()
        self.jmx_daemon = JMXFetch(confd_path, {'dogstatsd_port': STATSD_PORT})
        self.t2 = threading.Thread(target=self.jmx_daemon.run)
        self.t2.start()

    def tearDown(self):
        self.server.stop()
        self.reporter.finished = True
        self.jmx_daemon.terminate()

    def testTomcatMetrics(self):
        count = 0
        while self.reporter.metrics is None:
            time.sleep(1)
            count += 1
            if count > 25:
                raise Exception("No metrics were received in 25 seconds")

        metrics = self.reporter.metrics

        self.assertTrue(isinstance(metrics, ListType))
        self.assertTrue(len(metrics) > 8, metrics)
        self.assertEquals(len([t for t in metrics if 'instance:solr_instance' in t['tags'] and t['metric'] == "jvm.thread_count"]), 1, metrics)
        self.assertTrue(len([t for t in metrics if "jvm." in t['metric'] and 'instance:solr_instance' in t['tags']]) > 4, metrics)
        self.assertTrue(len([t for t in metrics if "solr." in t['metric'] and 'instance:solr_instance' in t['tags']]) > 4, metrics)
Python
0
@@ -180,16 +180,28 @@ unittest +%0Aimport mock %0A%0A# 3p%0Af @@ -235,16 +235,31 @@ ort attr +%0Aimport logging %0A%0A# proj @@ -312,101 +312,694 @@ rom -dogstatsd import Server%0Afrom jmxfetch import JMXFetch%0Afrom shared.test.common import Fixtures +tests.checks.common import Fixtures%0A%0A%0ALOG_INFO = %7B%0A 'log_level': None,%0A 'log_to_event_viewer': False,%0A 'log_to_syslog': False,%0A 'syslog_host': None,%0A 'syslog_port': None,%0A 'log_level': logging.INFO,%0A 'disable_file_logging': True,%0A 'collector_log_file': '/var/log/datadog/collector.log',%0A 'forwarder_log_file': '/var/log/datadog/forwarder.log',%0A 'dogstatsd_log_file': '/var/log/datadog/dogstatsd.log',%0A 'jmxfetch_log_file': '/var/log/datadog/jmxfetch.log',%0A 'go-metro_log_file': '/var/log/datadog/go-metro.log',%0A%7D%0A%0Awith mock.patch('config.get_logging_config', return_value=LOG_INFO):%0A from dogstatsd import Server%0A from jmxfetch import JMXFetch%0A %0A%0AST @@ -1016,17 +1016,16 @@ = 8127%0A%0A -%0A class Du
fe37335645993ad10c9902aaaaf0ca2c53912d49
Create Average Movies rating etl
movies_avg_etl.py
movies_avg_etl.py
Python
0
@@ -0,0 +1,1882 @@ +import pyspark%0A%0Aspark = (%0A pyspark.sql.SparkSession.builder.appName(%22FromDatabase%22)%0A .config(%22spark.driver.extraClassPath%22, %22%3Cdriver_location%3E/postgresql-42.2.18.jar%22)%0A .getOrCreate()%0A)%0A%0A%0A# Read table from db using Spark JDBC%0Adef extract_movies_to_df():%0A movies_df = (%0A spark.read.format(%22jdbc%22)%0A .option(%22url%22, %22jdbc:postgresql://localhost:5432/etl_pipeline%22)%0A .option(%22dbtable%22, %22movies%22)%0A .option(%22user%22, %22%3Cusername%22)%0A .option(%22password%22, %22%3Cpassword%3E%22)%0A .option(%22driver%22, %22org.postgresql.Driver%22)%0A .load()%0A )%0A return movies_df%0A%0A%0A# Read users table from db using Spark JDBC%0A%0A%0Adef extract_users_to_df():%0A users_df = (%0A spark.read.format(%22jdbc%22)%0A .option(%22url%22, %22jdbc:postgresql://localhost:5432/etl_pipeline%22)%0A .option(%22dbtable%22, %22users%22)%0A .option(%22user%22, %22%3Cusername%22)%0A .option(%22password%22, %22%3Cpassword%3E%22)%0A .option(%22driver%22, %22org.postgresql.Driver%22)%0A .load()%0A )%0A return users_df%0A%0A%0A# transforming tables%0Adef transform_avg_ratings(movies_df, users_df):%0A avg_rating = users_df.groupby(%22movie_id%22).mean(%22rating%22)%0A # join movies_df and avg_rating table on id%0A df = movies_df.join(avg_rating, movies_df.id == avg_rating.movies_id)%0A df = df.drop(%22movie_id%22)%0A return df%0A%0A%0A# Write the result into avg_ratings table in db%0Adef load_df_to_db(df):%0A mode = %22overwrite%22%0A url = %22jdbc:postgresql://localhost:5432/etl_pipeline%22%0A spark.write()%0A properties = %7B%0A %22user%22: %22%3Cusername%3E%22,%0A %22password%22: %22%3Cpassword%3E%22,%0A %22driver%22: %22org.postgresql.Driver%22,%0A %7D%0A df.write.jdbc(url=url, table=%22avg_ratings%22, mode=mode, properties=properties)%0A%0A%0Aif __name__ == %22__main__%22:%0A movies_df = extract_movies_to_df()%0A users_df = extract_users_to_df()%0A ratings_df = transform_avg_ratings(movies_df, users_df)%0A load_df_to_db(ratings_df)%0A
e072d06ce649765b9c7f8be13af09cea0ef60a0b
Improve autonested_transaction
neutron/db/api.py
neutron/db/api.py
# Copyright 2011 VMware, Inc.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import contextlib

from oslo_config import cfg
from oslo_db import api as oslo_db_api
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import session
from oslo_utils import excutils
from oslo_utils import uuidutils
from sqlalchemy import exc

from neutron.common import exceptions as n_exc
from neutron.db import common_db_mixin


_FACADE = None

MAX_RETRIES = 10


is_deadlock = lambda e: isinstance(e, db_exc.DBDeadlock)
retry_db_errors = oslo_db_api.wrap_db_retry(
    max_retries=MAX_RETRIES,
    retry_on_request=True,
    exception_checker=is_deadlock
)


@contextlib.contextmanager
def exc_to_retry(exceptions):
    try:
        yield
    except Exception as e:
        with excutils.save_and_reraise_exception() as ctx:
            if isinstance(e, exceptions):
                ctx.reraise = False
                raise db_exc.RetryRequest(e)


def _create_facade_lazily():
    global _FACADE

    if _FACADE is None:
        _FACADE = session.EngineFacade.from_config(cfg.CONF, sqlite_fk=True)

    return _FACADE


def get_engine():
    """Helper method to grab engine."""
    facade = _create_facade_lazily()
    return facade.get_engine()


def dispose():
    # Don't need to do anything if an enginefacade hasn't been created
    if _FACADE is not None:
        get_engine().pool.dispose()


def get_session(autocommit=True, expire_on_commit=False, use_slave=False):
    """Helper method to grab session."""
    facade = _create_facade_lazily()
    return facade.get_session(autocommit=autocommit,
                              expire_on_commit=expire_on_commit,
                              use_slave=use_slave)


@contextlib.contextmanager
def autonested_transaction(sess):
    """This is a convenience method to not bother with 'nested' parameter."""
    try:
        session_context = sess.begin_nested()
    except exc.InvalidRequestError:
        session_context = sess.begin(subtransactions=True)
    finally:
        with session_context as tx:
            yield tx


# Common database operation implementations
def get_object(context, model, **kwargs):
    with context.session.begin(subtransactions=True):
        return (common_db_mixin.model_query(context, model)
                .filter_by(**kwargs)
                .first())


def get_objects(context, model, **kwargs):
    with context.session.begin(subtransactions=True):
        return (common_db_mixin.model_query(context, model)
                .filter_by(**kwargs)
                .all())


def create_object(context, model, values):
    with context.session.begin(subtransactions=True):
        if 'id' not in values:
            values['id'] = uuidutils.generate_uuid()
        db_obj = model(**values)
        context.session.add(db_obj)
    return db_obj.__dict__


def _safe_get_object(context, model, id, key='id'):
    db_obj = get_object(context, model, **{key: id})
    if db_obj is None:
        raise n_exc.ObjectNotFound(id=id)
    return db_obj


def update_object(context, model, id, values, key=None):
    with context.session.begin(subtransactions=True):
        kwargs = {}
        if key:
            kwargs['key'] = key
        db_obj = _safe_get_object(context, model, id, **kwargs)
        db_obj.update(values)
        db_obj.save(session=context.session)
    return db_obj.__dict__


def delete_object(context, model, id, key=None):
    with context.session.begin(subtransactions=True):
        kwargs = {}
        if key:
            kwargs['key'] = key
        db_obj = _safe_get_object(context, model, id, **kwargs)
        context.session.delete(db_obj)
Python
0.000006
@@ -854,35 +854,8 @@ tils -%0Afrom sqlalchemy import exc %0A%0Afr @@ -2375,27 +2375,41 @@ ter.%22%22%22%0A -try +if sess.is_active :%0A se @@ -2438,52 +2438,30 @@ egin -_ +( nested -( +=True )%0A e -xcept exc.InvalidRequestError +lse :%0A @@ -2521,25 +2521,8 @@ ue)%0A - finally:%0A @@ -2545,28 +2545,24 @@ text as tx:%0A - yiel
c95bfb10f87bd0a637d0ad790d484b7957441371
Add WSGI support.
pypi.wsgi
pypi.wsgi
Python
0
@@ -0,0 +1,1559 @@ +#!/usr/bin/python%0Aimport sys,os%0Aprefix = os.path.dirname(__file__)%0Asys.path.insert(0, prefix)%0Aimport cStringIO, webui, store, config%0A%0Astore.keep_conn = True%0A%0Aclass Request:%0A%0A def __init__(self, environ, start_response):%0A self.start_response = start_response%0A self.rfile = cStringIO.StringIO(environ%5B'wsgi.input'%5D.read())%0A self.wfile = cStringIO.StringIO()%0A self.config = config.Config(prefix+'/config.ini', 'webui')%0A %0A def send_response(self, code, message=''):%0A self.status = '%25s %25s' %25 (code, message)%0A self.headers = %5B%5D%0A %0A def send_header(self, keyword, value):%0A self.headers.append((keyword, value))%0A%0A def set_content_type(self, content_type):%0A self.send_header('Content-Type', content_type)%0A%0A def end_headers(self):%0A self.start_response(self.status, self.headers)%0A%0Adef debug(environ, start_response):%0A if environ%5B'PATH_INFO'%5D.startswith(%22/auth%22) and %5C%0A %22HTTP_AUTHORIZATION%22 not in environ:%0A start_response(%22401 login%22,%0A %5B('WWW-Authenticate', 'Basic realm=%22foo%22')%5D)%0A return%0A start_response(%22200 ok%22, %5B('Content-type', 'text/plain')%5D)%0A environ = environ.items()%0A environ.sort()%0A for k,v in environ:%0A yield %22%25s=%25s%5Cn%22 %25 (k, v)%0A return%0A%0A%0Adef application(environ, start_response):%0A if %22HTTP_AUTHORIZATION%22 in environ:%0A environ%5B%22HTTP_CGI_AUTHORIZATION%22%5D = environ%5B%22HTTP_AUTHORIZATION%22%5D%0A r = Request(environ, start_response)%0A webui.WebUI(r, environ).run()%0A return %5Br.wfile.getvalue()%5D%0A
39313cd933e0038b9a9bfa8b6b4cb50e3707d455
add k_min.py
Algo-1/week2/7-K-Min/k_min.py
Algo-1/week2/7-K-Min/k_min.py
Python
0.000104
@@ -0,0 +1,1328 @@ +class KMin:%0A%0A # Quick sort%0A%0A @staticmethod%0A def swap(numbers, i, j):%0A temp = numbers%5Bi%5D%0A numbers%5Bi%5D = numbers%5Bj%5D%0A numbers%5Bj%5D = temp%0A%0A # The last element is a pivot, all smaller elements are to left of it%0A # and greater elements to right%0A @staticmethod%0A def partition(numbers, l, r):%0A x = numbers%5Br%5D%0A i = l%0A%0A for j in range(l, r):%0A if numbers%5Bj%5D %3C= x:%0A KMin.swap(numbers, i, j)%0A i += 1%0A%0A KMin.swap(numbers, i, r)%0A%0A return i%0A%0A @staticmethod%0A def kthSmallest(numbers, l, r, k):%0A if k %3E 0 and k %3C= r - l + 1:%0A pos = KMin.partition(numbers, l, r)%0A%0A if pos - l == k - 1:%0A return numbers%5Bpos%5D%0A%0A if pos - l %3E k - 1:%0A return KMin.kthSmallest(numbers, l, pos - 1, k)%0A%0A return KMin.kthSmallest(numbers, pos + 1, r, k - pos + l - 1)%0A%0A # Finds the k-th minimum element in an unsorted collection.%0A # numbers - %5Bint%5D%0A # k - int%0A @staticmethod%0A def kthMinimum(numbers, k):%0A return KMin.kthSmallest(numbers, 0, len(numbers) - 1, k)%0A%0A%0Adef main():%0A%0A numbers = %5B33, 8, 5, 2, 3, 6, 1, 4, 9, 99%5D%0A%0A for i in range(1, len(numbers) + 1):%0A print(KMin.kthMinimum(numbers, i))%0A%0Aif __name__ == '__main__':%0A main()%0A
f44fd9df7ac7fa5e553e99d98c1376439a33ffc8
Change device pull to handle root, and rename the local file from results.db to history.db
wlauto/workloads/androbench/__init__.py
wlauto/workloads/androbench/__init__.py
#    Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import os
import sqlite3

from wlauto import AndroidUiAutoBenchmark


class Androbench(AndroidUiAutoBenchmark):
    name = 'androbench'
    description = """Androbench measures the storage performance of device"""
    package = 'com.andromeda.androbench2'
    activity = '.main'
    run_timeout = 10 * 60

    def update_result(self, context):
        super(Androbench, self).update_result(context)
        dbn = 'databases/history.db'
        db = self.device.path.join(self.device.package_data_directory, self.package, dbn)
        host_results = os.path.join(context.output_directory, 'results.db')
        self.device.pull_file(db, host_results)
        qs = 'select * from history'
        conn = sqlite3.connect(host_results)
        c = conn.cursor()
        c.execute(qs)
        results = c.fetchone()
        context.result.add_metric('Sequential Read ', results[8], 'MB/s')
        context.result.add_metric('Sequential Write ', results[9], 'MB/s')
        context.result.add_metric('Random Read ', results[10], 'MB/s')
        context.result.add_metric('Random Write ', results[12], 'MB/s')
Python
0
@@ -1168,23 +1168,23 @@ ctory, ' -results +history .db')%0A @@ -1219,32 +1219,46 @@ db, host_results +, as_root=True )%0A qs = '
f700ca39535c5eb14015dd84f4bc0dad2b086d23
Add ex_fzf.py
examples/ex_fzf.py
examples/ex_fzf.py
Python
0.000005
@@ -0,0 +1,885 @@ +#!/usr/bin/env python%0Aimport string%0Aimport textwrap%0Aimport pprint%0A%0Afrom dynmen import Menu%0A%0A%0Afzf = Menu(command=('fzf',))%0A%0Aexampl_inp_dict = vars(string)%0Aexampl_inp_dict = %7Bk:v for k,v in exampl_inp_dict.items() if not k.startswith('_')%7D%0A%0Adef print_obj(obj, prefix=' '):%0A txt = pprint.pformat(obj)%0A lines = %5B%5D%0A for line in txt.splitlines():%0A line = textwrap.indent(line, prefix)%0A lines.append(line)%0A print('%5Cn'.join(lines))%0A%0Adef run_n_print(entries, fn_str):%0A fn = globals()%5Bfn_str.split('.')%5B0%5D%5D%0A for attr in fn_str.split('.')%5B1:%5D:%0A fn = getattr(fn, attr)%0A print(%22%5CnLAUNCHING '%7B%7D' with -%22.format(fn_str))%0A print_obj(entries)%0A output = fn(entries)%0A print('OUTPUT IS -')%0A print_obj(output)%0A return output%0A%0Arun_n_print(exampl_inp_dict, 'fzf')%0Arun_n_print(exampl_inp_dict, 'fzf.sort')%0Arun_n_print(list(exampl_inp_dict), 'fzf')%0A%0A
45edceb65a9cac9f61215ad77e9c048d092c0b57
add examples/roster.py
examples/roster.py
examples/roster.py
Python
0
@@ -0,0 +1,3276 @@ +%0Aimport dbus%0Aimport dbus.glib%0Aimport gobject%0Aimport sys%0A%0Afrom account import read_account, connect%0A%0Afrom telepathy.client.channel import Channel%0Afrom telepathy.constants import (%0A CONNECTION_HANDLE_TYPE_CONTACT, CONNECTION_HANDLE_TYPE_LIST,%0A CONNECTION_STATUS_CONNECTED, CONNECTION_STATUS_DISCONNECTED)%0Afrom telepathy.errors import NotAvailable%0Afrom telepathy.interfaces import (%0A CHANNEL_INTERFACE_GROUP, CHANNEL_TYPE_CONTACT_LIST, CONN_INTERFACE)%0A%0Adef print_members(conn, chan):%0A current, local_pending, remote_pending = (%0A chan%5BCHANNEL_INTERFACE_GROUP%5D.GetAllMembers())%0A%0A for member in current:%0A print ' - %25s' %25 (%0A conn%5BCONN_INTERFACE%5D.InspectHandles(%0A CONNECTION_HANDLE_TYPE_CONTACT, %5Bmember%5D)%5B0%5D)%0A%0A if not current:%0A print ' (none)'%0A%0Aclass RosterClient:%0A def __init__(self, conn):%0A self.conn = conn%0A%0A conn%5BCONN_INTERFACE%5D.connect_to_signal(%0A 'StatusChanged', self.status_changed_cb)%0A%0A def _request_list_channel(self, name):%0A handle = self.conn%5BCONN_INTERFACE%5D.RequestHandles(%0A CONNECTION_HANDLE_TYPE_LIST, %5Bname%5D)%5B0%5D%0A chan_path = self.conn%5BCONN_INTERFACE%5D.RequestChannel(%0A CHANNEL_TYPE_CONTACT_LIST, CONNECTION_HANDLE_TYPE_LIST,%0A handle, True)%0A return Channel(self.conn._dbus_object._named_service, chan_path)%0A%0A def status_changed_cb(self, state, reason):%0A if state == CONNECTION_STATUS_DISCONNECTED:%0A print 'disconnected: %25s' %25 reason%0A self.quit()%0A return%0A%0A if state != CONNECTION_STATUS_CONNECTED:%0A return%0A%0A print 'connected'%0A%0A for name in ('subscribe', 'publish', 'hide', 'allow', 'deny', 'known'):%0A try:%0A chan = self._request_list_channel(name)%0A except dbus.DBusException:%0A print %22'%25s' channel is not available%22 %25 name%0A continue%0A%0A # hack%0A chan._valid_interfaces.add(CHANNEL_INTERFACE_GROUP)%0A%0A print '%25s: members' %25 name%0A print_members(self.conn, chan)%0A%0A chan%5BCHANNEL_INTERFACE_GROUP%5D.connect_to_signal('MembersChanged',%0A lambda *args: self.members_changed_cb(name, *args))%0A%0A print 'waiting for changes'%0A%0A def members_changed_cb(self, name, message, added, removed, local_pending,%0A remote_pending, actor, reason):%0A if added:%0A for handle in added:%0A print '%25s: added: %25d' %25 (name, added)%0A%0A if removed:%0A for handle in removed:%0A print '%25s: removed: %25d' %25 (name, added)%0A%0A def run(self):%0A self.loop = gobject.MainLoop()%0A%0A try:%0A self.loop.run()%0A except KeyboardInterrupt:%0A print 'interrupted'%0A%0A def quit(self):%0A self.loop.quit()%0A%0Aif __name__ == '__main__':%0A assert len(sys.argv) == 2%0A account_file = sys.argv%5B1%5D%0A%0A manager, protocol, account = read_account(account_file)%0A conn = connect(manager, protocol, account)%0A client = RosterClient(conn)%0A%0A print %22connecting%22%0A conn%5BCONN_INTERFACE%5D.Connect()%0A client.run()%0A print %22disconnecting%22%0A%0A try:%0A conn%5BCONN_INTERFACE%5D.Disconnect()%0A except dbus.dbus_bindings.DBusException:%0A pass%0A%0A
7d198f3eaca6a91b731b3e25c0285cd46e72935a
Remove duplicates in authorized origins table
swh/web/common/migrations/0005_remove_duplicated_authorized_origins.py
swh/web/common/migrations/0005_remove_duplicated_authorized_origins.py
Python
0.000001
@@ -0,0 +1,880 @@ +# Copyright (C) 2019 The Software Heritage developers%0A# See the AUTHORS file at the top-level directory of this distribution%0A# License: GNU Affero General Public License version 3, or any later version%0A# See top-level LICENSE file for more information%0A%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations%0A%0Afrom swh.web.common.models import SaveAuthorizedOrigin%0A%0A%0Adef _remove_duplicated_urls_in_authorized_list(apps, schema_editor):%0A sao = SaveAuthorizedOrigin.objects%0A for url in sao.values_list('url', flat=True).distinct():%0A sao.filter(pk__in=sao.filter(%0A url=url).values_list('id', flat=True)%5B1:%5D).delete()%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('swh.web.common', '0004_auto_20190204_1324'),%0A %5D%0A%0A operations = %5B%0A migrations.RunPython(_remove_duplicated_urls_in_authorized_list)%0A %5D%0A
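The migration above deduplicates by keeping the first row per URL; below is a minimal standalone sketch of the same queryset pattern (it assumes a configured Django project with this model, and the function name is made up for illustration):

# Hedged sketch of the per-URL dedup pattern used in the migration above.
from swh.web.common.models import SaveAuthorizedOrigin

def dedup_authorized_origins():
    sao = SaveAuthorizedOrigin.objects
    for url in sao.values_list('url', flat=True).distinct():
        # Keep the first pk per url, delete the remaining duplicates.
        dupes = list(sao.filter(url=url).values_list('id', flat=True)[1:])
        sao.filter(pk__in=dupes).delete()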
91541cf82f435cb261d9debc85a2a8ae6dd74ab1
Add a function to initialize the logging.
xutils/init_logging.py
xutils/init_logging.py
Python
0
@@ -0,0 +1,1286 @@ +# encoding: utf-8%0Afrom __future__ import print_function, absolute_import, unicode_literals, division%0Aimport logging%0A%0A%0Adef init_logging(logger=None, level=%22DEBUG%22, log_file=%22%22, file_config=None, dict_config=None):%0A # Initialize the argument logger with the arguments, level and log_file.%0A if logger:%0A fmt = %22%25(asctime)s - %25(pathname)s - %25(funcName)s - %25(lineno)d - %25(levelname)s - %25(message)s%22%0A datefmt = %22%25Y-%25m-%25d %25H:%25M:%25S%22%0A formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)%0A%0A level = getattr(logging, level.upper())%0A logger.setLevel(level)%0A%0A if log_file:%0A from logging.handlers import TimedRotatingFileHandler%0A handler = TimedRotatingFileHandler(log_file, when=%22midnight%22, interval=1, backupCount=30)%0A else:%0A handler = logging.StreamHandler()%0A handler.setLevel(level)%0A handler.setFormatter(formatter)%0A%0A logger.addHandler(handler)%0A%0A # Initialize logging by the configuration file, file_config.%0A if file_config:%0A logging.config.fileConfig(file_config, disable_existing_loggers=False)%0A%0A # Initialize logging by the dict configuration, dict_config.%0A if dict_config and hasattr(logging.config, %22dictConfig%22):%0A logging.config.dictConfig(dict_config)%0A
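A short usage sketch for the init_logging helper above; the logger name and file path are illustrative, not taken from the commit:

# Hedged usage sketch for init_logging; "myapp" and the path are made up.
import logging
from xutils.init_logging import init_logging

logger = logging.getLogger("myapp")
init_logging(logger, level="INFO", log_file="/tmp/myapp.log")
logger.info("logging is configured")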
507e3bad4e877330eea29675dafb8210ab6bada5
Add tests for file agent
tests/test_agent.py
tests/test_agent.py
Python
0
@@ -0,0 +1,1464 @@ +%22%22%22%0ATests for a agent.%0A%22%22%22%0A%0Aimport io%0Aimport os%0A%0Aimport pytest%0A%0Afrom onirim import action%0Afrom onirim import agent%0Afrom onirim import component%0A%0Adef file_agent(in_str):%0A return agent.File(io.StringIO(in_str), open(os.devnull, %22w%22))%0A%0Adef content():%0A return component.Content(%5B%5D)%0A%0A%[email protected](%0A %22in_str, expected%22,%0A %5B%0A (%22play%5Cn0%5Cn%22, (action.Phase1.play, 0)),%0A (%22discard%5Cn4%5Cn%22, (action.Phase1.discard, 4)),%0A %5D%0A )%0Adef test_file_phase_1_action(in_str, expected):%0A %22%22%22%0A Test input parsing of phase_1_action.%0A %22%22%22%0A assert file_agent(in_str).phase_1_action(content()) == expected%0A%0A%[email protected](%0A %22in_str, expected%22,%0A %5B%0A (%22key%5Cn2%5Cn%22, (action.Nightmare.by_key, %7B%22idx%22: 2%7D)),%0A (%22door%5Cn3%5Cn%22, (action.Nightmare.by_door, %7B%22idx%22: 3%7D)),%0A (%22hand%5Cn%22, (action.Nightmare.by_hand, %7B%7D)),%0A (%22deck%5Cn%22, (action.Nightmare.by_deck, %7B%7D)),%0A %5D%0A )%0Adef test_file_nightmare_action(in_str, expected):%0A %22%22%22%0A Test input parsing of nightmare action.%0A %22%22%22%0A assert file_agent(in_str).nightmare_action(content()) == expected%0A%[email protected](%0A %22in_str, expected%22,%0A %5B%0A (%22yes%5Cn%22, True),%0A (%22no%5Cn%22, False),%0A %5D%0A )%0Adef test_file_open_door(in_str, expected):%0A %22%22%22%0A Test input parsing of open door.%0A %22%22%22%0A assert file_agent(in_str).open_door(content(), None) == expected%0A%0A%0A#def test_file_key_discard_react(in_str, expected):%0A#TODO%0A
c67e1af4f765f143cb1b8420e053c1a9f00edd05
Add migrations for new statuses.
course_discovery/apps/course_metadata/migrations/0168_auto_20190404_1733.py
course_discovery/apps/course_metadata/migrations/0168_auto_20190404_1733.py
Python
0
@@ -0,0 +1,1732 @@ +# -*- coding: utf-8 -*-%0A# Generated by Django 1.11.15 on 2019-04-04 17:33%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations, models%0Aimport django.db.models.manager%0Aimport djchoices.choices%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('course_metadata', '0167_auto_20190403_1606'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterModelManagers(%0A name='course',%0A managers=%5B%0A ('everything', django.db.models.manager.Manager()),%0A %5D,%0A ),%0A migrations.AlterModelManagers(%0A name='courseentitlement',%0A managers=%5B%0A ('everything', django.db.models.manager.Manager()),%0A %5D,%0A ),%0A migrations.AlterModelManagers(%0A name='courserun',%0A managers=%5B%0A ('everything', django.db.models.manager.Manager()),%0A %5D,%0A ),%0A migrations.AlterModelManagers(%0A name='seat',%0A managers=%5B%0A ('everything', django.db.models.manager.Manager()),%0A %5D,%0A ),%0A migrations.AlterField(%0A model_name='courserun',%0A name='status',%0A field=models.CharField(choices=%5B('published', 'Published'), ('unpublished', 'Unpublished'), ('reviewed', 'Reviewed'), ('review_by_legal', 'Awaiting Review from Legal'), ('review_by_internal', 'Awaiting Internal Review')%5D, db_index=True, default='unpublished', max_length=255, validators=%5Bdjchoices.choices.ChoicesValidator(%7B'published': 'Published', 'review_by_internal': 'Awaiting Internal Review', 'review_by_legal': 'Awaiting Review from Legal', 'reviewed': 'Reviewed', 'unpublished': 'Unpublished'%7D)%5D),%0A ),%0A %5D%0A
d308874989667f36da1638f22d6b2d7e823b5ebd
Add script to extract reads or alignments matching a barcode.
extract-barcode.py
extract-barcode.py
Python
0
@@ -0,0 +1,1714 @@ +%22%22%22%0Acode to extract a single cell from a set of alignments or reads marked via Valentine's umis %0Arepository:%0Ahttps://github.com/vals/umis%0A%22%22%22%0Aimport regex as re%0Aimport sys%0Afrom argparse import ArgumentParser%0Afrom pysam import AlignmentFile%0A%0Adef extract_barcode_sam(sam, barcode):%0A%0A    parser_re = re.compile('.*:CELL_(?P%3CCB%3E.*):UMI_(?P%3CMB%3E.*)')%0A    sam_file = AlignmentFile(sam, mode='r')%0A    filter_file = AlignmentFile(%22-%22, mode='wh', template=sam_file)%0A    track = sam_file.fetch(until_eof=True)%0A    for i, aln in enumerate(track):%0A        if aln.is_unmapped:%0A            continue%0A        match = parser_re.match(aln.qname)%0A        CB = match.group('CB')%0A        if CB == barcode:%0A            filter_file.write(aln)%0A%0Adef stream_fastq(file_handler):%0A    ''' Generator which gives all four lines if a fastq read as one string%0A    '''%0A    next_element = ''%0A    for i, line in enumerate(file_handler):%0A        next_element += line%0A        if i %25 4 == 3:%0A            yield next_element%0A            next_element = ''%0A%0Adef extract_barcode_fastq(fastq, barcode):%0A    parser_re = re.compile('.*:CELL_(?P%3CCB%3E.*):UMI_(?P%3CMB%3E.*)')%0A    fastq_file = stream_fastq(open(fastq))%0A    for read in fastq_file:%0A        match = parser_re.match(read)%0A        CB = match.group('CB')%0A        if CB == barcode:%0A            sys.stdout.write(read)%0A%0Aif __name__ == %22__main__%22:%0A    parser = ArgumentParser(%22extract reads/alignments from a single cell%22)%0A    parser.add_argument(%22file%22, help=%22A SAM or FASTQ file%22)%0A    parser.add_argument(%22barcode%22, help=%22barcode of the cell to extract%22)%0A    args = parser.parse_args()%0A    extract_fn = extract_barcode_sam if args.file.endswith(%22.sam%22) else extract_barcode_fastq%0A    extract_fn(args.file, args.barcode)%0A
048d0d7ce30b66af8bf48bcb0cb7f8bfb90fff0c
Add tests for Part, Pin, Bus and Net iterators.
tests/test_iters.py
tests/test_iters.py
Python
0
@@ -0,0 +1,877 @@ +import pytest%0Afrom skidl import *%0Afrom .setup_teardown import *%0A%0Adef test_iters_1():%0A %22%22%22Test bus iterator.%22%22%22%0A b_size = 4%0A b = Bus('chplx', b_size)%0A for hi in b:%0A for lo in b:%0A if hi != lo:%0A led = Part('device','LED')%0A hi += led%5B'A'%5D%0A lo += led%5B'K'%5D%0A for l in b:%0A assert(len(l) == 2 * (b_size-1))%0A%0Adef test_iters_2():%0A %22%22%22Test pin iterator.%22%22%22%0A q = Part('device','Q_NPN_CEB')%0A s = 0%0A for p1 in q:%0A for p2 in q:%0A if p1 != p2:%0A s += 1%0A assert(s == len(q) * (len(q)-1))%0A%0Adef test_iters_3():%0A %22%22%22Test net iterator.%22%22%22%0A b = Net()%0A for hi in b:%0A for lo in b:%0A if hi != lo:%0A led = Part('device','LED')%0A hi += led%5B'A'%5D%0A lo += led%5B'K'%5D%0A for l in b:%0A assert(len(l) == 0)%0A
60fbfa0b440a762fd25f19148313f5ba27d619aa
add a testing file
DataStructures/Trees/main.py
DataStructures/Trees/main.py
Python
0.000001
@@ -0,0 +1,82 @@ +import BST%0A%0A#Environent for testing BST%0A%0Adef main():%0A%0A print 'Testing'%0A%0Amain()%0A
00aad4a302518400dbb936c7e2ce1d7560c5762f
Add files via upload
src/que_.py
src/que_.py
Python
0
@@ -0,0 +1,1697 @@ +class our_queue(object):%0A def __init__(self):%0A %22%22%22initializes queue%22%22%22%0A self.head = self%0A self.tail = self%0A self.next_node = None%0A self.data = None%0A self.size = 0%0A%0A def enqueue(self, val):%0A %22%22%22creates new node, pushes it to bottom of the queue and makes it the tail%22%22%22%0A self.size += 1%0A new_qu = our_queue()%0A if self.head.data is None:%0A self.head = new_qu%0A self.head.next_node = None%0A else:%0A self.tail.next_node = new_qu%0A new_qu.data = val%0A self.tail = new_qu%0A return self.head%0A%0A%0A def dequeue(self):%0A %22%22%22%0A Removes the head of the queue and returns the value.%0A New head is established.%0A %22%22%22%0A current = self.head%0A temp_data = None%0A try:%0A temp_data = current.data%0A if temp_data is None:%0A raise IndexError('que is empty')%0A self.head = current.next_node%0A self.size -= 1%0A return temp_data%0A except AttributeError:%0A raise IndexError('que is empyt')%0A%0A def peek(self):%0A %22%22%22%0A peeks at the data of the head%0A %22%22%22%0A current = self.head%0A temp_data = None%0A try:%0A temp_data = current.data%0A if temp_data is None:%0A raise IndexError('que is empty')%0A return temp_data%0A except AttributeError:%0A raise IndexError('que is empty')%0A%0A%0A def __len__(self):%0A %22%22%22returns the length of the double linked list%22%22%22%0A length = self.size%0A return length%0Atemp = our_queue()%0Atemp.enqueue(4)%0Atemp.enqueue(3)%0Aprint(len(temp))%0A
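A quick usage sketch for the our_queue class above, showing the FIFO behaviour of enqueue, peek and dequeue:

# Hedged usage sketch, assuming our_queue from the file above.
q = our_queue()
q.enqueue('a')
q.enqueue('b')
assert q.peek() == 'a'       # peek leaves the head in place
assert q.dequeue() == 'a'    # FIFO: first in, first out
assert len(q) == 1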
ccc663b3a96268dcdf2256d461a11d845a1044a1
Add the original test case of bug #1469629, formatted according to local conventions.
Lib/test/leakers/test_dictself.py
Lib/test/leakers/test_dictself.py
Python
0
@@ -0,0 +1,232 @@ +'''Test case for %22self.__dict__ = self%22 circular reference bug (#1469629)'''%0A%0Aimport gc%0A%0Aclass LeakyDict(dict):%0A pass%0A%0Adef leak():%0A ld = LeakyDict()%0A ld.__dict__ = ld%0A del ld%0A gc.collect(); gc.collect(); gc.collect()%0A
ceaabf80649a8a83c6ddfc548a3fa369c973e5c6
Complete alg fizzbuzz
alg_fizzbuzz.py
alg_fizzbuzz.py
Python
0.99999
@@ -0,0 +1,429 @@ +from __future__ import absolute_import%0Afrom __future__ import division%0Afrom __future__ import print_function%0A%0A%0Adef fizzbuzz(n):%0A%09ls = %5B%5D%0A%09for i in range(1, n + 1):%0A%09%09if i %25 15 == 0:%0A%09%09%09ls.append('fizzbuzz')%0A%09%09elif i %25 3 == 0:%0A%09%09%09ls.append('fizz')%0A%09%09elif i %25 5 == 0:%0A%09%09%09ls.append('buzz')%0A%09%09else:%0A%09%09%09ls.append(i)%0A%09return ls%0A%0A%0Adef main():%0A%09n = 100%0A%09fizzbuzz_ls = fizzbuzz(n)%0A%09print(fizzbuzz_ls)%0A%0A%0Aif __name__ == '__main__':%0A%09main()%0A
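A tiny check of the fizzbuzz helper above (positions are 0-indexed, values 1-indexed):

# Hedged usage sketch for fizzbuzz above.
ls = fizzbuzz(15)
assert ls[2] == 'fizz'        # value 3
assert ls[4] == 'buzz'        # value 5
assert ls[14] == 'fizzbuzz'   # value 15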
e670901ebaf7422f7a71f78a3dc94730eba5605b
Add a module full of hinting helpers.
fmn/lib/hinting.py
fmn/lib/hinting.py
Python
0
@@ -0,0 +1,2162 @@ +%22%22%22 Helpers for %22datanommer hints%22 for rules.%0A%0ARules can optionally define a %22hint%22 for a datanommer query. For%0Ainstance, if a rule has to do with filtering for bodhi messages, then a%0Aprovided hint could be %7B'category': 'bodhi'%7D. This simply speeds up the%0Aprocess of looking for potential message matches in the history by%0Aletting the database server do some of the work for us. Without this, we%0Ahave to comb through literally every message ever and then try to see%0Awhat matches and what doesn't in python-land: Slow!%0A%0ARules define their hints with the @hint decorator defined here.%0A%0AWhen querying datanommer, the %60%60gather_hinting%60%60 helper here can be used to%0Aconstruct the hint dict for %60%60datanommer.grep(..., **hints)%60%60.%0A%22%22%22%0A%0Aimport collections%0Aimport functools%0A%0Aimport fedmsg.config%0A%0A%0Adef hint(invertible=True, **hints):%0A %22%22%22 A decorator that can optionally hang datanommer hints on a rule. %22%22%22%0A def wrapper(fn):%0A @functools.wraps(fn)%0A def replacement(*args, **kwargs):%0A return fn(*args, **kwargs)%0A%0A # Hang hints on the function.%0A replacement.hints = hints%0A replacement.hinting_invertible = invertible%0A return replacement%0A%0A return wrapper%0A%0A%0Adef prefixed(topic, prefix='org.fedoraproject'):%0A config = fedmsg.config.load_config() # This is memoized for us.%0A return '.'.join(%5Bprefix, config%5B'environment'%5D, topic%5D)%0A%0A%0Adef gather_hinting(filter, valid_paths):%0A %22%22%22 Construct hint arguments for datanommer from a filter. %22%22%22%0A%0A hinting = collections.defaultdict(list)%0A for rule in filter.rules:%0A root, name = rule.code_path.split(':', 1)%0A info = valid_paths%5Broot%5D%5Bname%5D%0A for key, value in info%5B'datanommer-hints'%5D.items():%0A%0A # If the rule is inverted, but the hint is not invertible, then%0A # there is no hinting we can provide. Carry on.%0A if rule.negated and not info%5B'hints-invertible'%5D:%0A continue%0A%0A # Otherwise, construct the inverse hint if necessary%0A if rule.negated:%0A key = 'not_' + key%0A%0A # And tack it on.%0A hinting%5Bkey%5D += value%0A%0A return hinting%0A
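A sketch of how a rule might attach hints with the decorator above; the rule body and the categories keyword are illustrative assumptions, not taken from fmn itself:

# Hedged sketch: the hint kwargs and the rule logic below are assumptions.
from fmn.lib.hinting import hint

@hint(categories=['bodhi'])
def bodhi_rule(config, message):
    # The hint lets datanommer pre-filter; the rule still checks in Python.
    return 'bodhi' in message.get('topic', '')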
1d31feb4fadadc377fbb3cf0f18c38f5a8d39aca
disable tray icon when fail
launcher/1.2.0/start.py
launcher/1.2.0/start.py
#!/usr/bin/env python
# coding:utf-8

import os, sys

current_path = os.path.dirname(os.path.abspath(__file__))
python_path = os.path.abspath( os.path.join(current_path, os.pardir, os.pardir, 'python27', '1.0'))
noarch_lib = os.path.abspath( os.path.join(python_path, 'lib', 'noarch'))
sys.path.append(noarch_lib)

if sys.platform == "linux" or sys.platform == "linux2":
    from gtk_tray import sys_tray
elif sys.platform == "win32":
    current_path = os.path.dirname(os.path.abspath(__file__))
    sys.path.append(current_path)
    from win_tray import sys_tray
elif sys.platform == "darwin":
    darwin_lib = os.path.abspath( os.path.join(python_path, 'lib', 'darwin'))
    sys.path.append(darwin_lib)
    extra_lib = "/System/Library/Frameworks/Python.framework/Versions/2.7/Extras/lib/python"
    sys.path.append(extra_lib)
    osx_lib = os.path.join(python_path, 'lib', 'osx')
    sys.path.append(osx_lib)
    from mac_tray import sys_tray
else:
    from non_tray import sys_tray

import atexit
import logging
import webbrowser

import web_control
import module_init
import update
import config
import setup_win_python

def exit_handler():
    print 'Stopping all modules before exit!'
    module_init.stop_all()
    web_control.stop()

atexit.register(exit_handler)

def main():
    # change path to launcher
    global __file__
    __file__ = os.path.abspath(__file__)
    if os.path.islink(__file__):
        __file__ = getattr(os, 'readlink', lambda x: x)(__file__)
    os.chdir(os.path.dirname(os.path.abspath(__file__)))

    web_control.confirm_xxnet_exit()
    setup_win_python.check_setup()
    module_init.start_all_auto()
    web_control.start()
    #config.load()

    if config.get(["modules", "launcher", "popup_webui"], 1) == 1:
        webbrowser.open("http://127.0.0.1:8085/")

    update.start()

    sys_tray.serve_forever()

    module_init.stop_all()
    sys.exit()

if __name__ == '__main__':
    current_path = os.path.dirname(os.path.abspath(__file__))
    version = current_path.split(os.path.sep)[-1]
    logging.info("launcher version: %s", version)

    try:
        main()
    except KeyboardInterrupt:  # Ctrl + C on console
        sys.exit
Python
0.000001
@@ -915,16 +915,79 @@ -from mac +try:%0A from mac_tray import sys_tray%0A except:%0A from non _tra
719dd9064904d2e94cacd5c9ab349b0658344294
Create weather_proc.py
tmp/weather_proc.py
tmp/weather_proc.py
Python
0.000047
@@ -0,0 +1,1466 @@ +import argparse%0Afrom datetime import datetime%0Aimport numpy as np%0A%0A# timeslot indexing funtion%0Adef get_time_index(timestamp):%0A day = int(timestamp.date().day) - 1%0A slot = int((timestamp.time().hour * 3600 + timestamp.time().minute * 60 + timestamp.time().second) / 600)%0A return day * 144 + slot%0A%0Aap = argparse.ArgumentParser()%0Aap.add_argument(%22-w%22, %22--weather%22, required=True, help=%22Path to the weather data file%22)%0Aap.add_argument(%22-o%22, %22--output%22, required=True, help=%22Path to the output file%22)%0Aargs = vars(ap.parse_args())%0A%0Atotal_timeslots = 19 * 144%0Aweather_dataset = np.zeros((total_timeslots, 11), dtype=%22float%22)%0A%0Aprint('reading weather')%0Aweather_file = open(args%5B'weather'%5D, 'r')%0Afor line in weather_file:%0A weather_data = line.split('%5Ct')%0A time_key = get_time_index(datetime.strptime(weather_data%5B0%5D.strip(), '%25Y-%25m-%25d %25H:%25M:%25S'))%0A%0A if time_key %3E total_timeslots:%0A continue%0A%0A climate = int(weather_data%5B1%5D.strip())%0A temperature = float(weather_data%5B2%5D.strip())%0A pollution = float(weather_data%5B3%5D.strip())%0A%0A weather_dataset%5Btime_key%5D%5Bclimate - 1%5D += 1.%0A weather_dataset%5Btime_key%5D%5B9%5D += temperature%0A weather_dataset%5Btime_key%5D%5B10%5D += pollution%0Aweather_file.close()%0A%0A%0Acount = np.sum(weather_dataset%5B:, 0:9%5D, axis=1)%0Acount%5B count == 0 %5D = 1.;%0A%0Aweather_dataset%5B:, 9%5D = weather_dataset%5B:, 9%5D / count%0Aweather_dataset%5B:, 10%5D = weather_dataset%5B:, 10%5D / count%0Anp.savetxt(args%5B%22output%22%5D, weather_dataset, delimiter=',', fmt='%25f')%0A
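The timeslot index above packs (day-of-month, 10-minute slot) into one integer; a quick worked check of get_time_index:

# Hedged check of the indexing math: Jan 2nd 01:30 -> (2-1)*144 + 9 = 153.
from datetime import datetime
assert get_time_index(datetime(2016, 1, 2, 1, 30, 0)) == 153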
9f2e4aad6d3a4004e80378f44aa178b37dd6da57
add ShellExecError
tpl/errors.py
tpl/errors.py
Python
0.000001
@@ -0,0 +1,400 @@ +# -*- coding:utf-8 -*-%0A%0Afrom gettext import gettext as _%0A%0A%0Aclass BaseError(BaseException):%0A ERR_MSG = _('')%0A%0A%0Aclass ShellExecError(BaseError):%0A ERR_MSG = _('Command exit code not zero. %5CnExit Code:%5Cn%7B%7D.%5CnOut:%5Cn%7B%7D%5CnErr:%5Cn%7B%7D')%0A%0A def __init__(self, exit_code, out, err):%0A self.message = self.ERR_MSG.format(exit_code, out, err)%0A super(ShellExecError, self).__init__(self.message)%0A
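A brief usage sketch for ShellExecError above; the subprocess call is illustrative:

# Hedged usage sketch for ShellExecError.
import subprocess
from tpl.errors import ShellExecError

p = subprocess.Popen(['false'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
if p.returncode != 0:
    raise ShellExecError(p.returncode, out, err)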
3d027df005725cbc5dfbba0262b0c52c5392d7f0
Add whoami resource which decodes token and returns user info from token
app/resources/check_token.py
app/resources/check_token.py
Python
0
@@ -0,0 +1,1173 @@ +from flask import make_response, jsonify%0Afrom flask_restful import Resource, reqparse, marshal, fields%0Afrom app.models import User%0Afrom app.common.auth.token import JWT%0A%0Auser_fields = %7B%0A %22id%22: fields.Integer,%0A %22username%22: fields.String,%0A %22created_at%22: fields.DateTime%0A%7D%0A%0Aclass WhoAmIResource(Resource):%0A %22%22%22 This class takes a token from the Authorization header%0A and then returns the user info for the token if its valid%0A %22%22%22%0A def __init__(self):%0A self.parser = reqparse.RequestParser()%0A self.parser.add_argument(%22Authorization%22,%0A location=%22headers%22,%0A required=True)%0A%0A def get(self):%0A %22%22%22 get method %22%22%22%0A args = self.parser.parse_args()%0A token = args%5B%22Authorization%22%5D # get token from header%0A%0A try:%0A user_id = int(JWT.decode_token(token))%0A user = User.query.get(user_id)%0A%0A return marshal(user, user_fields), 200%0A%0A except ValueError:%0A return make_response(jsonify(%7B%0A %22status%22: %22failed%22,%0A %22message%22: %22Invalid token, please login again%22%0A %7D), 401)%0A
62484ca423d6adfa19a581d7b74472e8475cf817
Create findbro.py
findbro/findbro.py
findbro/findbro.py
Python
0.000001
@@ -0,0 +1,1858 @@ +# findbro.py v0.1%0A# Matches Bro logs against a specified list of UIDs%0A# Can run on N number of Bro logs%0A# Performs no error checking%0A# Should only be run on directories that contains only gzip Bro logs%0A# Best way to collect UIDs is via bro-cut and grep%0A# %0A# Josh Liburdi 2016%0A%0Afrom os import listdir%0Aimport sys%0Aimport gzip%0Aimport argparse%0A%0Adef write_file(fout_name,file_contents):%0A fout = gzip.open(fout_name, 'w')%0A fout.write(file_contents)%0A fout.close()%0A%0Adef proc_bro(fout_name,input,uid_list):%0A file_cache = ''%0A with gzip.open(input) as fin:%0A lines = fin.readlines()%0A file_cache += lines%5B6%5D%0A file_cache += lines%5B7%5D%0A for line in lines%5B8:-1%5D:%0A if any(uid in line for uid in uid_list): %0A file_cache += line%0A%0A if len(file_cache.split('%5Cn')) == 3:%0A print 'No matches in %25s' %25 input%0A else:%0A print '%25d matches in %25s' %25 ( (len(file_cache.split('%5Cn')) - 3), input )%0A write_file(fout_name,file_cache)%0A%0Adef main():%0A parser = argparse.ArgumentParser(description='Merge Bro logs from a single day')%0A parser.add_argument('--bro-dir', '-bd', dest='directory', action='store')%0A parser.add_argument('--label', '-l', dest='label', action='store', default=None)%0A parser.add_argument('--uid', '-u', dest='uid_file', action='store')%0A argsout = parser.parse_args()%0A%0A dir_list = listdir(argsout.directory)%0A log_dict = %7B%7D%0A uid_list = %5Bline.strip() for line in open(argsout.uid_file, 'r')%5D%0A%0A for log_file in dir_list:%0A log_type = log_file.split('.')%5B0%5D%0A log_dict.setdefault(log_type,%5B%5D).append(log_file)%0A%0A for key,list_val in log_dict.iteritems():%0A if argsout.label is None:%0A fout_name = key + '.log.gz' %0A else:%0A fout_name = key + '.' + argsout.label + '.log.gz'%0A %0A for f in list_val:%0A fpath = argsout.directory + f%0A proc_bro(fout_name,fpath,uid_list)%0A%0Aif __name__ == %22__main__%22:%0A main()%0A
f34dabd23faa7d50e507b829e576c1968bdc2d52
Print The Message Happy New Year
src/iterations/exercise3.py
src/iterations/exercise3.py
Python
0.000028
@@ -0,0 +1,364 @@ +# Print The Message %22Happy new Year%22 followed by the name of a person%0A# taken from a list for all people mentioned in the list.%0A%0Adef print_Happy_New_Year_to( listOfPeople ):%0A%09%0A%09for user in listOfPeople:%0A%09%09print 'Happy New Year, ', user%0A%0A%09print 'Done!'%0A%0Adef main( ):%0A%0A%09listOfPeople=%5B'John', 'Mary', 'Luke'%5D%0A%09print_Happy_New_Year_to( listOfPeople )%0A%09quit(0)%0A%0Amain( )
67cb63bcb776b1a89d8e96a7b90c02724ef5b0b6
update migrations
sweettooth/extensions/migrations/0005_auto_20190112_1733.py
sweettooth/extensions/migrations/0005_auto_20190112_1733.py
Python
0.000023
@@ -0,0 +1,774 @@ +# -*- coding: utf-8 -*-%0A# Generated by Django 1.11.18 on 2019-01-12 17:33%0Afrom __future__ import unicode_literals%0A%0Aimport autoslug.fields%0Afrom django.db import migrations, models%0Aimport sweettooth.extensions.models%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('extensions', '0004_auto_20181216_2102'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='extension',%0A name='icon',%0A field=models.ImageField(blank=True, default='', upload_to=sweettooth.extensions.models.make_icon_filename),%0A ),%0A migrations.AlterField(%0A model_name='extension',%0A name='slug',%0A field=autoslug.fields.AutoSlugField(editable=False, populate_from='name'),%0A ),%0A %5D%0A
3aa6ba18655a92753f33622ac80be66eb3b69ff6
Add useful python functions
device_resolutions.py
device_resolutions.py
Python
0.000043
@@ -0,0 +1,1612 @@ +from math import sqrt%0Aimport csv%0A%0Adef as_orientation(x, y, is_portrait=False):%0A    if is_portrait:%0A        return (y, x) if x %3E y else (x, y)%0A    else:%0A        return (x, y) if x %3E y else (y, x)%0A%0Adef as_portrait(x, y):%0A    %22%22%22Given a dimensions, return that pair in portrait orientation%22%22%22%0A    return as_orientation(x, y, is_portrait=True)%0A%0Adef as_landscape(x, y):%0A    %22%22%22Given a dimensions, return that pair in landscape orientation%22%22%22%0A    return as_orientation(x, y, is_portrait=False)%0A%0Adef calc_hypotenuse(a, b):%0A    return sqrt(a**2 + b**2)%0A%0Adef calc_ppi(width_px, height_px, hypotenuse_in):%0A    %22%22%22%0A    Given the diagonal measurement of the screen in inches (%60hypotenuse_in%60),%0A    calculate the pixels-per-inch (ppi) offered by the screen.%0A    %22%22%22%0A    hypotenuse_px = calc_hypotenuse(width_px, height_px)%0A    return hypotenuse_px / hypotenuse_in%0A%0A# @TODO port to CSV%0ACOMMON_ASPECT_RATIOS = (%0A    (3, 4, %223:4%22),%0A    (1, 1, %221:1%22),%0A    (5, 4, %225:4%22),%0A    (4, 3, %224:3%22),%0A    (1.43, 1, %22IMAX 1.43:1%22),%0A    (3, 2, %223:2%22),%0A    (5, 3, %225:3%22),%0A    (14, 9, %2214:9%22),%0A    (16, 10, %2216:10%22),%0A    (16, 9, %2216:9%22),%0A    (17, 9, %2217:9%22),%0A    (21, 9, %2221:9%22),%0A    (1.375, 1, %22Academy Ratio 1.375:1%22),%0A    (2.35, 1, %22CinemaScope 2.35:1%22),%0A    (2.59, 1, %22Cinemara 2.59:1%22),%0A    (2.75, 1, %22Ultra Panavision 70 2.75:1%22),%0A    (2.76, 1, %22MGM 65 2.76:1%22)%0A)%0A%0Adef find_aspect_ratio(x, y):%0A    %22%22%22%0A    Given an aspect ratio, find an aspect ratio description using a list%0A    of common aspect ratios.%0A    %22%22%22%0A    ratio = x / y%0A    for cx, cy, name in COMMON_ASPECT_RATIOS:%0A        if ratio == (cx/cy):%0A            return (ratio, cx, cy, name)%0A    return (ratio, ratio, 1, %22%22)
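A quick worked check of the helpers above (the panel size and resolution are illustrative):

# Hedged usage sketch: a 5.8-inch 2436x1125 panel works out to ~462.6 ppi.
assert abs(calc_ppi(2436, 1125, 5.8) - 462.6) < 0.5
assert as_portrait(1920, 1080) == (1080, 1920)
assert as_landscape(1080, 1920) == (1920, 1080)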
dad5f0a06dd057eccde5a086c84d5c639bb74ae9
Add back peaks for backwards compatibility with a deprecation warning.
dipy/reconst/peaks.py
dipy/reconst/peaks.py
Python
0
@@ -0,0 +1,210 @@ +import warnings%0A%0Aw_s = %22The module 'dipy.reconst.peaks' is deprecated.%22%0Aw_s += %22 Please use the module 'dipy.direction.peaks' instead%22%0Awarnings.warn(w_s, DeprecationWarning)%0A%0Afrom dipy.direction.peaks import *%0A
52a8a0c0def2930667155660c8844bb6836f9ff5
add script for table of orders/country
scripts/country_order_stats.py
scripts/country_order_stats.py
Python
0
@@ -0,0 +1,529 @@ +import sqlite3%0Aimport pandas as pd%0A%0ATICKET_SALE_START_DATE = '2016-01-01'%0A%0Aconn = sqlite3.connect('data/site/p3.db')%0A%0Ac = conn.cursor()%0A%0Aquery = c.execute(%22%22%22%0ASELECT ORDER_ID, COUNTRY_ID%0AFROM assopy_orderitem, assopy_order%0AWHERE assopy_orderitem.order_id == assopy_order.id AND %0Aassopy_order.created %3E= date(?)%22%22%22, (TICKET_SALE_START_DATE,))%0A%0Acountries = query.fetchall()%0A%0Adf = pd.DataFrame(countries, columns=%5B'order_id', 'country'%5D)%0A%0Acounts = df.groupby('country').count().sort_values(by='order_id', ascending=False)%0A%0Aprint(counts)%0A
696b9d1177d24ca6c455052f15e529f4952196a0
add test
@test/test_lang_with.py
@test/test_lang_with.py
Python
0.000002
@@ -0,0 +1,1408 @@ +# -*- coding: utf-8 -*-%0A#%0A# Copyright (c) 2018~2999 - Cologler %[email protected]%3E%0A# ----------%0A#%0A# ----------%0A%0Afrom jasily.lang import with_it, with_objattr, with_objattrs%0A%0Aclass SomeLock:%0A def __init__(self):%0A self.locked = False%0A%0A def __enter__(self):%0A self.locked = True%0A%0A def __exit__(self, *args):%0A self.locked = False%0A%0Adef test_with_it():%0A lock = SomeLock()%0A @with_it(lock)%0A def func():%0A assert lock.locked%0A return 1%0A assert not lock.locked%0A assert func() == 1%0A assert not lock.locked%0A%0Adef test_with_objattr():%0A class X:%0A def __init__(self):%0A self.some_lock = SomeLock()%0A%0A @with_objattr('some_lock')%0A def func(self):%0A assert self.some_lock.locked%0A return 1%0A%0A x = X()%0A assert not x.some_lock.locked%0A assert x.func() == 1%0A assert not x.some_lock.locked%0A%0Adef test_with_objattrs():%0A class X:%0A def __init__(self):%0A self.some_lock_1 = SomeLock()%0A self.some_lock_2 = SomeLock()%0A%0A @with_objattrs('some_lock_1', 'some_lock_2')%0A def func(self):%0A assert self.some_lock_1.locked%0A assert self.some_lock_2.locked%0A return 1%0A%0A x = X()%0A assert not x.some_lock_1.locked%0A assert not x.some_lock_2.locked%0A assert x.func() == 1%0A assert not x.some_lock_1.locked%0A assert not x.some_lock_2.locked%0A
791ce2275933f16cf483dad1b16948441292e61c
add hook for google-api-python-client (#3965)
scripts/hooks/hook-pydrive2.py
scripts/hooks/hook-pydrive2.py
Python
0
@@ -0,0 +1,136 @@ +from PyInstaller.utils.hooks import copy_metadata%0A%0Adatas = copy_metadata(%22pydrive2%22)%0Adatas += copy_metadata(%22google-api-python-client%22)%0A
534db68d8f773c459788650590b6585fc0369e19
create a default permission handler for ObjectOwner
apps/Localizr/permissions.py
apps/Localizr/permissions.py
Python
0
@@ -0,0 +1,304 @@ +from rest_framework.permissions import IsAuthenticated, SAFE_METHODS%0A%0A%0Aclass IsObjectOwner(IsAuthenticated):%0A%0A%09def has_object_permission(self, request, view, obj):%0A%09%09if request.method in SAFE_METHODS:%0A%09%09%09return True%0A%09%09if hasattr(obj, 'created_by'):%0A%09%09%09return obj.created_by == request.user%0A%09%09return False
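A sketch of wiring the permission above into a view; the viewset name is made up and the queryset/serializer are left to the caller:

# Hedged usage sketch for IsObjectOwner; names below are illustrative.
from rest_framework import viewsets
from apps.Localizr.permissions import IsObjectOwner

class ArticleViewSet(viewsets.ModelViewSet):
    # Safe methods pass; writes require obj.created_by == request.user.
    permission_classes = (IsObjectOwner,)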
f7d3ca5d537140e07ff95d082f2a78e86bc06604
Add flip
zl/indicators/flip.py
zl/indicators/flip.py
Python
0.000053
@@ -0,0 +1,2124 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4%0A%0A# Copyright 2013 Jason Koelker%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22);%0A# you may not use this file except in compliance with the License.%0A# You may obtain a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS,%0A# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0A# See the License for the specific language governing permissions and%0A# limitations under the License.%0A%0Aimport collections%0Aimport numbers%0A%0Afrom zipline.transforms import utils as transforms%0A%0A%0ABULL = 'Bull'%0ABEAR = 'Bear'%0A%0A%0Aclass Flip(object):%0A __metaclass__ = transforms.TransformMeta%0A%0A def __init__(self, period=4, setup_price='close_price'):%0A self.period = period%0A self.setup_price = setup_price%0A self.sid_windows = collections.defaultdict(self.create_window)%0A%0A def create_window(self):%0A return FlipWindow(self.period, self.setup_price)%0A%0A def update(self, event):%0A window = self.sid_windows%5Bevent.sid%5D%0A window.update(event)%0A return window()%0A%0A%0Aclass FlipWindow(transforms.EventWindow):%0A def __init__(self, period, setup_price):%0A transforms.EventWindow.__init__(self, window_length=period + 2)%0A%0A self.period = period%0A self.setup_price = setup_price%0A%0A def handle_add(self, event):%0A assert hasattr(event, self.setup_price)%0A value = getattr(event, self.setup_price, None)%0A assert isinstance(value, numbers.Number)%0A%0A def handle_remove(self, event):%0A pass%0A%0A def __call__(self):%0A if len(self.ticks) %3C self.window_length:%0A return%0A%0A Yp = getattr(self.ticks%5B-1%5D, self.setup_price)%0A Xp = getattr(self.ticks%5B-2%5D, self.setup_price)%0A X = getattr(self.ticks%5B0%5D, self.setup_price)%0A Y = getattr(self.ticks%5B1%5D, self.setup_price)%0A%0A if (Xp %3E X) and (Yp %3C Y):%0A return BEAR%0A if (Xp %3C X) and (Yp %3E Y):%0A return BULL%0A
4cea1c1231da1583fb177e976f473fa52b9ec450
Fix SelectJmes documentation
scrapy/loader/processors.py
scrapy/loader/processors.py
""" This module provides some commonly used processors for Item Loaders. See documentation in docs/topics/loaders.rst """ from scrapy.utils.misc import arg_to_iter from scrapy.utils.datatypes import MergeDict from .common import wrap_loader_context class MapCompose(object): def __init__(self, *functions, **default_loader_context): self.functions = functions self.default_loader_context = default_loader_context def __call__(self, value, loader_context=None): values = arg_to_iter(value) if loader_context: context = MergeDict(loader_context, self.default_loader_context) else: context = self.default_loader_context wrapped_funcs = [wrap_loader_context(f, context) for f in self.functions] for func in wrapped_funcs: next_values = [] for v in values: next_values += arg_to_iter(func(v)) values = next_values return values class Compose(object): def __init__(self, *functions, **default_loader_context): self.functions = functions self.stop_on_none = default_loader_context.get('stop_on_none', True) self.default_loader_context = default_loader_context def __call__(self, value, loader_context=None): if loader_context: context = MergeDict(loader_context, self.default_loader_context) else: context = self.default_loader_context wrapped_funcs = [wrap_loader_context(f, context) for f in self.functions] for func in wrapped_funcs: if value is None and self.stop_on_none: break value = func(value) return value class TakeFirst(object): def __call__(self, values): for value in values: if value is not None and value != '': return value class Identity(object): def __call__(self, values): return values class SelectJmes(object): """ Query the input string for the jmespath (given at instantiation), and return the answer Requires : jmespath(https://github.com/jmespath/jmespath) Note: SelectJmes accepts only one input element at a time. """ def __init__(self, json_path): self.json_path = json_path import jmespath self.compiled_path = jmespath.compile(self.json_path) def __call__(self, value): """Query value for the jmespath query and return answer :param str value: a string with JSON data to extract from :return: Element extracted according to jmespath query """ return self.compiled_path.search(value) class Join(object): def __init__(self, separator=u' '): self.separator = separator def __call__(self, values): return self.separator.join(values)
Python
0
@@ -2497,12 +2497,8 @@ ram -str valu @@ -2506,29 +2506,35 @@ : a -string with JSON data +data structure (dict, list) to
e07cc0ea6e56339d117fd5d81c0939b0c658727e
Create cnn.py
Classifying_datasets/Convolutional_Neural_Networks/Convolutional_Neural_Networks/cnn.py
Classifying_datasets/Convolutional_Neural_Networks/Convolutional_Neural_Networks/cnn.py
Python
0.000003
@@ -0,0 +1,2101 @@ +# Convolutional Neural Network%0A%0A# Part 1 - Building the CNN%0A%0A# Importing the Keras libraries and packages%0Afrom keras.models import Sequential%0Afrom keras.layers import Conv2D%0Afrom keras.layers import MaxPooling2D%0Afrom keras.layers import Flatten%0Afrom keras.layers import Dense%0A%0A# Initialising the CNN%0Aclassifier = Sequential()%0A%0A# Step 1 - Convolution%0Aclassifier.add(Conv2D(64, (3, 3), input_shape = (64, 64, 3), activation = 'relu'))%0A%0A# Step 2 - Pooling%0Aclassifier.add(MaxPooling2D(pool_size = (2, 2)))%0A%0A# Adding a second convolutional layer%0A%0Aclassifier.add(Conv2D(64, (3, 3), activation = 'relu'))%0A%0Aclassifier.add(MaxPooling2D(pool_size = (2, 2)))%0A%0A# Step 3 - Flattening%0Aclassifier.add(Flatten())%0A%0A# Step 4 - Full connection%0Aclassifier.add(Dense(units = 512, activation = 'relu'))%0Aclassifier.add(Dense(units = 1, activation = 'sigmoid'))%0A%0A# Compiling the CNN%0Aclassifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = %5B'accuracy'%5D)%0A%0A# Part 2 - Fitting the CNN to the images%0A%0Afrom keras.preprocessing.image import ImageDataGenerator%0A%0Atrain_datagen = ImageDataGenerator(rescale = 1./255,%0A shear_range = 0.2,%0A zoom_range = 0.2,%0A horizontal_flip = True)%0A%0Atest_datagen = ImageDataGenerator(rescale = 1./255)%0A%0Atraining_set = train_datagen.flow_from_directory('dataset/training_set',%0A target_size = (64, 64),%0A batch_size = 32,%0A class_mode = 'binary')%0A%0Atest_set = test_datagen.flow_from_directory('dataset/test_set',%0A target_size = (64, 64),%0A batch_size = 32,%0A class_mode = 'binary')%0A%0Aclassifier.fit_generator(training_set,%0A steps_per_epoch = 8000,%0A epochs = 25,%0A validation_data = test_set,%0A validation_steps = 2000)%0A
5199ee1a544b2aa59895a1b22359d6a9adb765a3
Add .prepare-commit-msg.py
.prepare-commit-msg.py
.prepare-commit-msg.py
Python
0.000011
@@ -0,0 +1,1399 @@ +#!/usr/bin/env python%0A%0A# This script is an optional git hook and will prepend the issue%0A# number to a commit message in the correct format for Github to parse.%0A#%0A# If you wish to use it, create a shortcut to this file in .git/hooks called%0A# 'prepare-commit-msg' e.g. from top folder of your project:%0A# ln -s ../../.prepare-commit-msg.py .git/hooks/prepare-commit-msg%0A#%0A# or, for Windows users:%0A# mklink .git%5Chooks%5Cprepare-commit-msg .prepare-commit-msg.py%0A%0Aimport sys%0Aimport re%0Afrom subprocess import check_output%0A%0A# By default, the hook will check to see if the branch name starts with%0A# 'issue-' and will then prepend whatever follows in the commit message.%0A# e.g. for a branch named 'issue-123', the commit message will start with%0A# '%5B#123%5D'%0A# If you wish to use a diferent prefix on branch names, change it here.%0Aissue_prefix = 'issue-'%0A%0Acommit_msg_filepath = sys.argv%5B1%5D%0Abranch = check_output(%0A %5B'git', 'symbolic-ref', '--short', 'HEAD'%5D%0A).strip().decode(encoding='UTF-8')%0A%0Aif branch.startswith(issue_prefix):%0A issue_number = re.match('%25s(.*)' %25 issue_prefix, branch).group(1)%0A print(%0A f'prepare-commit-msg: Prepending %5B#%7Bissue_number%7D%5D to commit message')%0A%0A with open(commit_msg_filepath, 'r+') as f:%0A content = f.read()%0A f.seek(0, 0)%0A f.write(f'%5B#%7Bissue_number%7D%5D %7Bcontent%7D')%0Aelse:%0A print(%22prepare-commit-msg: No changes made to commit message%22)%0A
ce28c5642c3ab543fc48e2f4f1f0b2f2a62890a2
Add script to extract information for playbook files
src/misc/parse_tool_playbook_yaml.py
src/misc/parse_tool_playbook_yaml.py
Python
0
@@ -0,0 +1,986 @@ +#!/usr/bin/env python%0A# -*- coding: utf-8 -*-%0A%0Aimport sys%0Aimport os%0Aimport argparse%0Aimport re%0Aimport yaml%0A%0Adef get_revision_number(yaml_content, tool_name):%0A for tool in yaml_content%5B'tools'%5D:%0A if tool%5B%22name%22%5D == tool_name:%0A if tool.has_key(%22revision%22):%0A print tool%5B%22revision%22%5D%5B0%5D%0A%0Adef get_owner(yaml_content, tool_name):%0A for tool in yaml_content%5B'tools'%5D:%0A if tool%5B%22name%22%5D == tool_name:%0A print tool%5B'owner'%5D%0A%0Aif __name__ == '__main__':%0A parser = argparse.ArgumentParser()%0A parser.add_argument('--file', required=True)%0A parser.add_argument('--tool_name', required=True)%0A parser.add_argument('--tool_function', required=True)%0A args = parser.parse_args()%0A%0A with open(args.file,'r') as yaml_file:%0A yaml_content = yaml.load(yaml_file)%0A%0A functions = %7B%0A 'get_revision_number': get_revision_number,%0A 'get_owner': get_owner%0A %7D%0A%0A functions%5Bargs.tool_function%5D(yaml_content, args.tool_name)
24c763ead7af8a669ff1055b3f352f513274a47f
Insert a note at a specific position in a linked list
all-domains/data-structures/linked-lists/insert-a-node-at-a-specific-positin-in-a-linked-list/solution.py
all-domains/data-structures/linked-lists/insert-a-node-at-a-specific-positin-in-a-linked-list/solution.py
Python
0.000001
@@ -0,0 +1,1406 @@ +# https://www.hackerrank.com/challenges/insert-a-node-at-a-specific-position-in-a-linked-list%0A# Python 2%0A%0A%22%22%22%0A Insert Node at a specific position in a linked list%0A head input could be None as well for empty list%0A Node is defined as%0A%0A class Node(object):%0A%0A def __init__(self, data=None, next_node=None):%0A self.data = data%0A self.next = next_node%0A%0A return back the head of the linked list in the below method.%0A%22%22%22%0A%0A# This is a %22method-only%22 submission.%0A# You only need to complete this method.%0Adef InsertNth(head, data, position):%0A if head is None:%0A return Node(data=data)%0A else:%0A current = head%0A%0A if position == 0:%0A node_to_insert = Node(data=data, next_node=current)%0A return node_to_insert%0A else:%0A prev = None%0A for i in xrange(position):%0A prev = current%0A current = current.next%0A new_node = Node(data=data)%0A prev.next = new_node%0A new_node.next = current%0A return head%0A%0A# def display_linked_list(head):%0A# s = ''%0A# while True:%0A# s += '%7B%7D-%3E'.format(head.data)%0A# if head.next == None:%0A# break%0A# else:%0A# head = head.next%0A# s += 'NULL'%0A# print(s)%0A#%0A#%0A# # LL = Node(1)%0A# c = Node(3)%0A# b = Node(2, c)%0A# head = Node(1, b)%0A#%0A# head = InsertNth(head, 'x', 1)%0A#%0A# display_linked_list(head)%0A%0A
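A small usage sketch for InsertNth above, reusing the Node class described in the docstring:

# Hedged usage sketch; Node matches the class given in the docstring above.
class Node(object):
    def __init__(self, data=None, next_node=None):
        self.data = data
        self.next = next_node

head = Node(1, Node(2, Node(3)))
head = InsertNth(head, 'x', 1)   # 1 -> x -> 2 -> 3
assert head.next.data == 'x'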
db914944615f16c4b170e7dfd428901d5fc29271
Add test for image.fromstring - refs #1805
tests/python_tests/image_test.py
tests/python_tests/image_test.py
Python
0
@@ -0,0 +1,934 @@ +#!/usr/bin/env python%0A# -*- coding: utf-8 -*-%0A%0Aimport sys%0Aimport os, mapnik%0Afrom timeit import Timer, time%0Afrom nose.tools import *%0Afrom utilities import execution_path%0A%0Adef setup():%0A # All of the paths used are relative, if we run the tests%0A # from another directory we need to chdir()%0A os.chdir(execution_path('.'))%0A%0A%0Adef test_image_open_from_string():%0A filepath = '../data/images/dummy.png'%0A im1 = mapnik.Image.open(filepath)%0A im2 = mapnik.Image.fromstring(open(filepath,'rb').read())%0A eq_(im1.width(),im2.width())%0A length = len(im1.tostring())%0A eq_(length,len(im2.tostring()))%0A eq_(len(mapnik.Image.fromstring(im1.tostring('png')).tostring()),length)%0A eq_(len(mapnik.Image.fromstring(im1.tostring('jpeg')).tostring()),length)%0A eq_(len(mapnik.Image.fromstring(im1.tostring('tiff')).tostring()),length)%0A%0Aif __name__ == %22__main__%22:%0A setup()%0A %5Beval(run)() for run in dir() if 'test_' in run%5D%0A
dedcdaf1a55b08c275af29c535a7ae574b8ee5d2
Add 20150517 question.
LeetCode/number_of_islands.py
LeetCode/number_of_islands.py
Python
0.000001
@@ -0,0 +1,1619 @@ +%22%22%22%0AGiven a 2d grid map of '1's (land) and '0's (water), count the number of%0Aislands. An island is surrounded by water and is formed by connecting adjacent%0Alands horizontally or vertically. You may assume all four edges of the grid are%0Aall surrounded by water.%0A%0AExample 1:%0A%0A11110%0A11010%0A11000%0A00000%0AAnswer: 1%0A%0AExample 2:%0A%0A11000%0A11000%0A00100%0A00011%0AAnswer: 3%0A%0ATags: DFS, BFS%0A%0ADifficulty: Medium%0A%22%22%22%0A%0A%0Aclass Solution:%0A # @param %7Bcharacter%5B%5D%5B%5D%7D grid%0A # @return %7Binteger%7D%0A def numIslands(self, grid):%0A if not grid:%0A return 0%0A if not grid%5B0%5D:%0A return 0%0A%0A width = len(grid%5B0%5D)%0A height = len(grid)%0A visited = %5B%5BFalse%5D * width for _ in xrange(height)%5D%0A%0A count = 0%0A i = 0%0A while i %3C height:%0A j = 0%0A while j %3C width:%0A if grid%5Bi%5D%5Bj%5D == '1' and not visited%5Bi%5D%5Bj%5D:%0A self.bfs(grid, visited, %5B(i, j)%5D)%0A count += 1%0A j += 1%0A i += 1%0A return count%0A%0A%0A def bfs(self, grid, visited, to_be_visited):%0A if not to_be_visited:%0A return%0A%0A x, y = to_be_visited.pop()%0A if visited%5Bx%5D%5By%5D or grid%5Bx%5D%5By%5D == '0':%0A return%0A visited%5Bx%5D%5By%5D = True%0A%0A if x %3E 0:%0A to_be_visited.append((x - 1, y))%0A if x %3C len(visited) - 1:%0A to_be_visited.append((x + 1, y))%0A if y %3E 0:%0A to_be_visited.append((x, y - 1))%0A if y %3C len(visited%5B0%5D) - 1:%0A to_be_visited.append((x, y + 1))%0A%0A while to_be_visited:%0A self.bfs(grid, visited, to_be_visited)%0A
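A one-line check of the solution above, using example 2 from its docstring:

# Hedged usage sketch for Solution.numIslands (docstring example 2).
grid = [list(row) for row in ["11000", "11000", "00100", "00011"]]
assert Solution().numIslands(grid) == 3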
bf3378d0749abb499c286d92583218749773e174
change code fami as famille in param
src/scripts/aggregates_all_years.py
src/scripts/aggregates_all_years.py
# -*- coding:utf-8 -*-
#
# This file is part of OpenFisca.
# OpenFisca is a socio-fiscal microsimulation software
# Copyright © 2011 Clément Schaff, Mahdi Ben Jelloul
#
# Licensed under the terms of the GVPLv3 or later license
# (see openfisca/__init__.py for details)

# Script to compute the aggregates for all the referenced years

from src.lib.simulation import SurveySimulation
from src.plugins.survey.aggregates import Aggregates
from src.plugins.survey.inequality import Inequality

from pandas import ExcelWriter, ExcelFile
import os

from src.countries.france.data.sources.config import destination_dir

country = 'france'
fname_all = "aggregates_inflated_loyers.xlsx"
fname_all = os.path.join(destination_dir, fname_all)


def get_loyer_inflator(year):
    xls = ExcelFile('../countries/france/data/sources/loyers.xlsx')
    df = xls.parse('data', na_values=['NA'])
    irl_2006 = df[ (df['year'] == 2006) & (df['quarter'] == 1)]['irl']
    # print irl_2006
    irl = df[ (df['year'] == year) & (df['quarter'] == 1)]['irl']
    # print irl
    return float(irl.values/irl_2006.values)


def build_aggregates():
    writer = None
    years = range(2006,2007)
    for year in years:
        yr = str(year)
        # fname = "Agg_%s.%s" %(str(yr), "xls")
        simu = SurveySimulation()
        simu.set_config(year = yr, country = country)
        simu.set_param()
        simu.set_survey()
        inflator = get_loyer_inflator(year)
        simu.inflate_survey({'loyer' : inflator})
        simu.compute()

        agg = Aggregates()
        agg.set_simulation(simu)
        agg.compute()

        if writer is None:
            writer = ExcelWriter(str(fname_all))
        agg.aggr_frame.to_excel(writer, yr, index= False, header= True, float_format="%.2f")
        print agg.aggr_frame.to_string()

        del simu
        del agg
        import gc
        gc.collect()

    writer.save()


def diag_aggregates():
    years = ['2006', '2007', '2008', '2009']

    df_final = None
    for yr in years:
        xls = ExcelFile(fname_all)
        df = xls.parse(yr, hindex_col= True)

        cols = [u"Mesure",
                u"Dépense \n(millions d'€)",
                u"Bénéficiaires \n(milliers)",
                u"Dépenses \nréelles \n(millions d'€)",
                u"Bénéficiaires \nréels \n(milliers)",
                u"Diff. relative \nDépenses",
                u"Diff. relative \nBénéficiaires"]
        selected_cols = [u"Mesure", u"Diff. relative \nDépenses", u"Diff. relative \nBénéficiaires"]
        df = df[selected_cols]
        df['year'] = yr
        df['num'] = range(len(df.index))
        df = df.set_index(['num', u'Mesure', 'year'])
        if df_final is None:
            df_final = df
        else:
            df_final = df_final.append(df, ignore_index=False)

    # DataFrame.groupby()
    df_final = df_final.sortlevel(0)
    print str(fname_all)[:-5]+'_diag.xlsx'
    writer = ExcelWriter(str(fname_all)[:-5]+'_diag.xlsx')
    df_final.to_excel(writer, sheet_name="diagnostics", float_format="%.2f")
    writer.save()


def test_gini():
    """
    Compute Gini coefficients
    """
    years = range(2006,2010)
    for year in years:
        yr = str(year)
        # fname = "Agg_%s.%s" %(str(yr), "xls")
        simu = SurveySimulation()
        simu.set_config(year = yr, country = country)
        simu.set_param()
        simu.set_survey()
        inflator = get_loyer_inflator(year)
        simu.inflate_survey({'loyer' : inflator})
        simu.compute()

        inequality = Inequality()
        inequality.set_simulation(simu)
        inequality.compute()
        print inequality.inequality_dataframe
        print inequality.poverty


if __name__ == '__main__':
    build_aggregates()
    # diag_aggregates()
    test_gini()
Python
0.000004
@@ -1175,18 +1175,18 @@ (2006,20 +1 0 -7 )%0A fo
681c67381eef9384845e0041214011797be6ea03
Create text2hex.py
text2hex.py
text2hex.py
Python
0.001615
@@ -0,0 +1,1684 @@ +# Program Name : text2hex%0A# Programmer : The Alpha%0A# Credits : Iranpython.blog.ir%0A# Version : 0.91(Beta Version)%0A# Linted By : Pyflakes%0A# Info : text2hex is a simple tool that uses to convert strings to hex.%0A%0Afrom PyQt4.QtCore import *%0Afrom PyQt4.QtGui import *%0Aimport sys%0Aimport binascii%0A%0Aclass TextToHex(QDialog):%0A%09def __init__(self):%0A%09%09QDialog.__init__(self)%0A%0A%09%09self.setWindowTitle(%22Text2Hex%22)%0A%0A%09%09layout = QGridLayout()%0A%0A%09%09self.label_cp = QLabel(%22%3Cb%3E%3Ccode%3E%3Ch3%3Epystudent copyright%3C/h3%3E%3C/code%3E%3C/b%3E%22)%0A%0A%09%09label_text = QLabel(%22%3Cb%3E%3Ccode%3E%3Ch3%3EText :%3C/h3%3E%3C/code%3E%3C/b%3E%22)%0A%09%09self.line_edit_text = QLineEdit()%0A%0A%09%09label_hex = QLabel(%22%3Cb%3E%3Ccode%3E%3Ch3%3EHex :%3C/h3%3E%3C/code%3E%3C/b%3E%22)%0A%09%09self.line_edit_hex = QLineEdit()%0A%09%09self.line_edit_hex.setReadOnly(True)%0A%0A%09%09self.convert_button = QPushButton(%22Convert%22)%0A%09%09self.exit_button = QPushButton(%22Exit%22)%0A%0A%09%09layout.addWidget(label_text, 0, 0)%0A%09%09layout.addWidget(self.line_edit_text, 0, 1)%0A%09%09layout.addWidget(label_hex, 1, 0)%0A%09%09layout.addWidget(self.line_edit_hex, 1, 1)%0A%09%09layout.addWidget(self.convert_button, 2, 0)%0A%09%09layout.addWidget(self.label_cp, 2, 1)%0A%09%09layout.addWidget(self.exit_button, 2, 2)%0A%0A%09%09self.convert_button.clicked.connect(self.convertor)%0A%09%09self.exit_button.clicked.connect(self.close)%0A%09%09self.setLayout(layout)%0A%0A%0A%09def convertor(self):%0A%09%09data = self.line_edit_text.text()%0A%09%09hex_text = binascii.hexlify(bytes(data, 'utf-8'))%0A%09%09hex_text = str(hex_text)%0A%09%09hex_text = hex_text.replace(%22b'%22, %22%22)%0A%09%09hex_text = hex_text.replace(%22'%22, %22%22)%0A%09%09hex_text = %220x%22+hex_text%0A%09%09self.line_edit_hex.setText(hex_text)%0A%09%09if hex_text == %220x%22:%0A%09%09%09self.line_edit_hex.setText(%22%22)%0A%0Aapp = QApplication(sys.argv)%0Adialog = TextToHex()%0Adialog.show()%0Aapp.exec_()%0A
dce13f074187cb95644b0ac3cfd84d1e0649f93c
Fix bytes/str handling in disqus SSO.
mezzanine/generic/templatetags/disqus_tags.py
mezzanine/generic/templatetags/disqus_tags.py
from __future__ import unicode_literals
from future.builtins import int, str

import base64
import hashlib
import hmac
import json
import time

from mezzanine import template


register = template.Library()


@register.simple_tag
def disqus_id_for(obj):
    """
    Returns a unique identifier for the object to be used in
    DISQUS JavaScript.
    """
    return "%s-%s" % (obj._meta.object_name, obj.id)


@register.inclusion_tag("generic/includes/disqus_sso.html", takes_context=True)
def disqus_sso_script(context):
    """
    Provides a generic context variable which adds single-sign-on
    support to DISQUS if ``COMMENTS_DISQUS_API_PUBLIC_KEY`` and
    ``COMMENTS_DISQUS_API_SECRET_KEY`` are specified.
    """
    settings = context["settings"]
    public_key = getattr(settings, "COMMENTS_DISQUS_API_PUBLIC_KEY", "")
    secret_key = getattr(settings, "COMMENTS_DISQUS_API_SECRET_KEY", "")
    user = context["request"].user
    if public_key and secret_key and user.is_authenticated():
        context["public_key"] = public_key
        context["sso_data"] = _get_disqus_sso(user, public_key, secret_key)
    return context


def _get_disqus_sso(user, public_key, secret_key):
    # Based on snippet provided on http://docs.disqus.com/developers/sso/
    # create a JSON packet of our data attributes
    data = json.dumps({
        'id': '%s' % user.id,
        'username': user.username,
        'email': user.email,
    })
    # encode the data to base64
    message = base64.b64encode(data)
    # generate a timestamp for signing the message
    timestamp = int(time.time())
    # generate our hmac signature
    sig = hmac.HMAC(str(secret_key), '%s %s' % (message, timestamp),
                    hashlib.sha1).hexdigest()

    # Messages are of the form <message> <signature> <timestamp>
    return '%s %s %s' % (message, sig, timestamp)
Python
0
@@ -65,16 +65,18 @@ ort -int, str +bytes, int %0A%0Aim @@ -1498,20 +1498,44 @@ 4encode( -data +bytes(data, encoding=%22utf8%22) )%0A # @@ -1670,11 +1670,13 @@ MAC( -str +bytes (sec @@ -1686,11 +1686,54 @@ _key -) , +encoding=%22utf8%22),%0A bytes( '%25s @@ -1759,16 +1759,34 @@ estamp), + encoding=%22utf8%22), %0A
913a77592a9f399820cddbc7753c24182ad21639
Add options for plots
src/rnaseq_lib/plot/opts.py
src/rnaseq_lib/plot/opts.py
Python
0.000001
@@ -0,0 +1,643 @@ +gene_curves = %7B%0A 'Curve': %7B'plot': dict(height=120, width=600, tools=%5B'hover'%5D, invert_xaxis=True, yrotation=45, yaxis='left'),%0A 'style': dict(line_width=1.5)%7D,%0A 'Curve.Percentage_of_Normal_Samples': %7B'plot': dict(xaxis=None, invert_yaxis=True),%0A 'style': dict(color='Blue')%7D,%0A 'Curve.Gene_Expression': %7B'plot': dict(xaxis=None),%0A 'style': dict(color='Green')%7D,%0A 'Curve.Log2_Fold_Change': %7B'plot': dict(height=150),%0A 'style': dict(color='Purple')%7D,%0A 'Scatter': %7B'style': dict(color='red', size=3)%7D%7D%0A%0Agene_kde = %7B%7D%0A
9fb860e0c5b0ff6e696b8102197c3255f7b2d3d7
The goods
graph_role_deps.py
graph_role_deps.py
Python
0.998626
@@ -0,0 +1,710 @@ +#!/usr/bin/python%0A'''Graphs role dependencies in roles/ as a graphviz digraph'''%0A%0Aimport os%0Aimport yaml%0Aimport sys%0A%0Aprint 'digraph %7B'%0A%0Afor role in os.listdir('./roles'):%0A try:%0A with open('./roles/%25s/meta/main.yml' %25 role) as meta:%0A data = yaml.load(meta)%0A except Exception as exc:%0A print %3E%3Esys.stderr, 'Skipping %25s: %25r' %25 (role, exc) %0A continue%0A%0A try:%0A deps = data%5B'dependencies'%5D%0A except Exception as exc:%0A print %3E%3Esys.stderr, 'Skipping %25s: %25r' %25 (role, exc)%0A continue%0A%0A print '%5Ct%22%25s%22 -%3E %7B' %25 role,%0A for dep in deps:%0A print %3E%3Esys.stderr, 'dep:', dep%0A name = dep%5B'role'%5D%0A print '%22%25s%22' %25 name,%0A print '%7D'%0A%0Aprint '%7D'%0A
fa1e30635f57aaffdc74eaa307b8c74f89bf50ae
add base gender choices object
accelerator_abstract/models/base_gender_choices.py
accelerator_abstract/models/base_gender_choices.py
Python
0.001809
@@ -0,0 +1,1077 @@ +# MIT License%0A# Copyright (c) 2017 MassChallenge, Inc.%0A%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import models%0A%0Afrom accelerator_abstract.models.accelerator_model import AcceleratorModel%0A%0AGENDER_MALE_CHOICE = %22Male%22%0AGENDER_FEMALE_CHOICE = %22Female%22%0AGENDER_CISGENDER_CHOICE = %22Cisgender%22%0AGENDER_TRANSGENDER_CHOICE = %22Transgender%22%0AGENDER_NON_BINARY_CHOICE = %22Non-Binary%22%0AGENDER_PREFER_TO_SELF_DESCRIBE_CHOICE = %22I Prefer To Self-describe%22%0AGENDER_PREFER_NOT_TO_SAY_CHOICE = %22I Prefer Not To Say%22%0A%0AGENDER_CHOICES = (%0A GENDER_MALE_CHOICE,%0A GENDER_FEMALE_CHOICE,%0A GENDER_CISGENDER_CHOICE,%0A GENDER_TRANSGENDER_CHOICE,%0A GENDER_NON_BINARY_CHOICE,%0A GENDER_PREFER_TO_SELF_DESCRIBE_CHOICE,%0A GENDER_PREFER_NOT_TO_SAY_CHOICE%0A)%0A%0A%0Aclass BaseGenderChoices(AcceleratorModel):%0A name = models.CharField(max_length=255, unique=True)%0A%0A class Meta(AcceleratorModel.Meta):%0A db_table = 'accelerator_genderchoices'%0A abstract = True%0A ordering = %5B'name', %5D%0A verbose_name = %22Gender Choice%22%0A verbose_name_plural = %22Gender Choices%22%0A
b55ef35a68305269e8a49a8afcdf46d94d06361f
add drf module
src/common/drf.py
src/common/drf.py
Python
0.000001
@@ -0,0 +1,227 @@ +from rest_framework.exceptions import APIException%0A%0Aclass ServiceUnavailable(APIException):%0A status_code = 503%0A default_detail = 'Service temporarily unavailable, try again later.'%0A default_code = 'service_unavailable'
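A brief sketch of raising the exception above from a DRF view; the view class is illustrative:

# Hedged usage sketch for ServiceUnavailable; HealthCheck is made up.
from rest_framework.views import APIView
from common.drf import ServiceUnavailable

class HealthCheck(APIView):
    def get(self, request):
        # DRF's exception handler turns this into a 503 JSON response.
        raise ServiceUnavailable()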
3f7b6649db97547b77ed757aae28eeeb739fa154
fix a bug in reading the boolean environment variable
tweeza/app.py
tweeza/app.py
# -*- coding: utf-8 -*-
"""
    The main application entry.
"""
import os
from flask import Flask, request, g, render_template
from config import DevelopmentConfig
# Bluprints:
from frontend.views import frontend
from dashboard.views import dashboard
from users import users, User
from items.views import items
# from flask.ext.security import Security
from utils import current_year, pretty_date
from extensions import (db, mail, babel, login_manager, bcrypt, gravatar)
from flask.ext.mongoengine import MongoEngineSessionInterface

# For import *
__all__ = ['create_app']

DEFAULT_BLUEPRINTS = (
    frontend,
    dashboard,
    users,
    items,
)


def create_app(config=None, app_name=None, blueprints=None):
    """Create a Flask app."""

    if app_name is None:
        app_name = DevelopmentConfig.PROJECT
    if blueprints is None:
        blueprints = DEFAULT_BLUEPRINTS

    app = Flask(app_name, instance_relative_config=True)

    configure_app(app, config)
    configure_hook(app)
    configure_blueprints(app, blueprints)
    configure_extensions(app)
    configure_logging(app)
    configure_template_filters(app)
    configure_error_handlers(app)

    return app


def configure_app(app, config=None):
    """
    Looks for the 'config.cfg' file under the instance folder
    then load it or fallbacks to example.cfg
    """
    config_file = None
    if os.environ.get('PRODUCTION'):  # are we in production?
        config_file = os.path.join(app.instance_path, 'production.cfg')
    else:  # No? use development config with Debug mode On
        config_file = os.path.join(app.instance_path, 'config.cfg')
        if not os.path.isfile(config_file):
            config_file = os.path.join(app.instance_path, 'example.cfg')
        print(config_file)
    try:
        app.config.from_pyfile(config_file)
    except IOError:
        print("didn't find any configuration files!\nexiting...")
        raise SystemExit
    os.environ['DEBUG'] = "1"  # required to test Oauth with github


def configure_extensions(app):
    # flask-mongoengine
    db.init_app(app)
    app.session_interface = MongoEngineSessionInterface(db)

    # flask-mail
    mail.init_app(app)

    # flask-babel
    babel.init_app(app)

    # Bcrypt for hashing passwords
    bcrypt.init_app(app)

    # the Gravatar service
    gravatar.init_app(app)

    # Debug Toolbar
    from flask_debugtoolbar import DebugToolbarExtension
    DebugToolbarExtension(app)

    @babel.localeselector
    def get_locale():
        """ Get the current request locale.

        returns String

        """
        if not hasattr(g, 'lang'):
            g.lang = 'fr'
        accept_languages = app.config.get('ACCEPT_LANGUAGES')
        return g.lang or request.accept_languages.best_match(accept_languages)

    # flask-login
    login_manager.login_view = 'frontend.login'
    login_manager.refresh_view = 'frontend.reauth'

    @login_manager.user_loader
    def load_user(user_id):
        return User.objects(id=user_id).first()

    login_manager.setup_app(app)


def configure_blueprints(app, blueprints):
    """Configure blueprints in views."""

    for blueprint in blueprints:
        app.register_blueprint(blueprint)


def configure_template_filters(app):

    @app.template_filter('prettify')
    def prettify(value):
        return pretty_date(value)

    @app.template_filter('format_date')
    def format_date(value, format='%Y-%m-%d'):
        return value.strftime(format)


def configure_logging(app):
    """Configure file(info) and email(error) logging."""

    return

    if app.debug or app.testing:
        # Skip debug and test mode. Just check standard output.
        return

    import logging
    from logging.handlers import SMTPHandler

    # Set info level on logger, which might be overwritten by handers.
    # Suppress DEBUG messages.
    app.logger.setLevel(logging.INFO)

    info_log = os.path.join(app.config['LOG_FOLDER'], 'info.log')
    info_file_handler = logging.handlers.RotatingFileHandler(info_log,
                                                             maxBytes=100000,
                                                             backupCount=10)
    info_file_handler.setLevel(logging.INFO)
    info_file_handler.setFormatter(logging.Formatter(
        '%(asctime)s %(levelname)s: %(message)s '
        '[in %(pathname)s:%(lineno)d]')
    )
    app.logger.addHandler(info_file_handler)

    # Testing
    #app.logger.info("testing info.")
    #app.logger.warn("testing warn.")
    #app.logger.error("testing error.")

    mail_handler = SMTPHandler(app.config['MAIL_SERVER'],
                               app.config['MAIL_USERNAME'],
                               app.config['ADMINS'],
                               'O_ops... %s failed!' % app.config['PROJECT'],
                               (app.config['MAIL_USERNAME'],
                                app.config['MAIL_PASSWORD']))
    mail_handler.setLevel(logging.ERROR)
    mail_handler.setFormatter(logging.Formatter(
        '%(asctime)s %(levelname)s: %(message)s '
        '[in %(pathname)s:%(lineno)d]')
    )
    app.logger.addHandler(mail_handler)


def configure_hook(app):
    @app.before_request
    def before_request():
        pass

    @app.url_defaults
    def add_language_code(endpoint, values):
        if hasattr(g, 'lang') and not g.lang:
            g.lang = 'fr'
        values.setdefault('lang', g.lang)

    @app.url_value_preprocessor
    def pull_lang_code(endpoint, values):
        if values:
            g.lang = values.pop('lang', None)
            if hasattr(g, 'lang') and not g.lang:
                g.lang = 'fr'

    @app.context_processor
    def utility_processor():
        return dict(current_year=current_year)


def configure_error_handlers(app):

    @app.errorhandler(403)
    def forbidden_page(error):
        return render_template("errors/forbidden_page.html"), 403

    @app.errorhandler(404)
    def page_not_found(error):
        return render_template("errors/page_not_found.html"), 404

    @app.errorhandler(500)
    def server_error_page(error):
        return render_template("errors/server_error.html"), 500

app = create_app()

if __name__ == "__main__":
    app.run()
Python
0.000039
@@ -1352,24 +1352,57 @@ .cfg%0A %22%22%22 +%0A%0A import ast # Python %3E= 2.6 %0A config_ @@ -1421,16 +1421,37 @@ %0A if +ast.literal_eval(str( os.envir @@ -1474,19 +1474,14 @@ ON') +)) : # - are we in
cdfee7e893564157e2143f20dea0b10c8bd33cfb
Create pythonLock.py
ving2/pythonLock.py
ving2/pythonLock.py
Python
0.00002
@@ -0,0 +1,849 @@ +%0A%0Afrom threading import Thread%0Afrom threading import Lock%0Ai = 0%0A%0Adef someThreadFunction1(lock): %0A# Potentially useful thing:%0A# In Python you %22import%22 a global variable, instead of %22export%22ing it when you declare it%0A# (This is probably an effort to make you feel bad about typing the word %22global%22)%0A global i%0A for j in range (0,1000000):%0A lock.acquire()%0A i += 1%0A lock.release()%0A%0A%0Adef someThreadFunction2(lock):%0A global i%0A for j in range (0,1000000):%0A lock.acquire()%0A i -= 1%0A lock.release()%0A %0Adef main():%0A lock = Lock()%0A someThread1 = Thread(target = someThreadFunction1, args = (%5Block%5D))%0A someThread1.start()%0A someThread2 = Thread(target = someThreadFunction2, args = (%5Block%5D))%0A someThread2.start()%0A %0A someThread1.join()%0A someThread2.join()%0A print(i)%0A%0Amain()%0A
01c3a20cd7ef44fe0363d8a270425699669af74f
FIX json in metadata - new resource
test/acceptance/component/get_productandrelease_list/features/steps.py
test/acceptance/component/get_productandrelease_list/features/steps.py
from lettuce import step, world from commons.product_steps import ProductSteps from commons.rest_utils import RestUtils from commons.constants import * from commons.utils import response_body_to_dict from nose.tools import assert_equals, assert_true, assert_false, assert_in api_utils = RestUtils() product_steps = ProductSteps() def check_if_product_is_in_list(response, product_release): """ Checks if product is in response list with his attribute and metadatas - Assertions: * Metadata (default metadatas) * Attributes (if exist) * Product existence in list :param response: Response from API - dic :param product_release: Product release version - str :return: None """ found = False for product_and_release in response: if product_and_release[PRODUCT][PRODUCT_NAME] == world.product_name: if product_release is None or product_and_release[VERSION] == product_release: found = True for metadata in DEFAULT_METADATA[METADATA]: assert_in(metadata, product_and_release[PRODUCT][PRODUCT_METADATAS], "Metadata are not the expected!") if world.attributes is not None: assert_equals(product_and_release[PRODUCT][PRODUCT_ATTRIBUTES], world.attributes, "Attributes are not expected!") break assert_true(found, "Product and release not found in list!") @step(u'a created product with this name "([^"]*)"') def a_created_product_with_name(step, product_name): world.product_name = product_name product_steps.a_created_product_with_name(step, product_name) @step(u'a created product with name "([^"]*)" and release "([^"]*)"') def a_created_product_with_name_group1_and_release_group2(step, product_name, product_release): world.product_name = product_name world.product_release = product_release product_steps.a_created_product_with_name_and_release(step=step, product_name=product_name, product_version=product_release) @step(u'a created product with name "([^"]*)" and releases:') def a_create_product_with_name_group1_and_releases(step, product_name): world.product_name = product_name world.product_release = [] for row in step.hashes: world.product_release.append(row['release']) product_steps.a_created_product_with_name_and_release_list(step, product_name, world.product_release) @step(u'accept header value "([^"]*)"') def accept_header_value_group1(step, accept_header): world.headers[ACCEPT_HEADER] = accept_header @step(u'default product attributes') def default_product_attributes(step): world.attributes = DEFAULT_ATTRIBUTE[ATTRIBUTE] @step(u'the authentication token "([^"]*)":') def the_authentication_token_group1(step, token): world.headers[AUTH_TOKEN_HEADER] = token @step(u'the authentication tenant-id "([^"]*)"') def the_authentication_tenant_id_group1(step, tenant_id): world.headers[TENANT_ID_HEADER] = tenant_id @step(u'I retrieve the product list with its releases') def i_retrieve_the_product_list_with_its_releases(step): world.response = api_utils.retrieve_productandrelease_list(headers=world.headers) @step(u'I use a invalid HTTP "([^"]*)" method') def i_use_a_invalid_http_group1_method(step, http_method): world.response = api_utils.request_productandrelease(headers=world.headers, method=http_method) @step(u'the list is returned') def the_list_is_returned(step): assert_true(world.response.ok, 'RESPONSE: {}'.format(world.response.content)) response_headers = world.response.headers assert_in(response_headers[CONTENT_TYPE], world.headers[ACCEPT_HEADER], 'RESPONSE HEADERS: {}'.format(world.response.headers)) @step(u'the product with its release is in the list') def the_product_with_its_release_is_in_the_list(step): response = response_body_to_dict(world.response, world.headers[ACCEPT_HEADER], xml_root_element_name=PRODUCTANDRELEASE_LIST, is_list=True) assert_true(len(response) != 0) check_if_product_is_in_list(response, world.product_release) @step(u'the product with all its releases is in the list') def the_product_with_all_its_releases_is_in_the_list(step): response = response_body_to_dict(world.response, world.headers[ACCEPT_HEADER], xml_root_element_name=PRODUCTANDRELEASE_LIST, is_list=True) for release in world.product_release: check_if_product_is_in_list(response, release) @step(u'the product is not in the list') def the_product_is_not_in_the_list(step): response = response_body_to_dict(world.response, world.headers[ACCEPT_HEADER], xml_root_element_name=PRODUCTANDRELEASE_LIST, is_list=True) found = False if len(response) != 0: for product_and_release in response: if product_and_release[PRODUCT][PRODUCT_NAME] == world.product_name: found = True break assert_false(found, "Product is in the list and it shouldn't!") @step(u'I obtain an http error code "([^"]*)"') def i_obtain_an_http_error_code_group1(step, error_code): assert_equals(str(world.response.status_code), error_code)
Python
0
@@ -193,16 +193,61 @@ _to_dict +, replace_none_value_metadata_to_empty_string %0A%0Afrom n @@ -1103,16 +1103,205 @@ ADATA%5D:%0A + # Workaround: xmldict manage Empty values as None value%0A replace_none_value_metadata_to_empty_string(product_and_release%5BPRODUCT%5D%5BPRODUCT_METADATAS%5D)%0A
9266e24e616174cc37b5e6f7926dfda81471abb5
Initialize PracticeQuestions
books/CrackingCodesWithPython/Chapter13/PracticeQuestions.py
books/CrackingCodesWithPython/Chapter13/PracticeQuestions.py
Python
0
@@ -0,0 +1,688 @@ +# Chapter 13 Practice Questions%0A%0A# 1. What do the following expressions evaluate to?%0Aprint(17 %25 1000)%0Aprint(5 %25 5)%0A%0A# 2. What is the GCD of 10 and 15?%0A# Don't do this - imports should be at the top of the file%0Afrom books.CrackingCodesWithPython.Chapter13.cryptomath import gcd%0Aprint(gcd(10, 15))%0A%0A# 3. What does spam contain after executing spam, eggs = 'hello', 'world'?%0Aspam, eggs = 'hello', 'world'%0Aprint(spam)%0A%0A# 4. The GCD of 17 and 31 is 1. Are 17 and 31 relatively prime?%0Aif not gcd(17, 31) == 1:%0A print(%22No%22)%0Aelse:%0A print(%22Yes%22)%0A%0A# 5. Why aren't 6 and 8 relatively prime?%0Aprint(gcd(6, 8))%0A%0A# 6. What is the formula for the modular inverse of A mod C?%0A# Hint: check page 183%0A
c9e90ef5413bd560422e915d213df73ad88dffd7
Add apigateway integration test for PutIntegration
tests/integration/test_apigateway.py
tests/integration/test_apigateway.py
Python
0
@@ -0,0 +1,1954 @@ +# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22). You%0A# may not use this file except in compliance with the License. A copy of%0A# the License is located at%0A#%0A# http://aws.amazon.com/apache2.0/%0A#%0A# or in the %22license%22 file accompanying this file. This file is%0A# distributed on an %22AS IS%22 BASIS, WITHOUT WARRANTIES OR CONDITIONS OF%0A# ANY KIND, either express or implied. See the License for the specific%0A# language governing permissions and limitations under the License.%0Afrom tests import unittest%0A%0Aimport botocore.session%0A%0A%0Aclass TestApigateway(unittest.TestCase):%0A def setUp(self):%0A self.session = botocore.session.get_session()%0A self.client = self.session.create_client('apigateway', 'us-east-1')%0A%0A # Create a resoruce to use with this client.%0A self.api_name = 'mytestapi'%0A self.api_id = self.client.create_rest_api(name=self.api_name)%5B'id'%5D%0A%0A def tearDown(self):%0A self.client.delete_rest_api(restApiId=self.api_id)%0A%0A def test_put_integration(self):%0A # The only resource on a brand new api is the path. So use that ID.%0A path_resource_id = self.client.get_resources(%0A restApiId=self.api_id)%5B'items'%5D%5B0%5D%5B'id'%5D%0A%0A # Create a method for the resource.%0A self.client.put_method(%0A restApiId=self.api_id,%0A resourceId=path_resource_id,%0A httpMethod='GET',%0A authorizationType='None'%0A )%0A%0A # Put an integration on the method.%0A response = self.client.put_integration(%0A restApiId=self.api_id,%0A resourceId=path_resource_id,%0A httpMethod='GET',%0A type='HTTP',%0A integrationHttpMethod='GET',%0A uri='https://api.endpoint.com'%0A )%0A # Assert the response was successful by checking the integration type%0A self.assertEqual(response%5B'type'%5D, 'HTTP')%0A
4ce7a1932d9cde635263a4fe5a80af57589e1cfa
add NASM 2.13.02 Conan package recipe
build_env/Conan/packages/NASM/2.13.02/conanfile.py
build_env/Conan/packages/NASM/2.13.02/conanfile.py
Python
0
@@ -0,0 +1,1515 @@ +import os%0Afrom conans import ConanFile, AutoToolsBuildEnvironment, tools%0A%0A%0A%0Aclass NASM(ConanFile):%0A%09name = %22NASM%22%0A%09version = %222.13.02%22%0A%09url = %22http://www.nasm.us%22%0A%09settings = %7B%22os%22: %5B%22Linux%22%5D%7D%0A%0A%0A%0A%09def getSubdirectories(self, d):%0A%09%09return %5B f for f in os.listdir(d) if os.path.isdir(f) %5D%0A%0A%0A%0A%09def source(self):%0A%09%09self.output.info(%22%22)%0A%09%09self.output.info(%22---------- source ----------%22)%0A%09%09self.output.info(%22%22)%0A%09%09%0A%09%09filename = %22nasm-%22 + self.version + %22.tar.bz2%22%0A%09%09url = %22http://www.nasm.us/pub/nasm/releasebuilds/%22 + self.version + %22/%22 + filename%0A%09%09self.output.info(%22downloading %22 + url)%0A%09%09tools.download(url, filename, retry=3, retry_wait=10)%0A%09%09tools.unzip(filename, self.source_folder)%0A%0A%09%09dirnames = self.getSubdirectories(self.source_folder)%0A%09%09%0A%09%09if len(dirnames) %3C 1:%0A%09%09%09raise Exception(%22archive does not contain any subdirectories%22)%0A%09%09%0A%09%09os.rename(dirnames%5B0%5D, self.name)%0A%09%09os.remove(filename)%0A%0A%0A%0A%09def build(self):%0A%09%09self.output.info(%22%22)%0A%09%09self.output.info(%22---------- build ----------%22)%0A%09%09self.output.info(%22%22)%0A%09%09%09%0A%09%09with tools.chdir(self.name):%0A%09%09%09env = AutoToolsBuildEnvironment(self)%0A%09%09%09env.configure(args=%5B%22--prefix=%22 + self.package_folder%5D)%0A%09%09%09env.make()%0A%09%09%09env.make(args=%5B%22install%22%5D)%0A%0A%0A%0A%09def package(self):%0A%09%09self.output.info(%22%22)%0A%09%09self.output.info(%22---------- package ----------%22)%0A%09%09self.output.info(%22%22)%0A%0A%0A%0A%09def package_info(self):%0A%09%09self.output.info(%22%22)%0A%09%09self.output.info(%22---------- package_info ----------%22)%0A%09%09self.output.info(%22%22)%0A%09%09%0A%09%09self.env_info.PATH.append(os.path.join(self.package_folder, %22bin%22))
9fb564d8f02d92432a62be02c906e3b227f48c10
Create add_results_new.py
run_tests/shaker_run/add_results_new.py
run_tests/shaker_run/add_results_new.py
Python
0.000004
@@ -0,0 +1,742 @@ +custom_res1 = %5B%7B'status_id': 5, 'content': 'Check %5BOperations per second Median; iops%5D', 'expected': '88888', 'actual': '7777'%7D,%7B'status_id': 5, 'content': 'Check %5Bdeviation; %25%5D', 'expected': '5555', 'actual': '9999'%7D%5D%0Ares1 = %7B'test_id': test_4kib_read, 'status_id': 5, 'custom_test_case_steps_results': custom_res1%7D%0Ares2 = %7B'test_id': test_4kib_write, 'status_id': 5, 'custom_test_case_steps_results': %5B%7B'status_id': 5, 'content': 'Check %5BOperations per second Median; iops%5D', 'expected': '20202', 'actual': '30303'%7D,%7B'status_id': 5, 'content': 'Check %5Bdeviation; %25%5D', 'expected': '90909', 'actual': '80808'%7D%5D%7D%0Aresults_list = %5Bres1, res2%5D%0Ares_all = %7B'results': results_list%7D%0A%0Aprint client.send_post('add_results/%7B%7D'.format(run_id), res_all)%0A
729f1c5147e4d4ce242d73731c8e455b2a50fca3
add 188
vol4/188.py
vol4/188.py
Python
0.999986
@@ -0,0 +1,223 @@ +def tetration(a, b, m):%0A t0 = 1%0A for i in range(b):%0A t1 = pow(a, t0, m)%0A if t0 == t1:%0A break%0A t0 = t1%0A return t0%0A%0Aif __name__ == %22__main__%22:%0A print tetration(1777, 1855, 10 ** 8)%0A
98c1ff71d57749168f0ca35d97dbe77a8a67e082
Add module for utilities related to xgboost
mltils/xgboost/utils.py
mltils/xgboost/utils.py
Python
0
@@ -0,0 +1,304 @@ +%0Axgb_to_sklearn = %7B%0A 'eta': 'learning_rate',%0A 'num_boost_round': 'n_estimators',%0A 'alpha': 'reg_alpha',%0A 'lambda': 'reg_lambda',%0A 'seed': 'random_state',%0A%7D%0A%0Adef to_sklearn_api(params):%0A return %7B%0A xgb_to_sklearn.get(key, key): value%0A for key, value in params.items()%0A %7D%0A
bbb10ba41db6f70512fe6bcb5207377606a22455
Create Mordecai_Output.py
Geoparser_Comparison/English/Mordecai_Output.py
Geoparser_Comparison/English/Mordecai_Output.py
Python
0.000198
@@ -0,0 +1,1593 @@ +#!/usr/bin/env python2%0A# -*- coding: utf-8 -*-%0A%22%22%22%0ADownload and run Mordecai from following link: %0A %0A %22https://github.com/openeventdata/mordecai%22%0A %0A To change the corpus, just change the name in main function.%0A%22%22%22%0A%0Aimport xml.etree.ElementTree as et%0Aimport re%0Aimport json, sys%0Aimport requests%0A%0A#reload(sys)%0A#sys.setdefaultencoding(%22utf-8%22)%0A%0A%0Adef Mordecai(text):%0A%0A headers = %7B'Content-Type': 'application/json'%7D%0A place=list()%0A %0A data = %7B'text': text%7D%0A %0A data = json.dumps(data) %0A out = requests.post('http://localhost:5000/places', data=data, headers=headers)%0A parsed_json = json.loads(out.text)%0A try:%0A for e in parsed_json:%0A #print e%0A index = %5Bm.start() for m in re.finditer(e%5B'placename'%5D.strip(), text)%5D%0A for ind in index: %0A place.append(e%5B'searchterm'%5D + %22,,%22 + e%5B'placename'%5D + %22,,%22 + str(e%5B'lat'%5D) + %22,,%22 + str(e%5B'lon'%5D) + %22,,%22+ str(ind) +',,'+ str(ind +len(e%5B'placename'%5D.strip()) ))%0A %0A except:%0A pass %0A %0A return place%0A%0A%0A%0Aif __name__ == '__main__':%0A %0A f = open('./data/wiki_mordecai_Original.txt' , 'w') #change it if your data is lgl.xml%0A %0A tree = et.parse('./WikToR(SciPaper).xml') #change it if your data is lgl.xml%0A root = tree.getroot()%0A c = 0%0A for child in root:%0A c +=1%0A print c%0A%0A %0A text = child.find('text').text%0A place = Mordecai(text)%0A %0A if (place): %0A for t in place:%0A f.write(t + %22%7C%7C%22)%0A f.write(%22%5Cn%22)%0A f.flush()%0A
9d98c3280d4e9dc6dda172d11e02922fc9958471
add homwork01_v0.2.py
01/homwork01_v0.2.py
01/homwork01_v0.2.py
Python
0.000065
@@ -0,0 +1,463 @@ +#!/usr/bin/env python%0A#coding=utf-8%0A%0Anum_list = %5B1,2,3,2,12,3,1,3,21,2,2,3,4111,22,3333,444,111,4,5,777,65555,45,33,45%5D%0A%0Amax2 = max1 = num_list%5B0%5D%0A# print max1, max2%0A# max1 bigger than max2%0A# 1. n%3Emax1 and n%3Emax2%0A# 2. n%3C=max1 and n%3Emax2%0A# 3. n%3Cmax1 and n%3C=max2%0A%0Afor n in num_list:%0A if n %3E max2:%0A if n %3E max1:%0A max2 = max1%0A max1 = n%0A elif n %3C max1:%0A max2 = n%0A%0Aprint %22Two large numbers are: %25d, %25d%22 %25 (max1, max2)%0A%0A
7b27f4cdb8135e7d5fd18ff11e2eae9325e6f17a
Move METROPOLIS_FORK_BLKNUM
ethereum/config.py
ethereum/config.py
from rlp.utils import decode_hex from ethereum import utils from ethereum.db import BaseDB default_config = dict( # Genesis block difficulty GENESIS_DIFFICULTY=131072, # Genesis block gas limit GENESIS_GAS_LIMIT=3141592, # Genesis block prevhash, coinbase, nonce GENESIS_PREVHASH=b'\x00' * 32, GENESIS_COINBASE=b'\x00' * 20, GENESIS_NONCE=utils.zpad(utils.encode_int(42), 8), GENESIS_MIXHASH=b'\x00' * 32, GENESIS_TIMESTAMP=0, GENESIS_EXTRA_DATA=b'', GENESIS_INITIAL_ALLOC={}, # Minimum gas limit MIN_GAS_LIMIT=5000, # Gas limit adjustment algo: # block.gas_limit=block.parent.gas_limit * 1023/1024 + # (block.gas_used * 6 / 5) / 1024 GASLIMIT_EMA_FACTOR=1024, GASLIMIT_ADJMAX_FACTOR=1024, BLKLIM_FACTOR_NOM=3, BLKLIM_FACTOR_DEN=2, # Block reward BLOCK_REWARD=5000 * utils.denoms.finney, NEPHEW_REWARD=5000 * utils.denoms.finney // 32, # BLOCK_REWARD / 32 # GHOST constants UNCLE_DEPTH_PENALTY_FACTOR=8, MAX_UNCLE_DEPTH=6, # max (block.number - uncle.number) MAX_UNCLES=2, # Difficulty adjustment constants DIFF_ADJUSTMENT_CUTOFF=13, BLOCK_DIFF_FACTOR=2048, MIN_DIFF=131072, # PoW info POW_EPOCH_LENGTH=30000, # Maximum extra data length MAX_EXTRADATA_LENGTH=32, # Exponential difficulty timebomb period EXPDIFF_PERIOD=100000, EXPDIFF_FREE_PERIODS=2, # Blank account initial nonce ACCOUNT_INITIAL_NONCE=0, # Homestead fork HOMESTEAD_FORK_BLKNUM=1150000, HOMESTEAD_DIFF_ADJUSTMENT_CUTOFF=10, # Metropolis fork METROPOLIS_FORK_BLKNUM=99999999, METROPOLIS_ENTRY_POINT=2 ** 160 - 1, METROPOLIS_STATEROOT_STORE=0x10, METROPOLIS_BLOCKHASH_STORE=0x20, METROPOLIS_WRAPAROUND=65536, METROPOLIS_GETTER_CODE=decode_hex('6000355460205260206020f3'), METROPOLIS_DIFF_ADJUSTMENT_CUTOFF=9, # Metropolis fork ) assert default_config['NEPHEW_REWARD'] == \ default_config['BLOCK_REWARD'] // 32 class Env(object): def __init__(self, db, config=None, global_config=None): assert isinstance(db, BaseDB) self.db = db self.config = config or dict(default_config) self.global_config = global_config or dict()
Python
0.000005
@@ -1634,16 +1634,16 @@ NUM= -99999999 +2 ** 100 ,%0A
73bc2dbfe40db224a38725f4412e33b1b5accac6
Add script example.
examples/script.py
examples/script.py
Python
0
@@ -0,0 +1,1351 @@ +# Copyright (c) 2013 Jordan Halterman %[email protected]%3E%0A# See LICENSE for details.%0Aimport sys, os%0Asys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))%0A%0A# The Active Redis API provides native support for Redis server-side%0A# Lua scripting.%0Afrom active_redis import Script%0A%0Aclass PushMany(Script):%0A %22%22%22%0A Push several items on to a queue.%0A %22%22%22%0A # Define keyword argument names for keys used by the script.%0A keys = %5B'key'%5D%0A%0A # Define keyword argument names for all other arguments to the script.%0A args = %5B%5D%0A%0A # In this case, we're using a variable number of arguments. Note that%0A # when variable arguments are used, only the last defined argument%0A # may have a variable number.%0A variable_args = True%0A%0A # Finally, define the Lua script. This is just a simple example.%0A script = %22%22%22%0A local key = KEYS%5B1%5D%0A local vals = ARGV%0A redis.call('RPUSH', key, unpack(vals))%0A %22%22%22%0A%0A# Building upon the datatype example, we can extend the Queue class%0A# and make use of our script.%0Afrom datatype import Queue%0Afrom active_redis import registry%0A%[email protected]%0Aclass BetterQueue(Queue):%0A %22%22%22A better version of our queue.%22%22%22%0A type = 'better_queue'%0A%0A _scripts = %7B%0A 'pushmany': PushMany,%0A %7D%0A%0A def push_many(self, *args):%0A %22%22%22Pushes many items on to the queue.%22%22%22%0A return self._execute_script('pushmany', self.key, *args)%0A
fbaca2f2a0ceaa77606d9c24846a1a1b045dc460
remove deleted files from manifest
addons/l10n_lu/__openerp__.py
addons/l10n_lu/__openerp__.py
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # Copyright (C) 2011 Thamini S.à.R.L (<http://www.thamini.com>) # Copyright (C) 2011 ADN Consultants S.à.R.L (<http://www.adn-luxembourg.com>) # Copyright (C) 2012-today OpenERP SA (<http://openerp.com>) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Luxembourg - Accounting', 'version': '1.0', 'category': 'Localization/Account Charts', 'description': """ This is the base module to manage the accounting chart for Luxembourg. ====================================================================== * the Luxembourg Official Chart of Accounts (law of June 2009 + 2011 chart and Taxes), * the Tax Code Chart for Luxembourg * the main taxes used in Luxembourg * default fiscal position for local, intracom, extracom """, 'author': 'OpenERP SA & ADN', 'website': 'http://www.openerp.com http://www.adn-luxembourg.com', 'depends': ['account', 'base_vat', 'base_iban'], 'init_xml': [], 'update_xml': [ # basic accounting data 'account.account.type-2011.csv', 'account.account.template-2011.csv', 'account.tax.code.template-2011.csv', 'account.chart.template-2011.csv', 'account.tax.template-2011.csv', # Change BRE: adds fiscal position 'account.fiscal.position.template-2011.csv', 'account.fiscal.position.tax.template-2011.csv', # configuration wizard, views, reports... 'l10n_lu_wizard.xml', 'account.tax.template.csv', 'l10n_lu_view.xml', 'wizard/print_vat_view.xml' ], 'test': ['test/l10n_lu_report.yml'], 'demo_xml': [], 'installable': True, 'auto_install': False, 'certificate': '0078164766621', 'images': ['images/config_chart_l10n_lu.jpeg','images/l10n_lu_chart.jpeg'], } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
Python
0
@@ -2381,44 +2381,8 @@ l',%0A - 'account.tax.template.csv',%0A
c0ee3bb87a26a57bc7dc1bd4e1aaf6136f94bc17
Add missing filters.py file in organizations
ain7/organizations/filters.py
ain7/organizations/filters.py
Python
0.000001
@@ -0,0 +1,1116 @@ +# -*- coding: utf-8%0A%22%22%22%0A ain7/organizations/filters.py%0A%22%22%22%0A#%0A# Copyright %C2%A9 2007-2015 AIn7 Devel Team%0A#%0A# This program is free software; you can redistribute it and/or modify%0A# it under the terms of the GNU General Public License as published by%0A# the Free Software Foundation; either version 2 of the License, or%0A# (at your option) any later version.%0A#%0A# This program is distributed in the hope that it will be useful,%0A# but WITHOUT ANY WARRANTY; without even the implied warranty of%0A# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the%0A# GNU General Public License for more details.%0A#%0A# You should have received a copy of the GNU General Public License%0A# along with this program; if not, write to the Free Software%0A# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA%0A#%0A#%0A%0Aimport django_filters%0A%0Afrom ain7.organizations.models import Organization%0A%0A%0Aclass OrganizationFilter(django_filters.FilterSet):%0A%0A class Meta:%0A model = Organization%0A fields = %7B%0A 'name': %5B'icontains'%5D,%0A 'activity_field': %5B'icontains'%5D,%0A %7D%0A
f56181aaf6df758abb988d10c757c6eba72d5025
write beginning of method for storing probabilities in a hash
parser.py
parser.py
Python
0.000011
@@ -0,0 +1,303 @@ +import re%0A%0AprobabilityHash = %7B%5B%5D, %22%22%7D%0A#%5Bword1, word2%5D, count%0A%0Adef parseIntoProbabilityHash(text):%0A stripPunctuation = re.sub(ur%22%5B%5E%5Cw%5Cd'%5Cs%5D+%22,' ',text)%0A wordsInText = stripPunctuation.split()%0A n = 0%0A for word in wordsInText:%0A probabilityHash%5BwordsInText%5Bn%5D%5D = 1%0A return probabilityHash%0A%0A%0A%0A%0A%0A%0A%0A%0A%0A%0A
5e54e5ebf9add6d8bd879d963803ee57fd591f4b
Write new Preparation tests
whats_fresh/whats_fresh_api/tests/views/entry/test_new_preparation.py
whats_fresh/whats_fresh_api/tests/views/entry/test_new_preparation.py
Python
0.000001
@@ -0,0 +1,3642 @@ +from django.test import TestCase%0Afrom django.core.urlresolvers import reverse%0Afrom whats_fresh_api.models import *%0Afrom django.contrib.gis.db import models%0Aimport json%0A%0A%0Aclass NewPreparationTestCase(TestCase):%0A %22%22%22%0A Test that the New Preparation page works as expected.%0A%0A Things tested:%0A URLs reverse correctly%0A The outputted page has the correct form fields%0A POSTing %22correct%22 data will result in the creation of a new%0A object with the specified details%0A POSTing data with all fields missing (hitting %22save%22 without entering%0A data) returns the same field with notations of missing fields%0A %22%22%22%0A def test_url_endpoint(self):%0A url = reverse('new-preparation')%0A self.assertEqual(url, '/entry/preparations/new')%0A%0A def test_form_fields(self):%0A %22%22%22%0A Tests to see if the form contains all of the right fields%0A %22%22%22%0A response = self.client.get(reverse('new-preparation'))%0A%0A fields = %7B'name': 'input', 'description': 'input',%0A 'additional_info': 'select'%7D%0A form = response.context%5B'preparation_form'%5D%0A%0A for field in fields:%0A # for the Edit tests, you should be able to access%0A # form%5Bfield%5D.value%0A self.assertIn(fields%5Bfield%5D, str(form%5Bfield%5D))%0A%0A def test_successful_preparation_creation_minimal(self):%0A %22%22%22%0A POST a proper %22new preparation%22 command to the server, and see if the%0A new preparation appears in the database. All optional fields are null.%0A %22%22%22%0A Preparation.objects.all().delete()%0A%0A # Data that we'll post to the server to get the new preparation created%0A new_preparation = %7B%0A 'name': 'Fried', 'description': '', 'additional_info': ''%7D%0A%0A response = self.client.post(reverse('new-preparation'),%0A new_preparation)%0A%0A preparation = Preparation.objects.all()%5B0%5D%0A for field in new_preparation:%0A self.assertEqual(%0A getattr(preparation, field), new_preparation%5Bfield%5D)%0A%0A def test_successful_preparation_creation_maximal(self):%0A %22%22%22%0A POST a proper %22new preparation%22 command to the server, and see if the%0A new preparation appears in the database. All optional fields are used.%0A %22%22%22%0A Preparation.objects.all().delete()%0A%0A # Data that we'll post to the server to get the new preparation created%0A new_preparation = %7B%0A 'name': 'Fried',%0A 'description': 'Test Description',%0A 'additional_info': 'Fried food is good'%7D%0A%0A response = self.client.post(reverse('new-preparation'),%0A new_preparation)%0A%0A preparation = Preparation.objects.all()%5B0%5D%0A for field in new_preparation:%0A self.assertEqual(%0A getattr(preparation, field), new_preparation%5Bfield%5D)%0A%0A def test_no_data_error(self):%0A %22%22%22%0A POST a %22new preparation%22 command to the server missing all of the%0A required fields, and test to see what the error comes back as.%0A %22%22%22%0A # Create a list of all objects before sending bad POST data%0A all_preparations = Preparation.objects.all()%0A%0A response = self.client.post(reverse('new-preparation'))%0A required_fields = %5B'name'%5D%0A for field_name in required_fields:%0A self.assertIn(field_name,%0A response.context%5B'preparation_form'%5D.errors)%0A%0A # Test that we didn't add any new objects%0A self.assertEqual(%0A list(Preparation.objects.all()), list(all_preparations))%0A%0A
4d92b111eecd3ce938676edee36b288c42484905
test scraper for UKÄ
statscraper/scrapers/uka_scraper.py
statscraper/scrapers/uka_scraper.py
Python
0
@@ -0,0 +1,2562 @@ +# encoding: utf-8%0Au%22%22%22 A scraper to fetch Swedish university application statistics from%0A the Swedish Higher Education Authority (Universitetskansler%C3%A4mbetet, UK%C3%84),%0A at http://statistik.uka.se%0A%22%22%22%0Afrom statscraper import BaseScraper, Dataset, Dimension, Result, Collection%0Aimport requests%0Afrom bs4 import BeautifulSoup%0A%0A%0Aclass UKA(BaseScraper):%0A%0A def _fetch_itemslist(self, item):%0A %22%22%22 We only offer regional application stats.%0A Other collections are differently structured.%0A %22%22%22%0A if item.is_root:%0A yield Collection(%22regional%22,%0A label=%22New students by area and school.%22)%0A else:%0A yield Dataset(%22county%22,%0A label=%22New students by county, school and semester.%22)%0A%0A def _fetch_dimensions(self, dataset):%0A %22%22%22 Declaring available dimensions like this is not mandatory,%0A but nice, especially if they differ from dataset to dataset.%0A%0A If you are using a built in datatype, you can specify the dialect%0A you are expecting, to have values normalized. This scraper will%0A look for Swedish month names (e.g. 'Januari'), but return them%0A according to the Statscraper standard ('january').%0A %22%22%22%0A yield Dimension(u%22school%22)%0A yield Dimension(u%22semester%22)%0A yield Dimension(u%22year%22, datatype=%22year%22)%0A yield Dimension(u%22semester%22,%0A datatype=%22academic_term%22,%0A dialect=%22swedish%22)%0A%0A def _fetch_data(self, dataset, query=None):%0A url = %22http://statistik.uka.se/4.5d85793915901d205f935d0f.12.5d85793915901d205f965eab.portlet?action=resultat&view=resultTable&frageTyp=3&frageNr=240&tid=%25s&grupp1=%25s&grupp2=%25s%22%0A terms = %5B6%5D%0A counties = %5B%7B%0A 'id': %2210%22,%0A 'municipalities': %5B%2280%22%5D%0A %7D, %5D%0A for t in terms:%0A for c in counties:%0A for m in c%5B%22municipalities%22%5D:%0A html = requests.get(url %25 (t, c, m%5B%22id%22%5D)).text%0A soup = BeautifulSoup(html, 'html.parser')%0A table = soup.find(%22table%22)%0A row = table.find_all(%22tr%22)%5B5:%5D%0A cells = row.find_all(%22td%22)%0A print cells%5B0%5D.text,%0A print cells%5B2%5D.text%0A %22%22%22%0A yield Result(value.text.encode(%22utf-8%22), %7B%0A %22date%22: date,%0A %22month%22: month,%0A %22year%22: years%5Bi%5D,%0A %7D)%0A %22%22%22%0A
d2762f81a9f8ed405ca5fc9d567004af182d137b
add importer for delimited data
python/delim_import.py
python/delim_import.py
Python
0
@@ -0,0 +1,1618 @@ +from json_generator import JsonGenerator, writeTrackEntry%0A%0Adef delimImport(file, skipLines, colNames, dataDir, trackLabel, key = None,%0A delim = %22%5Ct%22, chunkBytes = 200000, compress = True,%0A config = %7B'style': %7B'className': 'feature2'%7D%7D ):%0A fh = open(file, 'r')%0A data = %5Bline.split(delim) for line in fh.readlines()%5D%0A fh.close()%0A%0A startIndex = colNames.index(%22Start%22)%0A endIndex = colNames.index(%22End%22)%0A chromIndex = colNames.index(%22Chrom%22)%0A%0A for item in data:%0A item%5BstartIndex%5D = int(item%5BstartIndex%5D)%0A item%5BendIndex%5D = int(item%5BendIndex%5D)%0A%0A def nclCmp(a, b):%0A if a%5BstartIndex%5D == b%5BstartIndex%5D:%0A return b%5BendIndex%5D - a%5BendIndex%5D%0A return a%5BstartIndex%5D - b%5BstartIndex%5D%0A%0A data.sort(nclCmp)%0A%0A curRef = None%0A jsongen = None%0A for item in data:%0A if item%5BchromIndex%5D != curRef:%0A if jsongen is not None:%0A jsongen.generateTrack()%0A curRef = item%5BchromIndex%5D%0A classMeta = %5B%7B'attributes': colNames,%0A 'proto': %7B'Chrom': item%5BchromIndex%5D%7D %7D %5D%0A jsongen = JsonGenerator(dataDir, trackLabel, item%5BchromIndex%5D,%0A chunkBytes, compress, classMeta, key)%0A jsongen.addSorted(%5B0%5D + item)%0A%0A if (jsongen is not None) and (jsongen.hasFeatures):%0A jsongen.generateTrack()%0A%0A #attrs = ArrayRepr%0A%0A config%5B'urlTemplate'%5D = jsongen.urlTemplate%0A writeTrackEntry(dataDir, 'FeatureTrack', trackLabel,%0A key if key is not None else trackLabel,%0A config)%0A %0A %0A
de456b7e6397d775bd244b7e20eb1d675ca1bde0
Add logging to attrib plugin
nose2/plugins/attrib.py
nose2/plugins/attrib.py
from unittest import TestSuite from nose2.events import Plugin undefined = object() # TODO: eval attribs class AttributeSelector(Plugin): """TODO: document""" def __init__(self): self.attribs = [] self.addOption(self.attribs, "A", "attr", "Attribulate") def startTestRun(self, event): if not self.attribs: return attribs = [] for attr in self.attribs: # all attributes within an attribute group must match attr_group = [] for attrib in attr.strip().split(","): # don't die on trailing comma if not attrib: continue items = attrib.split("=", 1) if len(items) > 1: # "name=value" # -> 'str(obj.name) == value' must be True key, value = items else: key = items[0] if key[0] == "!": # "!name" # 'bool(obj.name)' must be False key = key[1:] value = False else: # "name" # -> 'bool(obj.name)' must be True value = True attr_group.append((key, value)) attribs.append(attr_group) if not attribs: return event.suite = self.filterSuite(event.suite, attribs) def filterSuite(self, suite, attribs): new_suite = suite.__class__() for test in suite: if isinstance(test, TestSuite): new_suite.addTest(self.filterSuite(test, attribs)) elif self.validateAttrib(test, attribs): new_suite.addTest(test) return new_suite def validateAttrib(self, test, attribs): any_ = False for group in attribs: match = True for key, value in group: obj_value = self.getAttr(test, key) if callable(value): if not value(key, test): match = False break elif value is True: # value must exist and be True if not bool(obj_value): match = False break elif value is False: # value must not exist or be False if bool(obj_value): match = False break elif type(obj_value) in (list, tuple): # value must be found in the list attribute if not str(value).lower() in [str(x).lower() for x in obj_value]: match = False break else: # value must match, convert to string and compare if (value != obj_value and str(value).lower() != str(obj_value).lower()): match = False break any_ = any_ or match return any_ def getAttr(self, test, key): val = getattr(test, key, undefined) if val is not undefined: return val if hasattr(test, '_testFunc'): val = getattr(test._testFunc, key, undefined) if val is not undefined: return val elif hasattr(test, '_testMethodName'): meth = getattr(test, test._testMethodName, undefined) if meth is not undefined: val = getattr(meth, key, undefined) if val is not undefined: return val
Python
0
@@ -1,12 +1,27 @@ +import logging%0A from unittes @@ -73,16 +73,50 @@ Plugin%0A%0A +log = logging.getLogger(__name__)%0A undefine @@ -409,16 +409,81 @@ return%0A + log.debug('Attribute selector attribs %25s', self.attribs)%0A
554f139ed02caa436e6e17baf5d880402ced8e21
Raise exception if script contains invalid import
django_extensions/management/commands/runscript.py
django_extensions/management/commands/runscript.py
# -*- coding: utf-8 -*- import sys import importlib import traceback from django.apps import apps from django_extensions.management.email_notifications import EmailNotificationCommand from django_extensions.management.utils import signalcommand class Command(EmailNotificationCommand): help = 'Runs a script in django context.' def add_arguments(self, parser): super(Command, self).add_arguments(parser) parser.add_argument('script', nargs='+') parser.add_argument( '--fixtures', action='store_true', dest='infixtures', default=False, help='Only look in app.fixtures subdir', ) parser.add_argument( '--noscripts', action='store_true', dest='noscripts', default=False, help='Look in app.scripts subdir', ) parser.add_argument( '-s', '--silent', action='store_true', dest='silent', default=False, help='Run silently, do not show errors and tracebacks', ) parser.add_argument( '--no-traceback', action='store_true', dest='no_traceback', default=False, help='Do not show tracebacks', ) parser.add_argument( '--script-args', nargs='*', type=str, help='Space-separated argument list to be passed to the scripts. Note that the ' 'same arguments will be passed to all named scripts.', ) @signalcommand def handle(self, *args, **options): NOTICE = self.style.SQL_TABLE NOTICE2 = self.style.SQL_FIELD ERROR = self.style.ERROR ERROR2 = self.style.NOTICE subdirs = [] scripts = options['script'] if not options.get('noscripts'): subdirs.append('scripts') if options.get('infixtures'): subdirs.append('fixtures') verbosity = int(options.get('verbosity', 1)) show_traceback = options.get('traceback', True) if show_traceback is None: # XXX: traceback is set to None from Django ? show_traceback = True no_traceback = options.get('no_traceback', False) if no_traceback: show_traceback = False silent = options.get('silent', False) if silent: verbosity = 0 email_notifications = options.get('email_notifications', False) if len(subdirs) < 1: print(NOTICE("No subdirs to run left.")) return if len(scripts) < 1: print(ERROR("Script name required.")) return def run_script(mod, *script_args): try: mod.run(*script_args) if email_notifications: self.send_email_notification(notification_id=mod.__name__) except Exception: if silent: return if verbosity > 0: print(ERROR("Exception while running run() in '%s'" % mod.__name__)) if email_notifications: self.send_email_notification( notification_id=mod.__name__, include_traceback=True) if show_traceback: raise def my_import(mod): if verbosity > 1: print(NOTICE("Check for %s" % mod)) # check if module exists before importing try: importlib.import_module(mod) t = __import__(mod, [], [], [" "]) except (ImportError, AttributeError) as e: if str(e).startswith('No module named'): try: exc_type, exc_value, exc_traceback = sys.exc_info() try: if exc_traceback.tb_next.tb_next is None: return False except AttributeError: pass finally: exc_traceback = None if verbosity > 0 and not silent: if verbosity > 2: traceback.print_exc() print(ERROR("Cannot import module '%s': %s." % (mod, e))) return False if hasattr(t, "run"): if verbosity > 1: print(NOTICE2("Found script '%s' ..." % mod)) return t else: if verbosity > 1: print(ERROR2("Find script '%s' but no run() function found." % mod)) def find_modules_for_script(script): """ find script module which contains 'run' attribute """ modules = [] # first look in apps for app in apps.get_app_configs(): for subdir in subdirs: mod = my_import("%s.%s.%s" % (app.name, subdir, script)) if mod: modules.append(mod) # try app.DIR.script import sa = script.split(".") for subdir in subdirs: nn = ".".join(sa[:-1] + [subdir, sa[-1]]) mod = my_import(nn) if mod: modules.append(mod) # try direct import if script.find(".") != -1: mod = my_import(script) if mod: modules.append(mod) return modules if options.get('script_args'): script_args = options['script_args'] else: script_args = [] for script in scripts: modules = find_modules_for_script(script) if not modules: if verbosity > 0 and not silent: print(ERROR("No (valid) module for script '%s' found" % script)) if verbosity < 2: print(ERROR("Try running with a higher verbosity level like: -v2 or -v3")) for mod in modules: if verbosity > 1: print(NOTICE2("Running script '%s' ..." % mod.__name__)) run_script(mod, *script_args)
Python
0.000014
@@ -3407,16 +3407,20 @@ + t = importl @@ -3445,59 +3445,8 @@ od)%0A - t = __import__(mod, %5B%5D, %5B%5D, %5B%22 %22%5D)%0A @@ -3460,17 +3460,16 @@ except -( ImportEr @@ -3475,25 +3475,8 @@ rror -, AttributeError) as @@ -3958,80 +3958,20 @@ if -verbosity %3E 0 and not silent:%0A if verbosity %3E 2:%0A +not silent:%0A @@ -4363,17 +4363,18 @@ RROR2(%22F -i +ou nd scrip
51a5c7626b634687be57c3e6ed05ea07f6468ad0
add analyzer test
timeside/tests/api/test_analyzer.py
timeside/tests/api/test_analyzer.py
Python
0.000001
@@ -0,0 +1,1454 @@ +# -*- coding: utf-8 -*-%0A%0Aimport timeside%0Afrom sys import stdout%0Aimport os.path%0Aimport numpy%0A%0A%0Aclass TestAnalyzer:%0A %0A graphers = timeside.core.processors(timeside.api.IGrapher)%0A decoders = timeside.core.processors(timeside.api.IDecoder)%0A encoders= timeside.core.processors(timeside.api.IEncoder)%0A analyzers = timeside.core.processors(timeside.api.IAnalyzer)%0A %0A def __init__(self, path):%0A self.source = os.path.join(os.path.dirname(__file__), path)%0A print %22Processing %25s%22 %25 self.source%0A self.decoder = timeside.decoder.FileDecoder(self.source)%0A print 'format: ', self.decoder.format()%0A self.pipe = self.decoder%0A self.analyzers_sub_pipe = %5B%5D%0A%0A def process(self):%0A for analyzer in self.analyzers:%0A sub_pipe = analyzer()%0A self.analyzers_sub_pipe.append(sub_pipe)%0A self.pipe = self.pipe %7C sub_pipe%0A self.pipe.run()%0A %0A def results(self):%0A analyzers = %5B%5D%0A for analyzer in self.analyzers_sub_pipe:%0A value = analyzer.result()%0A analyzers.append(%7B'name':analyzer.name(),%0A 'id':analyzer.id(),%0A 'unit':analyzer.unit(),%0A 'value':str(value)%7D)%0A print analyzers%0A%0A%0Atest = TestAnalyzer('../samples/guitar.wav')%0A#test = TestAnalyzer('/mnt/data4/Music1/Cellar_playlist_tmp/JanoB/VirulentAcidMix.wav')%0Atest.process()%0Atest.results()%0A%0A
b634e5966c48299eda8cc9a3dcd4e8f769df6812
Create 5kyu_tree_to_list.py
Solutions/5kyu/5kyu_tree_to_list.py
Solutions/5kyu/5kyu_tree_to_list.py
Python
0.000002
@@ -0,0 +1,414 @@ +class Node:%0A def __init__(self, data, child_nodes=None):%0A self.data = data%0A self.child_nodes = child_nodes%0A%0Adef tree_to_list(tr):%0A call = to_list(tr, 0, %5B%5D)%0A return call%0A %0Adef to_list(tr, depth, res):%0A res.append(%5Btr.data, depth%5D)%0A if tr.child_nodes:%0A for i in tr.child_nodes:%0A to_list(i, depth+1, res)%0A return %5Bi%5B0%5D for i in sorted(res, key = lambda x: x%5B1%5D)%5D%0A
f1cb1cb0cdcf7ef3d5d0e286bfbd9d9664239098
Create 6kyu_alphabetized.py
Solutions/6kyu/6kyu_alphabetized.py
Solutions/6kyu/6kyu_alphabetized.py
Python
0.000033
@@ -0,0 +1,102 @@ +def alphabetized(s):%0A return ''.join(s for s in sorted(s, key=lambda s: s.lower()) if s.isalpha())%0A
2eddc73e2d7b78fbfac521eb1e6014ca26421510
Add forgotten migration
osmdata/migrations/0012_auto_20170829_1539.py
osmdata/migrations/0012_auto_20170829_1539.py
Python
0.000004
@@ -0,0 +1,566 @@ +# -*- coding: utf-8 -*-%0A# Generated by Django 1.11.4 on 2017-08-29 15:39%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations, models%0Aimport django.db.models.deletion%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('osmdata', '0011_auto_20170824_1521'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='osmelement',%0A name='bounds',%0A field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='osmdata.Bounds'),%0A ),%0A %5D%0A
d00243d9500118400f7e08409d9564b15b2b4148
Add trivial CLI example
examples/cliExample.py
examples/cliExample.py
Python
0.000004
@@ -0,0 +1,1756 @@ +# Very Simple CLI example%0A%0Afrom OTXv2 import OTXv2%0Aimport IndicatorTypes%0Aimport argparse%0A%0A# Your API key%0AAPI_KEY = ''%0AOTX_SERVER = 'https://otx.alienvault.com/'%0Aotx = OTXv2(API_KEY, server=OTX_SERVER)%0A%0Aparser = argparse.ArgumentParser(description='Description of your program')%0Aparser.add_argument('-i', '--ip', help='IP eg; 4.4.4.4', required=False)%0Aparser.add_argument(%0A '-d', '--domain', help='Domain eg; alienvault.com', required=False)%0Aparser.add_argument('-ho', '--hostname',%0A help='Hostname eg; www.alienvault.com', required=False)%0Aparser.add_argument(%0A '-u', '--url', help='URL eg; http://www.alienvault.com', required=False)%0Aparser.add_argument(%0A '-m', '--md5', help='MD5 Hash of a file eg; 7b42b35832855ab4ff37ae9b8fa9e571', required=False)%0Aparser.add_argument(%0A '-p', '--pulse', help='Search pulses for a string eg; Dridex', required=False)%0Aparser.add_argument('-s', '--subscribed', help='Get pulses you are subscribed to',%0A required=False, action='store_true')%0A%0Aargs = vars(parser.parse_args())%0A%0Aif args%5B%22ip%22%5D:%0A print (str(otx.get_indicator_details_full(IndicatorTypes.IPv4, args%5B%22ip%22%5D)))%0A%0Aif args%5B%22domain%22%5D:%0A print (str(otx.get_indicator_details_full(IndicatorTypes.DOMAIN, args%5B%22domain%22%5D)))%0A%0Aif args%5B%22hostname%22%5D:%0A print (str(otx.get_indicator_details_full(IndicatorTypes.HOSTNAME, args%5B%22hostname%22%5D)))%0A%0Aif args%5B%22url%22%5D:%0A print (str(otx.get_indicator_details_full(IndicatorTypes.URL, args%5B%22url%22%5D)))%0A%0Aif args%5B%22md5%22%5D:%0A print (str(otx.get_indicator_details_full(IndicatorTypes.FILE_HASH_MD5, args%5B%22md5%22%5D)))%0A%0Aif args%5B%22pulse%22%5D:%0A result = otx.search_pulses(args%5B%22pulse%22%5D)%0A print (str(result.get('results')))%0A%0Aif args%5B%22subscribed%22%5D:%0A print (str(otx.getall(max_page=3, limit=5)))
ecc8a93ddda784102311ebfd4c3c93624f356778
Add migration to add strip_html sql function
cnxarchive/sql/migrations/20160723123620_add_sql_function_strip_html.py
cnxarchive/sql/migrations/20160723123620_add_sql_function_strip_html.py
Python
0
@@ -0,0 +1,335 @@ +# -*- coding: utf-8 -*-%0A%0A%0Adef up(cursor):%0A cursor.execute(%22%22%22%5C%0ACREATE OR REPLACE FUNCTION strip_html(html_text TEXT)%0A RETURNS text%0AAS $$%0A import re%0A return re.sub('%3C%5B%5E%3E%5D*?%3E', '', html_text, re.MULTILINE)%0A$$ LANGUAGE plpythonu IMMUTABLE;%0A %22%22%22)%0A%0A%0Adef down(cursor):%0A cursor.execute(%22DROP FUNCTION IF EXISTS strip_html(TEXT)%22)%0A
0f5b15a1f909c79b40a3f2655d00bc7852d41847
add missing migration
conversion_service/conversion_job/migrations/0003_auto_20151120_1528.py
conversion_service/conversion_job/migrations/0003_auto_20151120_1528.py
Python
0.000258
@@ -0,0 +1,892 @@ +# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations, models%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('conversion_job', '0002_auto_20151119_1332'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='conversionjob',%0A name='status',%0A field=models.CharField(max_length=20, verbose_name='job status', default='new', choices=%5B('error', 'error'), ('new', 'new'), ('queued', 'queued'), ('started', 'started'), ('done', 'done')%5D),%0A ),%0A migrations.AlterField(%0A model_name='gisformat',%0A name='progress',%0A field=models.CharField(max_length=20, verbose_name='progress', default='new', choices=%5B('error', 'error'), ('new', 'new'), ('received', 'received'), ('started', 'started'), ('successful', 'successful')%5D),%0A ),%0A %5D%0A
a5724d42e4acaa1a81aa66b69dc72b0b5fa31eae
Update build.py
infra/gcb/build.py
infra/gcb/build.py
#!/usr/bin/python2 """Starts project build on Google Cloud Builder. Usage: build.py <project_dir> """ import base64 import collections import datetime import os import subprocess import sys import time import urllib import yaml from oauth2client.client import GoogleCredentials from oauth2client.service_account import ServiceAccountCredentials from googleapiclient.discovery import build CONFIGURATIONS = { 'sanitizer-address' : [ 'SANITIZER=address' ], 'sanitizer-memory' : [ 'SANITIZER=memory' ], 'sanitizer-undefined' : [ 'SANITIZER=undefined' ], 'engine-libfuzzer' : [ 'FUZZING_ENGINE=libfuzzer' ], 'engine-afl' : [ 'FUZZING_ENGINE=afl' ], } EngineInfo = collections.namedtuple( 'EngineInfo', ['upload_bucket', 'supported_sanitizers']) ENGINE_INFO = { 'libfuzzer': EngineInfo( upload_bucket='clusterfuzz-builds', supported_sanitizers=['address', 'memory', 'undefined']), 'afl': EngineInfo( upload_bucket='clusterfuzz-builds-afl', supported_sanitizers=['address']), } DEFAULT_ENGINES = ['libfuzzer', 'afl'] DEFAULT_SANITIZERS = ['address', 'undefined'] def usage(): sys.stderr.write( "Usage: " + sys.argv[0] + " <project_dir>\n") exit(1) def load_project_yaml(project_dir): project_name = os.path.basename(project_dir) project_yaml_path = os.path.join(project_dir, 'project.yaml') with open(project_yaml_path) as f: project_yaml = yaml.safe_load(f) project_yaml.setdefault('name', project_name) project_yaml.setdefault('image', 'gcr.io/clusterfuzz-external/oss-fuzz/' + project_name) project_yaml.setdefault('sanitizers', DEFAULT_SANITIZERS) project_yaml.setdefault('fuzzing_engines', DEFAULT_ENGINES) return project_yaml def get_signed_url(path): timestamp = int(time.time() + 60 * 60 * 5) blob = 'PUT\n\n\n{0}\n{1}'.format( timestamp, path) creds = ServiceAccountCredentials.from_json_keyfile_name( os.environ['GOOGLE_APPLICATION_CREDENTIALS']) client_id = creds.service_account_email signature = base64.b64encode(creds.sign_blob(blob)[1]) values = { 'GoogleAccessId': client_id, 'Expires': timestamp, 'Signature': signature, } return ('https://storage.googleapis.com{0}?'.format(path) + urllib.urlencode(values)) def is_supported_configuration(fuzzing_engine, sanitizer): return sanitizer in ENGINE_INFO[fuzzing_engine].supported_sanitizers def get_build_steps(project_yaml): name = project_yaml['name'] image = project_yaml['image'] ts = datetime.datetime.now().strftime('%Y%m%d%H%M') build_steps = [ { 'name': 'gcr.io/cloud-builders/docker', 'args': [ 'build', '-t', image, '.', ], 'dir': 'projects/' + name, }, { 'name': image, 'args': [ 'bash', '-c', 'srcmap > /workspace/srcmap.json && cat /workspace/srcmap.json' ], 'env': [ 'OSSFUZZ_REVISION=$REVISION_ID' ], }, ] for fuzzing_engine in project_yaml['fuzzing_engines']: for sanitizer in project_yaml['sanitizers']: if not is_supported_configuration(fuzzing_engine, sanitizer): continue env = CONFIGURATIONS['engine-' + fuzzing_engine][:] env.extend(CONFIGURATIONS['sanitizer-' + sanitizer]) out = '/workspace/out/' + sanitizer stamped_name = name + '-' + sanitizer + '-' + ts zip_file = stamped_name + '.zip' stamped_srcmap_file = stamped_name + '.srcmap.json' bucket = ENGINE_INFO[fuzzing_engine].upload_bucket upload_url = get_signed_url('/{0}/{1}/{2}'.format( bucket, name, zip_file)) srcmap_url = get_signed_url('/{0}/{1}/{2}'.format( bucket, name, stamped_srcmap_file)) env.append('OUT=' + out) build_steps.extend([ # compile {'name': image, 'env' : env, 'args': [ 'bash', '-c', 'cd /src/{1} && mkdir -p {0} && compile'.format(out, name), ], }, # zip binaries {'name': image, 'args': [ 'bash', '-c', 'cd {0} && zip -r {1} *'.format(out, zip_file) ], }, # upload binaries {'name': 'gcr.io/clusterfuzz-external/uploader', 'args': [ os.path.join(out, zip_file), upload_url, ], }, # upload srcmap {'name': 'gcr.io/clusterfuzz-external/uploader', 'args': [ '/workspace/srcmap.json', srcmap_url, ], }, # cleanup {'name': image, 'args': [ 'bash', '-c', 'rm -r ' + out, ], }, ]) return build_steps def get_logs_url(build_id): URL_FORMAT = ('https://console.developers.google.com/logs/viewer?' 'resource=build%2Fbuild_id%2F{0}&project=clusterfuzz-external') return URL_FORMAT.format(build_id) def main(): if len(sys.argv) != 2: usage() project_dir = sys.argv[1] project_yaml = load_project_yaml(project_dir) options = {} if "GCB_OPTIONS" in os.environ: options = yaml.safe_load(os.environ["GCB_OPTIONS"]) build_body = { 'source': { 'repoSource': { 'branchName': 'master', 'projectId': 'clusterfuzz-external', 'repoName': 'oss-fuzz', }, }, 'steps': get_build_steps(project_yaml), 'timeout': str(4 * 3600) + 's', 'options': options, 'logsBucket': 'oss-fuzz-gcb-logs', 'images': [ project_yaml['image'] ], } credentials = GoogleCredentials.get_application_default() cloudbuild = build('cloudbuild', 'v1', credentials=credentials) build_info = cloudbuild.projects().builds().create( projectId='clusterfuzz-external', body=build_body).execute() build_id = build_info['metadata']['build']['id'] print >>sys.stderr, 'Logs:', get_logs_url(build_id) print build_id if __name__ == "__main__": main()
Python
0.000001
@@ -1533,37 +1533,16 @@ 'gcr.io/ -clusterfuzz-external/ oss-fuzz
86fdca61d8a53270d4aa28c3105f7948b04cb6f3
use first-3 octets to identify unknowns
hops.py
hops.py
#!/usr/bin/python import socket import struct import sys # # from the web! # def iptoint(ip): return int(socket.inet_aton(ip).encode('hex'),16) def inttoip(ip): return socket.inet_ntoa(hex(ip)[2:].zfill(8).decode('hex')) AS2NAME={} def get_asno(as_raw): as_long = as_raw.replace('"','').replace('&','').replace('-', ' ') as_split = as_long.split(' ') as_no = as_split[0].strip() AS2NAME[as_no] = as_no.strip() if len(as_split) > 1: AS2NAME[as_no] = as_split[1].strip() return as_no def as_array(filename,skip_header=True): ases = {} f = open(filename, 'r') header_skipped = False counter = 0 for line in f: if not header_skipped and skip_header: header_skipped=True continue s = line.split(',') as_no = get_asno(s[2]) low = int(s[0]) high = int(s[1]) ases[counter] = (as_no,low,high) counter += 1 f.close() return ases def lookup_as(ip, ases, cache): if (cache and ip in cache): return cache[ip] for as_and_range in ases: (as_no,low,high) = ases[as_and_range] if (low <= ip and ip <= high): if cache: cache[ip] = as_no return as_no # note: first-char as '[a-z]' make graphviz easier. return "x"+inttoip(ip).replace(".", "") def rate_array(filename, skip_header=True): rate = {} f = open(filename, 'r') header_skipped = False total_rates = 0 for line in f: if not header_skipped and skip_header: header_skipped=True continue s = line.split(',') site = (s[1].rpartition('.'))[0] client = s[2] index = site + "," + client bw = float(s[3]) if index not in rate: rate[index] = [] rate[index].append(bw) total_rates += 1 print "Found %s raw, client rates" % total_rates f.close() return rate def hop_array(filename, rates, skip_header=True): hops = {} f = open(filename, 'r') header_skipped = False hop_rates = 0 hop_count = 0 hop_rates_saved = {} for line in f: if not header_skipped and skip_header: header_skipped=True continue s = line.split(',') site = (s[1].rpartition('.'))[0] client = s[2] hop_a = iptoint(s[3]) hop_b = iptoint(s[4]) #print "Another data point %s -> %s" % (s[3], s[4]) rates_index = site + "," + client if (hop_a not in hops): hops[hop_a] = {} if (hop_b not in hops[hop_a]): hops[hop_a][hop_b] = [] if (rates_index,hop_a,hop_b) not in hop_rates_saved: # Save rates between all distinct pairs of rates_index,hop_a,hop_b hop_rates_saved[(rates_index,hop_a,hop_b)] = True hops[hop_a][hop_b] += rates[rates_index] hop_rates += len(rates[rates_index]) hop_count += 1 print "Assigned %s rates to %s distinct hops" % (hop_rates, hop_count) f.close() return hops def asify_hop_array(hops, ases): as_hops = {} as_cache = {} len_hop_a = len(hops) i_progress = 0.0 i_rates = 0 hop_saved = {} hop_count = 0 for hop_a in hops: msg = "Finding primary, AS-Hop pairs ... %0.2f%%" % (100*i_progress/len_hop_a) sys.stdout.write("\b"*len(msg)) sys.stdout.write(msg) sys.stdout.flush() for hop_b in hops[hop_a]: as_hop_a = lookup_as(hop_a, ases, as_cache) as_hop_b = lookup_as(hop_b, ases, as_cache) if as_hop_a not in as_hops: as_hops[as_hop_a] = {} if as_hop_b not in as_hops[as_hop_a]: as_hops[as_hop_a][as_hop_b] = [] if (hop_a,hop_b) not in hop_saved: hop_saved[(hop_a,hop_b)] = True as_hops[as_hop_a][as_hop_b] += hops[hop_a][hop_b] i_rates += len(hops[hop_a][hop_b]) hop_count += 1 i_progress+=1.0 print "\nFound %s rates in %s distinct AS hops" % (i_rates, hop_count) return as_hops def write_hop_array(filename, hops): f = open(filename, 'w') f.write("as1,as2,count,rate\n") for ashop_a in hops.keys(): for ashop_b in hops[ashop_a].keys(): # # The number of results reported is a # little off because of fence-post # issue with trailing , in the list # cnt_test = len(hops[ashop_a][ashop_b]) avg_test = 0 if cnt_test > 0: avg_test = sum(hops[ashop_a][ashop_b])/cnt_test if ashop_a not in AS2NAME: AS2NAME[ashop_a] = ashop_a if ashop_b not in AS2NAME: AS2NAME[ashop_b] = ashop_b output = [ashop_a, AS2NAME[ashop_a], ashop_b, AS2NAME[ashop_b], str(cnt_test), str(avg_test) ] f.write(",".join(output) + "\n") f.close() isp = sys.argv[1] site= sys.argv[2] rate = rate_array("cache/stage1.%s.%s.sql.csv" % (isp,site)) ases = as_array("GeoIPASNum2.csv", 0) hops = hop_array("cache/stage3.%s.%s.sql.csv" % (isp,site), rate) as_hops = asify_hop_array(hops, ases) write_hop_array("cache/hops.%s.%s.csv" %(isp,site), as_hops) #print lookup_as(iptoint("8.8.8.8"), ases, None)
Python
0.000071
@@ -1214,16 +1214,93 @@ easier.%0A + # note: also only return first three octets to reduce number of 'unknowns'%0A return @@ -1304,16 +1304,17 @@ urn %22x%22+ +( inttoip( @@ -1317,16 +1317,36 @@ oip(ip). +rpartition('.'))%5B0%5D. replace( @@ -1349,17 +1349,16 @@ ace(%22.%22, - %22%22)%0A%0Adef
ed45aa20bc54714c6eb355417520c3d90a6b47fc
Add init.py
init.py
init.py
Python
0.000063
@@ -0,0 +1,426 @@ +#!/usr/bin/env python%0A%0Aimport os%0Aimport sys%0Aimport django%0A%0Aos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'readthedocs.settings.dev')%0Asys.path.append(os.getcwd())%0Adjango.setup()%0A%0Afrom django.contrib.auth.models import User%0Aadmin = User.objects.create_user('admin', '', 'admin')%0Aadmin.is_superuser = True%0Aadmin.is_staff = True%0Aadmin.save()%0Atest = User.objects.create_user('test', '', 'test')%0Atest.is_staff = True%0Atest.save()%0A%0A