Dataset schema — each record below lists these fields in this order:

field         type                min  max
commit        string              40   40
subject       string              1    3.25k
old_file      string              4    311
new_file      string              4    311
old_contents  string              0    26.3k
lang          string (3 classes)  -    -
proba         float64             0    1
diff          string              0    7.82k
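old_contents holds the file's previous contents and is empty when a commit creates a new file; proba reads as a 0-1 confidence score attached to the lang label. In the raw dump the diff field is percent-encoded (%0A for newlines, %22 for double quotes, %25 for a literal percent sign). A minimal loading sketch, assuming a Hugging Face-style dataset dump — the dataset id and split are placeholders, not given by this document:

    from urllib.parse import unquote

    from datasets import load_dataset

    # Placeholder dataset id; substitute the actual dump.
    ds = load_dataset("org/commit-diffs", split="train")

    for row in ds.select(range(3)):
        print(row["commit"], row["lang"], row["proba"])
        print(row["subject"])
        # unquote() turns %0A back into newlines, %22 into quotes, etc.
        print(unquote(row["diff"]))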
fb37af691d63ab8a43d50701d6b1f8ae027e2e1b
Create dfirwizard.py
dfirwizard.py
dfirwizard.py
Python
0
@@ -0,0 +1,399 @@
+#!/usr/bin/python
# Sample program or step 1 in becoming a DFIR Wizard!
# No license as this code is simple and free!
import sys
import pytsk3
imagefile = "Stage2.vhd"
imagehandle = pytsk3.Img_Info(imagefile)
partitionTable = pytsk3.Volume_Info(imagehandle)
for partition in partitionTable:
  print partition.addr, partition.desc, "%ss(%s)" % (partition.start, partition.start * 512), partition.len
ac83a8bbef2c61021c39c77ef3c14675383edc62
Fix a typo.
packs/st2/actions/lib/action.py
packs/st2/actions/lib/action.py
from st2actions.runners.pythonrunner import Action
from st2client.client import Client
from st2client.models.datastore import KeyValuePair  # pylint: disable=no-name-in-module

from lib.utils import filter_none_values

__all__ = [
    'St2BaseAction'
]


class St2BaseAction(Action):
    def __init__(self, config):
        super(St2BaseAction, self).__init__(config)
        self._client = Client
        self._kvp = KeyValuePair
        self.client = self._get_client()

    def _get_client(self):
        host = self.config['base_url']

        try:
            return self._client(base_url=host)
        except Exception as e:
            return e

    def _run_client_method(self, method, method_kwargs, format_func):
        """
        Run the provided client method and format the result.

        :param method: Client method to run.
        :type method: ``func``

        :param method_kwargs: Keyword arguments passed to the client method.
        :type method_kwargs: ``dict``

        :param format_func: Function for formatting the result.
        :type format_func: ``func``

        :rtype: ``list`` of ``dict``
        """
        # Filter out parameters with string value of "None"
        # This is a work around since the default values can only be strings
        method_kwargs = filter_none_values(method_kwargs)
        method_name = method.__name__
        self.logger.debug('Calling client method "%s" with kwargs "%s"' % (method_name, method_kwargs))

        result = method(**method_kwargs)
        result = format_func(result)

        return result
Python
0.999957
@@ -106,16 +106,15 @@
 els.
-datastor
+keyvalu
 e im
d8319f2189f9383038a5ac28aeebc71f56f51687
Make pyxdg optional.
udiskie/mount.py
udiskie/mount.py
import warnings
warnings.filterwarnings("ignore", ".*could not open display.*", Warning)
warnings.filterwarnings("ignore", ".*g_object_unref.*", Warning)

import logging
import optparse
import os

import dbus
import gobject
import gio
import pynotify

from xdg.BaseDirectory import xdg_config_home

import udiskie.device
import udiskie.match


class DeviceState:
    def __init__(self, mounted, has_media):
        self.mounted = mounted
        self.has_media = has_media


class AutoMounter:
    CONFIG_PATH = 'udiskie/filters.conf'

    def __init__(self, bus=None, filter_file=None):
        self.log = logging.getLogger('udiskie.mount.AutoMounter')
        self.last_device_state = {}

        if not bus:
            from dbus.mainloop.glib import DBusGMainLoop
            DBusGMainLoop(set_as_default=True)
            self.bus = dbus.SystemBus()
        else:
            self.bus = bus

        if not filter_file:
            filter_file = os.path.join(xdg_config_home, self.CONFIG_PATH)
        self.filters = udiskie.match.FilterMatcher((filter_file,))

        self.bus.add_signal_receiver(self.device_added,
                                     signal_name='DeviceAdded',
                                     bus_name='org.freedesktop.UDisks')
        self.bus.add_signal_receiver(self.device_removed,
                                     signal_name='DeviceRemoved',
                                     bus_name='org.freedesktop.UDisks')
        self.bus.add_signal_receiver(self.device_changed,
                                     signal_name='DeviceChanged',
                                     bus_name='org.freedesktop.UDisks')

    def _mount_device(self, device):
        if device.is_handleable():
            try:
                if not device.is_mounted():
                    fstype = str(device.id_type())
                    options = self.filters.get_mount_options(device)

                    S = 'attempting to mount device %s (%s:%s)'
                    self.log.info(S % (device, fstype, options))

                    try:
                        device.mount(fstype, options)
                        self.log.info('mounted device %s' % (device,))
                    except dbus.exceptions.DBusException, dbus_err:
                        self.log.error('failed to mount device %s: %s' % (device, dbus_err))
                        return

                    mount_paths = ', '.join(device.mount_paths())
                    try:
                        pynotify.Notification('Device mounted',
                                              '%s mounted on %s' % (device.device_file(), mount_paths),
                                              'drive-removable-media').show()
                    except gio.Error:
                        pass
            finally:
                self._store_device_state(device)

    def _store_device_state(self, device):
        state = DeviceState(device.is_mounted(), device.has_media())
        self.last_device_state[device.device_path] = state

    def _remove_device_state(self, device):
        if device.device_path in self.last_device_state:
            del self.last_device_state[device.device_path]

    def _get_device_state(self, device):
        return self.last_device_state.get(device.device_path)

    def mount_present_devices(self):
        """Mount handleable devices that are already present."""
        for device in udiskie.device.get_all(self.bus):
            self._mount_device(device)

    def device_added(self, device):
        self.log.debug('device added: %s' % (device,))
        udiskie_device = udiskie.device.Device(self.bus, device)
        # Since the device just appeared we don't want the old state.
        self._remove_device_state(udiskie_device)
        self._mount_device(udiskie_device)

    def device_removed(self, device):
        self.log.debug('device removed: %s' % (device,))
        self._remove_device_state(udiskie.device.Device(self.bus, device))

    def device_changed(self, device):
        self.log.debug('device changed: %s' % (device,))
        udiskie_device = udiskie.device.Device(self.bus, device)
        last_state = self._get_device_state(udiskie_device)
        if not last_state:
            # First time we saw the device, try to mount it.
            self._mount_device(udiskie_device)
        else:
            media_added = False
            if udiskie_device.has_media() and not last_state.has_media:
                media_added = True
            if media_added and not last_state.mounted:
                # Wasn't mounted before, but it has new media now.
                self._mount_device(udiskie_device)
        self._store_device_state(udiskie_device)


def cli(args):
    parser = optparse.OptionParser()
    parser.add_option('-v', '--verbose', action='store_true',
                      dest='verbose', default=False,
                      help='verbose output')
    parser.add_option('-f', '--filters', action='store',
                      dest='filters', default=None,
                      metavar='FILE', help='filter FILE')
    (options, args) = parser.parse_args(args)

    log_level = logging.INFO
    if options.verbose:
        log_level = logging.DEBUG
    logging.basicConfig(level=log_level, format='%(message)s')

    pynotify.init('udiskie.mount')

    mounter = AutoMounter(bus=None, filter_file=options.filters)
    mounter.mount_present_devices()
    return gobject.MainLoop().run()
Python
0
@@ -245,16 +245,25 @@
 notify
 
+try:
 from xdg
@@ -299,16 +299,90 @@
 fig_home
+except ImportError:
+    xdg_config_home = os.path.expanduser('~/.config')
 
 import
7ff614950163b1fb6a8fe0fef5b8de9bfa3a9d85
Add a test for the hard-coded re() partial frac form
transmutagen/tests/test_partialfrac.py
transmutagen/tests/test_partialfrac.py
Python
0.000035
@@ -0,0 +1,377 @@
+from sympy import together, expand_complex, re, im, symbols

from ..partialfrac import t

def test_re_form():
    theta, alpha = symbols('theta, alpha')

    # Check that this doesn't change
    re_form = together(expand_complex(re(alpha/(t - theta))))
    assert re_form == (t*re(alpha) - re(alpha)*re(theta) -
                       im(alpha)*im(theta))/((t - re(theta))**2 + im(theta)**2)
7522ffb9f6934de02d5d326d5f798d42a2da800d
add script to find old experimental apis
pdfium/find_old_experimental.py
pdfium/find_old_experimental.py
Python
0
@@ -0,0 +1,1574 @@
+#!/usr/bin/env python3
#
# Copyright 2019 Miklos Vajna. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#

"""Finds my old + experimental APIs."""

import subprocess
import time


def main() -> None:
    """Commandline interface to this module."""
    apis_bytes = subprocess.check_output(["git", "grep", "-n", "Experimental API", "public/"])
    apis = apis_bytes.decode("utf-8").strip().split("\n")
    author_date_loc = []
    for api in apis:
        tokens = api.split(":")
        path = tokens[0]
        line_num = tokens[1]
        blame_bytes = subprocess.check_output(["git", "blame", "--porcelain", "-L", line_num + "," + line_num, path])
        blame_lines = blame_bytes.decode("utf-8").strip().split("\n")
        date = 0
        author = ""
        for line in blame_lines:
            if line.startswith("author-time"):
                tokens = line.split(" ")
                date = int(tokens[1])
            elif line.startswith("author "):
                tokens = line.split(" ")
                author = tokens[1]
        author_date_loc.append((author, date, path + ":" + line_num))
    author_date_loc = sorted(author_date_loc, key=lambda x: x[1])
    today = time.time()
    for author, date, loc in author_date_loc:
        if author != "Miklos":
            continue
        # Year in seconds.
        if date >= today - 3 * 31536000:
            continue
        parsed_date = time.localtime(date)
        date_string = time.strftime("%Y-%m-%d", parsed_date)
        print("date: '"+date_string+"', loc: "+loc+"")


if __name__ == "__main__":
    main()

# vim:set shiftwidth=4 softtabstop=4 expandtab:
05dd8bdfeab63b3096e8f7d98032088133d1f0e5
Add function provider to get osm data
campaign_manager/provider.py
campaign_manager/provider.py
Python
0
@@ -0,0 +1,1554 @@
+import json
import hashlib
import os

from reporter import config
from reporter.utilities import (
    split_bbox,
)
from reporter.osm import (
    load_osm_document
)
from urllib.parse import quote
from reporter.queries import TAG_MAPPING, OVERPASS_QUERY_MAP


def get_osm_data(bbox, feature):
    """Get osm data.

    :param bbox: String describing a bbox e.g. '106.78674459457397,
        -6.141301491467023,106.80691480636597,-6.133834354201348'

    :param feature: The type of feature to extract:
        buildings, building-points, roads, potential-idp, boundary-[1,11]
    :type feature: str

    :returns: A dict from retrieved OSM dataset.
    :rtype: dict
    """
    server_url = 'http://overpass-api.de/api/interpreter?data='

    tag_name = feature
    overpass_verbosity = 'body'

    try:
        coordinates = split_bbox(bbox)
    except ValueError:
        error = "Invalid area"
        coordinates = split_bbox(config.BBOX)

    feature_type = TAG_MAPPING[tag_name]
    parameters = coordinates
    parameters['print_mode'] = overpass_verbosity
    query = OVERPASS_QUERY_MAP[feature_type].format(**parameters)

    # Query to returns json string
    query = '[out:json];' + query

    encoded_query = quote(query)
    url_path = '%s%s' % (server_url, encoded_query)

    safe_name = hashlib.md5(query.encode('utf-8')).hexdigest() + '.osm'
    file_path = os.path.join(config.CACHE_DIR, safe_name)
    osm_document = load_osm_document(file_path, url_path)

    osm_data = json.loads(osm_document.read())

    return osm_data
9305f158b71f65923ee37de2805324db362e0db6
Add DRF LocalDateTimeField
arcutils/drf/serializers.py
arcutils/drf/serializers.py
Python
0
@@ -0,0 +1,325 @@
+from django.utils import timezone

from rest_framework import serializers


class LocalDateTimeField(serializers.DateTimeField):

    """Converts datetime to local time before serialization."""

    def to_representation(self, value):
        value = timezone.localtime(value)
        return super().to_representation(value)
3f9aae149dba5c9b68ff6f7fd83cadf3fd6b1d7d
Add automorphic number implementation (#7978)
maths/automorphic_number.py
maths/automorphic_number.py
Python
0
@@ -0,0 +1,1558 @@
+"""
== Automorphic Numbers ==
A number n is said to be a Automorphic number if
the square of n "ends" in the same digits as n itself.

Examples of Automorphic Numbers: 0, 1, 5, 6, 25, 76, 376, 625, 9376, 90625, ...
https://en.wikipedia.org/wiki/Automorphic_number
"""

# Author : Akshay Dubey (https://github.com/itsAkshayDubey)
# Time Complexity : O(log10n)


def is_automorphic_number(number: int) -> bool:
    """
    # doctest: +NORMALIZE_WHITESPACE
    This functions takes an integer number as input.
    returns True if the number is automorphic.
    >>> is_automorphic_number(-1)
    False
    >>> is_automorphic_number(0)
    True
    >>> is_automorphic_number(5)
    True
    >>> is_automorphic_number(6)
    True
    >>> is_automorphic_number(7)
    False
    >>> is_automorphic_number(25)
    True
    >>> is_automorphic_number(259918212890625)
    True
    >>> is_automorphic_number(259918212890636)
    False
    >>> is_automorphic_number(740081787109376)
    True
    >>> is_automorphic_number(5.0)
    Traceback (most recent call last):
        ...
    TypeError: Input value of [number=5.0] must be an integer
    """
    if not isinstance(number, int):
        raise TypeError(f"Input value of [number={number}] must be an integer")
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
34391723f44c81ceab77fd3200ee34c9f1b2d4b2
add plugin factory
pilot/common/pluginfactory.py
pilot/common/pluginfactory.py
Python
0
@@ -0,0 +1,1404 @@
+#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Wen Guan, [email protected], 2018


import logging
logger = logging.getLogger(__name__)

"""
A factory to manage plugins
"""


class PluginFactory(object):

    def __init__(self, *args, **kwargs):
        self.classMap = {}

    def get_plugin(self, confs):
        """
        Load plugin class

        :param confs: a dict of configurations.
        """

        class_name = confs['class']
        if class_name is None:
            logger.error("[class] is not defined in confs: %s" % confs)
            return None

        if class_name not in self.classMap:
            logger.info("Trying to import %s" % class_name)
            components = class_name.split('.')
            mod = __import__('.'.join(components[:-1]))
            for comp in components[1:]:
                mod = getattr(mod, comp)
            self.classMap[class_name] = mod

        args = {}
        for key in confs:
            if key in ['class']:
                continue
            args[key] = confs[key]

        cls = self.classMap[class_name]
        logger.info("Importing %s with args: %s" % (cls, args))
        impl = cls(**args)

        return impl
9f016a58a98ba89b9feae68dd01e752d75a628ec
Update test_client.py
tests/test_client.py
tests/test_client.py
import pytest

from mock import patch, Mock

from plaid import Client, require_access_token


def test_require_access_token_decorator():
    class TestClass(object):
        access_token = 'foo'

        @require_access_token
        def some_func(self):
            return True

    obj = TestClass()
    obj.some_func()


def test_require_access_token_decorator_raises():
    class TestClass(object):
        access_token = None

        @require_access_token
        def some_func(self):
            return True

    obj = TestClass()
    with pytest.raises(Exception):
        obj.some_func()


def test_connect():
    with patch('requests.post') as mock_requests_post:
        mock_response = Mock()
        mock_response.content = '{}'
        mock_requests_post.return_value = mock_response
        client = Client('myclientid', 'mysecret')
        account_type = 'bofa'
        username = 'foo'
        password = 'bar'
        email = '[email protected]'
        response = client.connect(account_type, username, password, email)
        assert mock_response == response


def test_step():
    with patch('requests.post') as mock_requests_post:
        client = Client('myclientid', 'mysecret', 'token')
        client.step('bofa', 'foo')
        assert mock_requests_post.called


def test_step_requires_access_token():
    client = Client('myclientid', 'mysecret')
    with pytest.raises(Exception):
        client.step('bofa', 'foo')


def test_delete_user():
    with patch('requests.delete') as mock_requests_delete:
        client = Client('myclientid', 'mysecret', 'token')
        client.delete_user()
        assert mock_requests_delete.called


def test_delete_user_requires_access_token():
    client = Client('myclientid', 'mysecret')
    with pytest.raises(Exception):
        client.delete_user('bofa', 'foo')


def test_transactions():
    with patch('requests.get') as mock_requests_get:
        client = Client('myclientid', 'mysecret', 'token')
        ret = client.transactions()
        assert mock_requests_get.called
        assert ret is not None


def test_transactions_requires_access_token():
    client = Client('myclientid', 'mysecret')
    with pytest.raises(Exception):
        client.transactions()


def test_balance():
    with patch('requests.get') as mock_requests_get:
        client = Client('myclientid', 'mysecret', 'token')
        ret = client.balance()
        assert mock_requests_get.called
        assert not ret is None


def test_balance_requires_access_token():
    client = Client('myclientid', 'mysecret')
    with pytest.raises(Exception):
        client.balance()


def test_entity():
    with patch('requests.get') as mock_requests_get:
        client = Client('myclientid', 'mysecret')
        client.entity(1)
        assert mock_requests_get.called


def test_categories():
    with patch('requests.get') as mock_requests_get:
        client = Client('myclientid', 'mysecret')
        client.categories()
        assert mock_requests_get.called


def test_category():
    with patch('requests.get') as mock_requests_get:
        client = Client('myclientid', 'mysecret')
        client.category(1)
        assert mock_requests_get.called


def test_categories_by_mapping():
    with patch('requests.get') as mock_requests_get:
        client = Client('myclientid', 'mysecret')
        client.categories_by_mapping('Food > Spanish Restaurant', 'plaid')
        assert mock_requests_get.called
Python
0.000002
@@ -2451,19 +2451,19 @@
 ert 
-not 
 ret is 
+not 
 None
9346ca997d723cbfedf383eb78db2f62552f8a7c
Fix empty image list test.
tests/test_comics.py
tests/test_comics.py
# -*- coding: iso-8859-1 -*-
# Copyright (C) 2004-2005 Tristan Seligmann and Jonathan Jacobs
# Copyright (C) 2012 Bastian Kleineidam
import tempfile
import shutil
from itertools import islice
from unittest import TestCase
from dosagelib import scraper


class _ComicTester(TestCase):
    """Basic comic test class."""
    scraperclass = None

    def setUp(self):
        self.name = self.scraperclass.get_name()

    def test_comic(self):
        # Test a scraper. It must be able to traverse backward for
        # at least 5 pages from the start, and find strip images
        # on at least 4 pages.
        scraperobj = self.scraperclass()
        num = empty = 0
        for strip in islice(scraperobj.getAllStrips(), 0, 5):
            images = strip.getImages()
            if len(images) == 0:
                empty += 1
            for image in images:
                self.save(image)
            num += 1
        self.check(num >= 4, 'traversal failed after %d strips.' % num)
        self.check(empty <= 1, 'failed to find images on %d pages.' % empty)

    def save(self, image):
        # create a temporary directory
        tmpdir = tempfile.mkdtemp()
        try:
            image.save(tmpdir)
        except Exception, msg:
            self.check(False, 'could not save to %s: %s' % (tmpdir, msg))
        finally:
            shutil.rmtree(tmpdir)

    def check(self, condition, msg):
        self.assertTrue(condition, "%s: %s" % (self.name, msg))


def generate_comic_testers():
    """For each comic scraper, create a test class."""
    # Limit number of scraper tests for now
    max_scrapers = 10
    for scraperclass in islice(scraper.get_scrapers(), 0, max_scrapers):
        name = 'Test' + scraperclass.__name__
        globals()[name] = type(name,
                               (_ComicTester,),
                               dict(scraperclass=scraperclass)
                               )

generate_comic_testers()
Python
0.000076
@@ -741,16 +741,43 @@
 images =
+ 0
+            for image in
 strip.g
@@ -786,17 +786,19 @@
 Images()
-
+:
@@ -805,28 +805,22 @@
-if len(images) == 0:
+ images += 1
@@ -836,18 +836,24 @@
-empty += 1
+self.save(image)
@@ -861,28 +861,22 @@
-for image in
+if not images:
@@ -892,32 +892,26 @@
-self.save(image)
+empty += 1
61cd24aef4c9c8ef72527e75991c23873892ec3b
Change listener module file
platform/listener/__init__.py
platform/listener/__init__.py
Python
0.000001
@@ -0,0 +1,61 @@
+'''
Module to handle data synchronization with contacts.
'''
18378b201cae7e23889031044fa6ddbaf50946c5
check langauge detecting for lett files where we know the expetected language from the URL
baseline/check_lett_lang.py
baseline/check_lett_lang.py
Python
0
@@ -0,0 +1,2436 @@
+#!/usr/bin/env python
# -*- coding: utf-8 -*-

import sys
import os

doc2lang = {}

if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('referencepairs', type=argparse.FileType('r'))
    parser.add_argument('-slang', help='Source language', default='en')
    parser.add_argument('-tlang', help='Non-english language', default='fr')
    parser.add_argument('-prefix', help='prefix added to make filenames',
                        default="/fs/syn0/pkoehn/crawl/data/site-crawls")

    args = parser.parse_args(sys.argv[1:])

    # read all the .lett files from stdin

    for line in sys.stdin:
        line = line.split("\t")
        if len(line) != 6:
            # sys.stderr.write("broken format: %s\n" % line[0])
            continue
        lang = line[0]
        filename = line[3].strip()
        if filename in doc2lang:
            sys.stderr.write("Duplicate entry: %s:%s\n" % (filename, lang))
        doc2lang[filename] = lang
        # print filename, lang

    correct = 0
    total = 0
    unknown = 0
    unknown_but_file = 0
    wrong_lang_pair = 0

    for line in args.referencepairs:
        total += 1
        domain, a, b = line.split("\t")
        a = a.strip()
        b = b.strip()

        found = True
        for f in (a, b):
            if f not in doc2lang:
                sys.stderr.write("unknown file %s\n" % (f))
                unknown += 1

                filename = os.path.join(args.prefix, f.split("/")[0], f)
                if os.path.isfile(filename):
                    sys.stderr.write("but file %s exists\n" % (filename))
                    unknown_but_file += 1

                found = False
            elif doc2lang[f] not in (args.slang, args.tlang):
                sys.stderr.write("%s detected as neither %s or %s\n"
                                 % (f, args.slang, args.tland))
                wrong_lang_pair += 1
                found = False

        if not found:
            continue

        if doc2lang[a] == doc2lang[b]:
            sys.stderr.write("Found both %s and %s to be in %s\n"
                             % (a, b, doc2lang[b]))
            wrong_lang_pair += 1
            continue

        correct += 1

    print "Total: ", total
    print "Possible: ", correct
    print "Unknown: ", unknown
    print "Unknown but file exists: ", unknown_but_file
    print "Wrong_lang_pair: ", wrong_lang_pair
1bbfb6fe5080de9326bd7a35afe893bf59744bdf
add ASGI plugin/middleware tests.
honeybadger/tests/contrib/test_asgi.py
honeybadger/tests/contrib/test_asgi.py
Python
0
@@ -0,0 +1,1735 @@
+import pprint
import unittest
from async_asgi_testclient import TestClient
import aiounittest
import mock
from honeybadger import contrib

class SomeError(Exception):
    pass

def asgi_app():
    """Example ASGI App."""
    async def app(scope, receive, send):
        if "error" in scope["path"]:
            raise SomeError("Some Error.")
        headers = [(b"content-type", b"text/html")]
        body = f"<pre>{pprint.PrettyPrinter(indent=2, width=256).pformat(scope)}</pre>"
        await send({"type": "http.response.start", "status": 200, "headers": headers})
        await send({"type": "http.response.body", "body": body})
    return app

class ASGIPluginTestCase(unittest.TestCase):
    def setUp(self):
        self.client = TestClient(contrib.ASGIHoneybadger(asgi_app(), api_key="abcd"))

    @mock.patch("honeybadger.contrib.asgi.honeybadger")
    def test_should_support_asgi(self, hb):
        asgi_context = {"asgi": {"version": "3.0"}}
        non_asgi_context = {}
        self.assertTrue(self.client.application.supports(hb.config, asgi_context))
        self.assertFalse(self.client.application.supports(hb.config, non_asgi_context))

    @aiounittest.async_test
    @mock.patch("honeybadger.contrib.asgi.honeybadger")
    async def test_should_notify_exception(self, hb):
        with self.assertRaises(SomeError):
            await self.client.get("/error")
        hb.notify.assert_called_once()
        self.assertEqual(type(hb.notify.call_args.kwargs["exception"]), SomeError)

    @aiounittest.async_test
    @mock.patch("honeybadger.contrib.asgi.honeybadger")
    async def test_should_not_notify_exception(self, hb):
        response = self.client.get("/")
        hb.notify.assert_not_called()
10dd7a4a70fe639b806e004bc0a0d6fb791279a3
Add a utility script:
utils/misc/grep-svn-log.py
utils/misc/grep-svn-log.py
Python
0.000301
@@ -0,0 +1,2410 @@
+#!/usr/bin/env python

"""
Greps and returns the first svn log entry containing a line matching the regular
expression pattern passed as the only arg.

Example:

svn log -v | grep-svn-log.py '^ D.+why_are_you_missing.h$'
"""

import fileinput, re, sys, StringIO

# Separator string for "svn log -v" output.
separator = '-' * 72

usage = """Usage: grep-svn-log.py line-pattern
Example:
    svn log -v | grep-svn-log.py '^ D.+why_are_you_missing.h'"""

class Log(StringIO.StringIO):
    """Simple facade to keep track of the log content."""
    def __init__(self):
        self.reset()
    def add_line(self, a_line):
        """Add a line to the content, if there is a previous line, commit it."""
        global separator
        if self.prev_line != None:
            print >> self, self.prev_line
        self.prev_line = a_line
        self.separator_added = (a_line == separator)
    def del_line(self):
        """Forget about the previous line, do not commit it."""
        self.prev_line = None
    def reset(self):
        """Forget about the previous lines entered."""
        StringIO.StringIO.__init__(self)
        self.prev_line = None
    def finish(self):
        """Call this when you're finished with populating content."""
        if self.prev_line != None:
            print >> self, self.prev_line
        self.prev_line = None

def grep(regexp):
    # The log content to be written out once a match is found.
    log = Log()

    LOOKING_FOR_MATCH = 0
    FOUND_LINE_MATCH = 1
    state = LOOKING_FOR_MATCH

    while 1:
        line = sys.stdin.readline()
        if not line:
            return
        line = line.splitlines()[0]
        if state == FOUND_LINE_MATCH:
            # At this state, we keep on accumulating lines until the separator
            # is encountered. At which point, we can return the log content.
            if line == separator:
                print log.getvalue()
                return
            log.add_line(line)

        elif state == LOOKING_FOR_MATCH:
            if line == separator:
                log.reset()
            log.add_line(line)
            # Update next state if necessary.
            if regexp.search(line):
                state = FOUND_LINE_MATCH

def main():
    if len(sys.argv) != 2:
        print usage
        sys.exit(0)

    regexp = re.compile(sys.argv[1])
    grep(regexp)

if __name__ == '__main__':
    main()
8dc7a1e239dc22dd4eb69cfe1754586e3a1690dc
Test javascript using the "js"
tests/test_run_js.py
tests/test_run_js.py
Python
0
@@ -0,0 +1,392 @@
+import os

from py2js import JavaScript

def f(x):
    return x

def test(func, run):
    func_source = str(JavaScript(func))
    run_file = "/tmp/run.js"
    with open(run_file, "w") as f:
        f.write(func_source)
        f.write("\n")
        f.write(run)
    r = os.system('js -f defs.js -f %s' % run_file)
    assert r == 0


test(f, "assert(f(3) == 3)")
test(f, "assert(f(3) != 4)")
05b47d5346281d7c899063fa9eda604f5c466431
Make sure we build the tools used for testing minidumps on Android.
build/all_android.gyp
build/all_android.gyp
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# This is all.gyp file for Android to prevent breakage in Android and other
# platform; It will be churning a lot in the short term and eventually be merged
# into all.gyp.
{
  'variables': {
    # A hook that can be overridden in other repositories to add additional
    # compilation targets to 'All'
    'android_app_targets%': [],
  },
  'targets': [
    {
      'target_name': 'All',
      'type': 'none',
      'dependencies': [
        '../content/content.gyp:content_shell_apk',
        '<@(android_app_targets)',
        'android_builder_tests',
        '../android_webview/android_webview.gyp:android_webview_apk',
        '../chrome/chrome.gyp:chromium_testshell',
      ],
    }, # target_name: All
    {
      # The current list of tests for android. This is temporary
      # until the full set supported. If adding a new test here,
      # please also add it to build/android/run_tests.py, else the
      # test is not run.
      #
      # WARNING:
      # Do not add targets here without communicating the implications
      # on tryserver triggers and load. Discuss with jrg please.
      'target_name': 'android_builder_tests',
      'type': 'none',
      'dependencies': [
        '../android_webview/android_webview.gyp:android_webview_unittests',
        '../base/android/jni_generator/jni_generator.gyp:jni_generator_tests',
        '../base/base.gyp:base_unittests',
        '../cc/cc_tests.gyp:cc_unittests',
        '../chrome/chrome.gyp:unit_tests',
        '../content/content.gyp:content_shell_test_apk',
        '../content/content.gyp:content_unittests',
        '../gpu/gpu.gyp:gpu_unittests',
        '../ipc/ipc.gyp:ipc_tests',
        '../media/media.gyp:media_unittests',
        '../net/net.gyp:net_unittests',
        '../sql/sql.gyp:sql_unittests',
        '../sync/sync.gyp:sync_unit_tests',
        '../third_party/WebKit/Source/WebKit/chromium/All.gyp:*',
        '../tools/android/device_stats_monitor/device_stats_monitor.gyp:device_stats_monitor',
        '../tools/android/fake_dns/fake_dns.gyp:fake_dns',
        '../tools/android/findbugs_plugin/findbugs_plugin.gyp:findbugs_plugin_test',
        '../tools/android/forwarder2/forwarder.gyp:forwarder2',
        '../tools/android/md5sum/md5sum.gyp:md5sum',
        '../ui/ui.gyp:ui_unittests',
        # Required by ui_unittests.
        # TODO(wangxianzhu): It'd better let ui_unittests depend on it, but
        # this would cause circular gyp dependency which needs refactoring the
        # gyps to resolve.
        '../chrome/chrome_resources.gyp:packed_resources',
      ],
      'conditions': [
        ['linux_breakpad==1', {
          'dependencies': [
            '../breakpad/breakpad.gyp:breakpad_unittests',
          ],
        }],
        ['"<(gtest_target_type)"=="shared_library"', {
          'dependencies': [
            # The first item is simply the template. We add as a dep
            # to make sure it builds in ungenerated form. TODO(jrg):
            # once stable, transition to a test-only (optional)
            # target.
            '../testing/android/native_test.gyp:native_test_apk',
            # Unit test bundles packaged as an apk.
            '../android_webview/android_webview.gyp:android_webview_unittests_apk',
            '../base/base.gyp:base_unittests_apk',
            '../cc/cc_tests.gyp:cc_unittests_apk',
            '../chrome/chrome.gyp:unit_tests_apk',
            '../content/content.gyp:content_unittests_apk',
            '../gpu/gpu.gyp:gpu_unittests_apk',
            '../ipc/ipc.gyp:ipc_tests_apk',
            '../media/media.gyp:media_unittests_apk',
            '../net/net.gyp:net_unittests_apk',
            '../sql/sql.gyp:sql_unittests_apk',
            '../sync/sync.gyp:sync_unit_tests_apk',
            '../ui/ui.gyp:ui_unittests_apk',
            '../android_webview/android_webview.gyp:android_webview_test_apk',
            '../chrome/chrome.gyp:chromium_testshell_test_apk',
            '../webkit/compositor_bindings/compositor_bindings_tests.gyp:webkit_compositor_bindings_unittests_apk'
          ],
        }],
      ],
    },
    {
      # Experimental / in-progress targets that are expected to fail
      # but we still try to compile them on bots (turning the stage
      # orange, not red).
      'target_name': 'android_experimental',
      'type': 'none',
      'dependencies': [
      ],
    },
    {
      # In-progress targets that are expected to fail and are NOT run
      # on any bot.
      'target_name': 'android_in_progress',
      'type': 'none',
      'dependencies': [
        '../content/content.gyp:content_browsertests',
      ],
    },
  ], # targets
}
Python
0.000461
@@ -2873,32 +2873,394 @@
 pad_unittests',
+          # Also compile the tools needed to deal with minidumps, they are
+          # needed to run minidump tests upstream.
+          '../breakpad/breakpad.gyp:dump_syms#host',
+          '../breakpad/breakpad.gyp:symupload#host',
+          '../breakpad/breakpad.gyp:minidump_dump#host',
+          '../breakpad/breakpad.gyp:minidump_stackwalk#host'
 ],
e44bd0b5a5db15b99a06b7561b8146554b1419d2
Add genesisbalance class #217
bitshares/genesisbalance.py
bitshares/genesisbalance.py
Python
0
@@ -0,0 +1,1163 @@
+# -*- coding: utf-8 -*-
from .account import Account
from .instance import BlockchainInstance
from graphenecommon.genesisbalance import (
    GenesisBalance as GrapheneGenesisBalance,
    GenesisBalances as GrapheneGenesisBalances,
)

from bitsharesbase.account import Address, PublicKey
from bitsharesbase import operations


@BlockchainInstance.inject
class GenesisBalance(GrapheneGenesisBalance):
    """ Read data about a Genesis Balances from the chain

        :param str identifier: identifier of the balance
        :param bitshares blockchain_instance: bitshares() instance to use when
            accesing a RPC

    """

    type_id = 15

    def define_classes(self):
        self.account_class = Account
        self.operations = operations
        self.address_class = Address
        self.publickey_class = PublicKey


@BlockchainInstance.inject
class GenesisBalances(GrapheneGenesisBalances):
    """ List genesis balances that can be claimed from the
        keys in the wallet
    """

    def define_classes(self):
        self.genesisbalance_class = GenesisBalance
        self.publickey_class = PublicKey
        self.address_class = Address
3dd71c02ea1fa9e39054bd82bf9e8657ec77d6b9
Add a script to recover the chat_id
tools/get_chat_id.py
tools/get_chat_id.py
Python
0
@@ -0,0 +1,931 @@
+#! /usr/bin/python3
# -*- coding:utf-8 -*-

# by [email protected]

import sys
import time
import telepot

def handle(msg):
    content_type, chat_type, chat_id = telepot.glance(msg)
    print("\tchat_id: {}".format(chat_id))

    if content_type == 'text' and msg['text'] == '/start':
        ans = """
Hello <b>{first_name}</b>, nice to meet you!\n
Your chat_id is <code>{chat_id}</code>.\n
You can stop the <code>get_chat_id</code> script with <code>CTRL+C</code> and start using the ProgressBot right now.\n
See you soon!
        """.format(first_name = msg['from']['first_name'],
                   chat_id = chat_id)

        bot.sendMessage(chat_id, ans, parse_mode = "HTML")

TOKEN = "PUT_YOUR_TOKKEN_HERE"

bot = telepot.Bot(TOKEN)
bot.message_loop(handle)
print ('Listening ...')

# Keep the program running.
while 1:
    try:
        time.sleep(10)
    except KeyboardInterrupt:
        print()
        sys.exit()
e950a53b2a392014fbfd7b9827a9f3f0b12a377b
add connector test class
connectortest.py
connectortest.py
Python
0
@@ -0,0 +1,696 @@
+import unittest
import threading
import re
import message
import StringIO
from connector import Connector, AppConnector
import SocketServer
from threadserver import DetailServer
from datetime import datetime
from PIL import Image


class App:
    def update_msg(self, txtmsg):
        print txtmsg.get_body()
        return txtmsg
    def update_image(self, imgmsg):
        img = imgmsg.get_image()
        img.show()
        return imgmsg


class ConnectorTest(unittest.TestCase):

    def setUp(self):
        self.app = App()
        self.c = AppConnector(app=self.app)

    def test_header(self):
        c = self.c
        c.serve_forever()


if __name__ == '__main__':
    unittest.main()
f97868b89da50532413465250d84308b84276296
add script
scripts/getliblist.py
scripts/getliblist.py
Python
0.000001
@@ -0,0 +1,806 @@
+#!/usr/bin/env python

import sys
import os

def getlibs(invcf):
    rgs = {}
    with open(invcf, 'r') as vcf:
        for line in vcf:
            if not line.startswith('#'):
                chrom, pos, id, ref, alt, qual, filter, info, format, sample = line.strip().split('\t')
                for rg in sample.split(':')[-1].split(','):
                    rgs[rg] = True

    return rgs.keys()


if len(sys.argv) == 2:
    rgs = []
    with open(sys.argv[1], 'r') as vcflist:
        for vcf in vcflist:
            vcf = vcf.strip()
            assert os.path.exists(vcf), "VCF not found: " + vcf
            for rg in getlibs(vcf):
                rgs.append(rg)

    print '\n'.join(sorted(list(set(rgs))))

else:
    print "usage:", sys.argv[0], "<tebreak output vcf list in a file>"
fda7d76e4b10a1b43e3612742585d9abcc7b27da
Rename tags.py to search.py
tiddlywebplugins/tank/search.py
tiddlywebplugins/tank/search.py
Python
0.000003
@@ -0,0 +1,1890 @@
+"""
Routines associated with finding and listing tags.

An experiment for now.
"""

from tiddlyweb.model.bag import Bag
from tiddlyweb.model.policy import PermissionsError
from tiddlywebplugins.whoosher import get_searcher, query_parse


def list_tags(environ, start_response):
    """
    Plain text list of tags in a certain context.

    If a q query parameter is provided, then that is used to limit
    the search space for tags. For example q=modifier:cdent bag:foobar
    would return tags only from tiddlers in the bag foobar with most
    recent modifier of cdent.
    """
    config = environ['tiddlyweb.config']
    query = environ['tiddlyweb.query'].get('q', [None])[0]

    searcher = get_searcher(config)

    if query:
        # XXX this is not robust in the face of wacky inputs
        # (including quoted inputs), for now we ride.
        kwargs = dict([entry.split(':') for entry in query.split()])
        documents = searcher.documents(**kwargs)
    else:
        documents = searcher.documents()

    # As yet unknown if this will be slow or not.
    set_tags = set()
    for stored_fields in documents:
        set_tags.update(stored_fields['tags'].split(','))

    start_response('200 OK', [('Content-Type', 'text/plain; charset=UTF-8')])

    return '\n'.join(set_tags)


def get_comp_bags(store, config, usersign):
    """
    Saving for later. Return a list of bags that can be used in
    comps.
    """
    comp_bags = []
    for result in full_search(config, 'title:app'):
        bag, _ = result['id'].split(':', 1)
        bag = store.get(Bag(bag))
        try:
            bag.policy.allows(usersign, 'read')
            comp_bags.append(bag)
        except PermissionsError:
            pass
    return comp_bags


def full_search(config, query):
    query = query_parse(config, query)
    searcher = get_searcher(config)
    return searcher.search(query)
eac6545d0700d2a6c3de43db5ea8d46cfea12464
Update link.py
link.py
link.py
Python
0
@@ -0,0 +1,438 @@
+from module import XMPPModule
import halutils
import re, requests

class Link(XMPPModule):

    def handleMessage(self, msg):
        obj = re.match('.*(http[s]?://.*)+', msg['body'])

        if obj:
            addr = obj.group(1)
            webpage = requests.get(addr).content
            title = re.match('.*<title>(.*)</title>', str(webpage)).group(1).rstrip().lstrip()
            self.xmpp.reply(msg, "Website: " + title)
c29e430301dc854dc7bd83ebc2a588cea70589a6
Fix has_perm issue in get_project_list
sentry/web/helpers.py
sentry/web/helpers.py
""" sentry.web.views ~~~~~~~~~~~~~~~~ :copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ from django.conf import settings as dj_settings from django.core.urlresolvers import reverse, resolve from django.http import HttpResponse from django.template import loader from sentry.conf import settings from sentry.models import Project def get_project_list(user=None, flag=None): """ Returns a set of all projects a user has some level of access to. """ projects = dict((p.pk, p) for p in Project.objects.filter(public=True)) if user.is_authenticated(): projects.update(dict((p.pk, p) for p in Project.objects.filter(member_set__user=user) if (not flag or p.has_perm(flag)))) return projects _LOGIN_URL = None def get_login_url(reset=False): global _LOGIN_URL if _LOGIN_URL is None or reset: # if LOGIN_URL resolves force login_required to it instead of our own # XXX: this must be done as late as possible to avoid idempotent requirements try: resolve(dj_settings.LOGIN_URL) except: _LOGIN_URL = settings.LOGIN_URL else: _LOGIN_URL = dj_settings.LOGIN_URL if _LOGIN_URL is None: _LOGIN_URL = reverse('sentry-login') return _LOGIN_URL def iter_data(obj): for k, v in obj.data.iteritems(): if k.startswith('_') or k in ['url']: continue yield k, v def render_to_string(template, context={}): context.update({ 'has_search': False, 'MESSAGES_PER_PAGE': settings.MESSAGES_PER_PAGE, }) return loader.render_to_string(template, context) def render_to_response(template, context={}, status=200): response = HttpResponse(render_to_string(template, context)) response.status_code = status return response
Python
0
@@ -394,16 +394,32 @@
 Project
+Member, Project
@@ -675,39 +675,83 @@
 update(dict(
+
 (p
+m
 .p
-k, p)
+roject_id, pm.project)
 for p
+m
 in Project.
@@ -741,32 +741,38 @@
 or pm in Project
+Member
 .objects.filter(
@@ -775,30 +775,56 @@
 ter(
-member_set__user=user)
+user=user).select_related('project')
 if
@@ -837,16 +837,17 @@
 lag or p
+m
 .has_per
@@ -896,16 +896,18 @@
 = None
+
+
 def get_
@@ -1362,17 +1362,16 @@
 s None:
- 
@@ -1411,16 +1411,16 @@
 login')
+
 retu
@@ -1434,16 +1434,17 @@
 IN_URL
+
 def iter
@@ -1580,16 +1580,17 @@
 d k, v
+
 def rend
@@ -1740,16 +1740,16 @@
 })
-
 retu
@@ -1795,16 +1795,17 @@
 ntext)
+
 def rend
1510a0faeff91f6f6ed7a1c5929628d430cb0506
Update file identification tools
fpr/migrations/0010_update_fido_136.py
fpr/migrations/0010_update_fido_136.py
Python
0
@@ -0,0 +1,836 @@
+# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations, models


def data_migration(apps, schema_editor):
    IDTool = apps.get_model('fpr', 'IDTool')
    IDTool.objects.filter(description='Fido', version='1.3.5').update(version='1.3.6')
    IDTool.objects.filter(description='Siegfried', version='1.6.7').update(version='1.7.3')

def reverse_migration(apps, schema_editor):
    IDTool = apps.get_model('fpr', 'IDTool')
    IDTool.objects.filter(description='Fido', version='1.3.6').update(version='1.3.5')
    IDTool.objects.filter(description='Siegfried', version='1.7.3').update(version='1.6.7')

class Migration(migrations.Migration):

    dependencies = [
        ('fpr', '0009_pronom_90'),
    ]

    operations = [
        migrations.RunPython(data_migration, reverse_migration),
    ]
93548efe9eb04dd9659e3cc76c711d967e8770df
Create filereader.py
filereader.py
filereader.py
Python
0
@@ -0,0 +1,1107 @@
+#!/usr/bin/python
import os
import re
from optparse import OptionParser
SUFFIX=".out"

def main () :
    global filename
    parser = OptionParser()
    parser.add_option("-f", "--file", dest="filename",
                      help="the file to update", metavar="FILE")
    parser.add_option("-n", "--name", dest="name",
                      help="the name to replace the original name with", metavar="NAME")
    parser.add_option("-c", "--fromname", dest="fromname",
                      help="the name be replaced", metavar="FROMNAME")
    (options, args) = parser.parse_args()
    if not options.filename :
        print "You must specify the file to modify"
        exit(-1)
    if not options.name :
        print "You must specify the name to replace Tim with"
        exit(-1)
    if not options.fromname :
        print "You must specify the name to be replaced"
        exit(-1)
    fin = open(options.filename, 'r')
    fout = open(options.filename + SUFFIX, 'w')

    for line in fin :
        fout.write(re.sub(options.fromname, options.name, line))
    fin.close()
    fout.close()

main()
23ab301f4773892f6db7321105f79ba0c48404a3
add urls
src/doc/expedient/source/developer/sshaggregate/urls.py
src/doc/expedient/source/developer/sshaggregate/urls.py
Python
0.000006
@@ -0,0 +1,392 @@
+from django.conf.urls.defaults import *

urlpatterns = patterns('sshaggregate.views',
    url(r'^aggregate/create/$', 'aggregate_crud', name='sshaggregate_aggregate_create'),
    url(r'^aggregate/(?P<agg_id>\d+)/edit/$', 'aggregate_crud', name='sshaggregate_aggregate_edit'),
    url(r'^aggregate/(?P<agg_id>\d+)/servers/$', 'aggregate_add_servers', name='sshaggregate_aggregate_servers'),
)
fed2e3f9bdb3a00b077b5e7df1aed4d927b77b6c
Add test for Clifford drudge by quaternions
tests/clifford_test.py
tests/clifford_test.py
Python
0
@@ -0,0 +1,697 @@
+"""Test for the Clifford algebra drudge."""

from drudge import CliffordDrudge, Vec, inner_by_delta


def test_clifford_drudge_by_quaternions(spark_ctx):
    """Test basic functionality of Clifford drudge by quaternions.
    """

    dr = CliffordDrudge(
        spark_ctx, inner=lambda v1, v2: -inner_by_delta(v1, v2)
    )
    e_ = Vec('e')

    i_ = dr.sum(e_[2] * e_[3]).simplify()
    j_ = dr.sum(e_[3] * e_[1]).simplify()
    k_ = dr.sum(e_[1] * e_[2]).simplify()

    for i in [i_, j_, k_]:
        assert (i * i).simplify() == -1

    assert (i_ * j_ * k_).simplify() == -1

    assert (i_ * j_).simplify() == k_
    assert (j_ * k_).simplify() == i_
    assert (k_ * i_).simplify() == j_
09a0689b8e521c1d5c0ea68ac448dc9ae7abcff5
Read the header of a fits file and/or look up a single key (case insensitive).
fitsHeader.py
fitsHeader.py
Python
0
@@ -0,0 +1,859 @@
+#!/usr/bin/env python
# -*- coding: utf8 -*-

# My imports
from __future__ import division
from astropy.io import fits
from pydoc import pager
import argparse


def _parser():
    parser = argparse.ArgumentParser(description='View the header of a fits file')
    parser.add_argument('input', help='File name of fits file')
    parser.add_argument('-key', help='Look up a given key (case insensitive)', default=None)
    return parser.parse_args()


if __name__ == '__main__':
    args = _parser()
    h = fits.getheader(args.input)
    h.keys = map(str.lower, h.keys())

    if args.key:
        args.key = args.key.lower()
        try:
            print h[args.key]
        except KeyError:
            raise KeyError('Key was not found')
    else:
        string = '\n'.join("{!s} : {!r}".format(key, val) for (key, val) in h.items())
        pager(string)
b674f921a8e5cffb2d3e320f564c61ca01455a9f
Add command to generate a csv of talk titles and video reviewers
wafer/management/commands/wafer_talk_video_reviewers.py
wafer/management/commands/wafer_talk_video_reviewers.py
Python
0.000011
@@ -0,0 +1,864 @@
+import sys
import csv

from django.core.management.base import BaseCommand

from django.contrib.auth import get_user_model
from wafer.talks.models import Talk, ACCEPTED, PROVISIONAL


class Command(BaseCommand):
    help = ("List talks and the associated video_reviewer emails."
            " Only reviewers for accepted talks are listed")

    def _video_reviewers(self, options):
        talks = Talk.objects.filter(status=ACCEPTED)

        csv_file = csv.writer(sys.stdout)
        for talk in talks:
            reviewer = talk.video_reviewer
            if not reviewer:
                reviewer = 'NO REVIEWER'
            row = [x.encode("utf-8") for x in (
                talk.title,
                reviewer,
            )]
            csv_file.writerow(row)

    def handle(self, *args, **options):
        self._video_reviewers(options)
3db3c22d83071550d8bbd70062f957cf43c5e54a
Add a compatibility module, because of Python 2/3 compatibility issues.
cart/_compatibility.py
cart/_compatibility.py
Python
0
@@ -0,0 +1,460 @@
+import sys

is_py3 = sys.version_info[0] >= 3


def utf8(string):
    """Cast to unicode DAMMIT!
    Written because Python2 repr always implicitly casts to a string, so we
    have to cast back to a unicode (and we now that we always deal with valid
    unicode, because we check that in the beginning).
    """
    if is_py3:
        return str(string)
    elif not isinstance(string, unicode):
        return unicode(str(string), 'UTF-8')
    return string
156b7dfc11f24a7d77d2280e8ddade3cb7a474b7
Add a script for listing all Elasticsearch indexes
misc/list_all_es_indexes.py
misc/list_all_es_indexes.py
Python
0
@@ -0,0 +1,1325 @@
+#!/usr/bin/env python
# -*- encoding: utf-8

import boto3
import hcl
import requests


def get_terraform_vars():
    s3_client = boto3.client("s3")
    tfvars_body = s3_client.get_object(
        Bucket="wellcomecollection-platform-infra",
        Key="terraform.tfvars"
    )["Body"]
    return hcl.load(tfvars_body)


def build_url(es_credentials):
    protocol = es_credentials["protocol"]
    name = es_credentials["name"]
    region = es_credentials["region"]
    port = es_credentials["port"]
    return f"{protocol}://{name}.{region}.aws.found.io:{port}"


def get_all_indexes(es_url, username, password):
    resp = requests.get(
        f"{es_url}/_cat/indices",
        auth=(username, password),
        params={"format": "json"}
    )
    resp.raise_for_status()

    return resp.json()


if __name__ == "__main__":
    terraform_vars = get_terraform_vars()
    es_cluster_credentials = terraform_vars["es_cluster_credentials"]

    es_url = build_url(es_cluster_credentials)

    username = es_cluster_credentials["username"]
    password = es_cluster_credentials["password"]

    indexes = get_all_indexes(es_url, username=username, password=password)

    print(
        '\n'.join(sorted(
            idx["index"]
            for idx in indexes
            if not idx["index"].startswith(".")
        ))
    )
006a921f19f6c4f64d694c86346ad85ada2c8bb8
Add tests for subclass support
tests/subclass_test.py
tests/subclass_test.py
Python
0
@@ -0,0 +1,3129 @@
+#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vi:ts=4:et

try:
    import unittest2 as unittest
except ImportError:
    import unittest
import pycurl

CLASSES = (pycurl.Curl, pycurl.CurlMulti, pycurl.CurlShare)

class SubclassTest(unittest.TestCase):
    def test_baseclass_init(self):
        # base classes do not accept any arguments on initialization
        for baseclass in CLASSES:
            try:
                baseclass(0)
            except TypeError:
                pass
            else:
                raise AssertionError('Base class accepted invalid args')
            try:
                baseclass(a=1)
            except TypeError:
                pass
            else:
                raise AssertionError('Base class accepted invalid kwargs')

    def test_subclass_create(self):
        for baseclass in CLASSES:
            # test creation of a subclass
            class MyCurlClass(baseclass):
                pass
            # test creation of its object
            obj = MyCurlClass()
            # must be of type subclass, but also an instance of base class
            assert type(obj) == MyCurlClass
            assert isinstance(obj, baseclass)

    def test_subclass_init(self):
        for baseclass in CLASSES:
            class MyCurlClass(baseclass):
                def __init__(self, x, y=4):
                    self.x = x
                    self.y = y
            # subclass __init__ must be able to accept args and kwargs
            obj = MyCurlClass(3)
            assert obj.x == 3
            assert obj.y == 4
            obj = MyCurlClass(5, y=6)
            assert obj.x == 5
            assert obj.y == 6
            # and it must throw TypeError if arguments don't match
            try:
                MyCurlClass(1, 2, 3, kwarg=4)
            except TypeError:
                pass
            else:
                raise AssertionError('Subclass accepted invalid arguments')

    def test_subclass_method(self):
        for baseclass in CLASSES:
            class MyCurlClass(baseclass):
                def my_method(self, x):
                    return x + 1
            obj = MyCurlClass()
            # methods must be able to accept arguments and return a value
            assert obj.my_method(1) == 2

    def test_subclass_method_override(self):
        # setopt args for each base class
        args = {
            pycurl.Curl: (pycurl.VERBOSE, 1),
            pycurl.CurlMulti: (pycurl.M_MAXCONNECTS, 3),
            pycurl.CurlShare: (pycurl.SH_SHARE, pycurl.LOCK_DATA_COOKIE),
        }
        for baseclass in CLASSES:
            class MyCurlClass(baseclass):
                def setopt(self, option, value):
                    # base method must not be overwritten
                    assert super().setopt != self.setopt
                    # base method mut be callable, setopt must return None
                    assert super().setopt(option, value) is None
                    # return something else
                    return 'my setopt'
            obj = MyCurlClass()
            assert obj.setopt(*args[baseclass]) == 'my setopt'
c8816f509a661ed53c166d843ebfb7dcb6b8d75a
use only single threaded svrlight
examples/undocumented/python_modular/regression_svrlight_modular.py
examples/undocumented/python_modular/regression_svrlight_modular.py
###########################################################################
# svm light based support vector regression
###########################################################################
from numpy import array
from numpy.random import seed, rand
from tools.load import LoadMatrix
lm=LoadMatrix()

traindat = lm.load_numbers('../data/fm_train_real.dat')
testdat = lm.load_numbers('../data/fm_test_real.dat')
label_traindat = lm.load_labels('../data/label_train_twoclass.dat')

parameter_list = [[traindat,testdat,label_traindat,1.2,1,1e-5,1e-2,3],[traindat,testdat,label_traindat,2.3,0.5,1e-5,1e-6,1]]

def regression_svrlight_modular(fm_train=traindat,fm_test=testdat,label_train=label_traindat, \
                                width=1.2,C=1,epsilon=1e-5,tube_epsilon=1e-2,num_threads=3):

    from shogun.Features import Labels, RealFeatures
    from shogun.Kernel import GaussianKernel
    try:
        from shogun.Regression import SVRLight
    except ImportError:
        print 'No support for SVRLight available.'
        return

    feats_train=RealFeatures(fm_train)
    feats_test=RealFeatures(fm_test)

    kernel=GaussianKernel(feats_train, feats_train, width)

    labels=Labels(label_train)

    svr=SVRLight(C, epsilon, kernel, labels)
    svr.set_tube_epsilon(tube_epsilon)
    svr.parallel.set_num_threads(num_threads)
    svr.train()

    kernel.init(feats_train, feats_test)
    out = svr.classify().get_labels()

    return out, kernel

if __name__=='__main__':
    print 'SVRLight'
    regression_svrlight_modular(*parameter_list[0])
Python
0
@@ -546,17 +546,17 @@
 -5,1e-2,
-3
+1
 ],[train
7327250621dc34a1e7c2f1998333d65024583168
add simple test
tests/test_commands.py
tests/test_commands.py
Python
0.00057
@@ -0,0 +1,759 @@
+# Copyright 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from zpmlib import commands

def test_all_commands_sorted():
    cmd_names = [cmd.__name__ for cmd in commands.all_commands()]
    assert cmd_names == sorted(cmd_names)
2b2f11cc7650fc5c40cd21a6e8ad671656fc9b21
add quicksort
quicksort.py
quicksort.py
Python
0.00001
@@ -0,0 +1,728 @@
+'''
QuickSort implementation
'''


def quick_sort(arr, l, r):
    i = l
    j = r
    x = arr[(l + r) / 2]

    if len(arr) == 0:
        return arr
    else:
        while True:
            while arr[i] < x:
                i += 1
            while arr[j] > x:
                j -= 1
            if i <= j:
                tmp = arr[i]
                arr[i] = arr[j]
                arr[j] = tmp
                i += 1
                j -= 1

            if i >= j:
                break

        if l < j:
            quick_sort(arr, l, j)
        if i < r:
            quick_sort(arr, i, r)


if __name__ == '__main__':
    arr = [12, 4, 5, 6, 7, 3, 1, 15]
    quick_sort(arr, 0, len(arr) - 1)
    print arr
177590fd65b3daa4aad77e3297fd0b92ae74f04c
Simplify MQTT light code
homeassistant/components/light/mqtt.py
homeassistant/components/light/mqtt.py
""" homeassistant.components.light.mqtt ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Allows to configure a MQTT light. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/light.mqtt/ """ from functools import partial import logging import homeassistant.components.mqtt as mqtt from homeassistant.components.light import (Light, ATTR_BRIGHTNESS, ATTR_RGB_COLOR) from homeassistant.util.template import render_with_possible_json_value _LOGGER = logging.getLogger(__name__) DEFAULT_NAME = 'MQTT Light' DEFAULT_QOS = 0 DEFAULT_PAYLOAD_ON = 'ON' DEFAULT_PAYLOAD_OFF = 'OFF' DEFAULT_OPTIMISTIC = False DEPENDENCIES = ['mqtt'] CONF_TOPICS = [typ + topic for typ in ('', 'brightness_', 'rgb_') for topic in ('state_topic', 'command_topic')] CONF_VALUE_TEMPLATES = [typ + '_value_template' for typ in ('state', 'brightness', 'rgb')] def setup_platform(hass, config, add_devices_callback, discovery_info=None): """ Add MQTT Light. """ if config.get('command_topic') is None: _LOGGER.error("Missing required variable: command_topic") return False add_devices_callback([MqttLight( hass, config.get('name', DEFAULT_NAME), {key: config.get(key) for key in CONF_TOPICS}, {key: config.get(key + '_value_template') for key in ('state', 'brightness', 'rgb')}, config.get('qos', DEFAULT_QOS), { 'on': config.get('payload_on', DEFAULT_PAYLOAD_ON), 'off': config.get('payload_off', DEFAULT_PAYLOAD_OFF) }, config.get('optimistic', DEFAULT_OPTIMISTIC))]) class MqttLight(Light): """ Provides a MQTT light. """ # pylint: disable=too-many-arguments,too-many-instance-attributes def __init__(self, hass, name, topic, templates, qos, payload, optimistic): self._hass = hass self._name = name self._topic = topic self._qos = qos self._payload = payload self._optimistic = optimistic or topic["state_topic"] is None self._optimistic_rgb = optimistic or topic["rgb_state_topic"] is None self._optimistic_brightness = (optimistic or topic["brightness_state_topic"] is None) self._state = False templates = {key: ((lambda value: value) if tpl is None else partial(render_with_possible_json_value, hass, tpl)) for key, tpl in templates.items()} def state_received(topic, payload, qos): """ A new MQTT message has been received. """ payload = templates['state'](payload) if payload == self._payload["on"]: self._state = True elif payload == self._payload["off"]: self._state = False self.update_ha_state() if self._topic["state_topic"] is not None: mqtt.subscribe(self._hass, self._topic["state_topic"], state_received, self._qos) def brightness_received(topic, payload, qos): """ A new MQTT message for the brightness has been received. """ self._brightness = int(templates['brightness'](payload)) self.update_ha_state() if self._topic["brightness_state_topic"] is not None: mqtt.subscribe(self._hass, self._topic["brightness_state_topic"], brightness_received, self._qos) self._brightness = 255 else: self._brightness = None def rgb_received(topic, payload, qos): """ A new MQTT message has been received. """ self._rgb = [int(val) for val in templates['rgb'](payload).split(',')] self.update_ha_state() if self._topic["rgb_state_topic"] is not None: mqtt.subscribe(self._hass, self._topic["rgb_state_topic"], rgb_received, self._qos) self._rgb = [255, 255, 255] else: self._rgb = None @property def brightness(self): """ Brightness of this light between 0..255. """ return self._brightness @property def rgb_color(self): """ RGB color value. 
""" return self._rgb @property def should_poll(self): """ No polling needed for a MQTT light. """ return False @property def name(self): """ Returns the name of the device if any. """ return self._name @property def is_on(self): """ True if device is on. """ return self._state def turn_on(self, **kwargs): """ Turn the device on. """ should_update = False if ATTR_RGB_COLOR in kwargs and \ self._topic["rgb_command_topic"] is not None: mqtt.publish(self._hass, self._topic["rgb_command_topic"], "{},{},{}".format(*kwargs[ATTR_RGB_COLOR]), self._qos) if self._optimistic_rgb: self._rgb = kwargs[ATTR_RGB_COLOR] should_update = True if ATTR_BRIGHTNESS in kwargs and \ self._topic["brightness_command_topic"] is not None: mqtt.publish(self._hass, self._topic["brightness_command_topic"], kwargs[ATTR_BRIGHTNESS], self._qos) if self._optimistic_brightness: self._brightness = kwargs[ATTR_BRIGHTNESS] should_update = True mqtt.publish(self._hass, self._topic["command_topic"], self._payload["on"], self._qos) if self._optimistic: # optimistically assume that switch has changed state self._state = True should_update = True if should_update: self.update_ha_state() def turn_off(self, **kwargs): """ Turn the device off. """ mqtt.publish(self._hass, self._topic["command_topic"], self._payload["off"], self._qos) if self._optimistic: # optimistically assume that switch has changed state self._state = False self.update_ha_state()
Python
0.033981
@@ -718,267 +718,8 @@ '%5D%0A%0A -CONF_TOPICS = %5Btyp + topic%0A for typ in ('', 'brightness_', 'rgb_')%0A for topic in ('state_topic', 'command_topic')%5D%0ACONF_VALUE_TEMPLATES = %5Btyp + '_value_template'%0A for typ in ('state', 'brightness', 'rgb')%5D%0A%0A %0Adef @@ -1090,20 +1090,136 @@ y in - CONF_TOPICS +%0A (typ + topic%0A for typ in ('', 'brightness_', 'rgb_')%0A for topic in ('state_topic', 'command_topic')) %7D,%0A
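Decoded, the diff above deletes the module-level CONF_TOPICS and CONF_VALUE_TEMPLATES constants and inlines the same generator into the dict comprehension that builds the topic map. Reduced to its essentials (the config value here is illustrative, not from the dataset):

prefixes = ('', 'brightness_', 'rgb_')
suffixes = ('state_topic', 'command_topic')
keys = [typ + topic for typ in prefixes for topic in suffixes]
# ['state_topic', 'command_topic', 'brightness_state_topic',
#  'brightness_command_topic', 'rgb_state_topic', 'rgb_command_topic']

config = {'command_topic': 'home/light/set'}  # made-up platform config
topics = {key: config.get(key) for key in keys}
# Missing topics come back as None; the light's __init__ treats a None
# state topic as "run optimistically".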
9cdc580ea9292fbd06cff38620de3697f1b60270
Stop landscape.io/pylint complaining about cli args
tvrenamr/cli/core.py
tvrenamr/cli/core.py
#!/usr/bin/env python from __future__ import absolute_import import functools import logging import sys import click from tvrenamr import errors from tvrenamr.cli.helpers import (build_file_list, get_config, start_dry_run, stop_dry_run) from tvrenamr.logs import start_logging from tvrenamr.main import File, TvRenamr log = logging.getLogger('CLI') @click.command() @click.option('--config', type=click.Path(), help='Select a location for your config file. If the path is invalid the default locations will be used.') # noqa @click.option('-c', '--canonical', help='Set the show\'s canonical name to use when performing the online lookup.') # noqa @click.option('--debug', is_flag=True) @click.option('-d', '--dry-run', is_flag=True, help='Dry run your renaming.') @click.option('-e', '--episode', type=int, help='Set the episode number. Currently this will cause errors when working with more than one file.') # noqa @click.option('--ignore-filelist', type=tuple, default=()) @click.option('--ignore-recursive', is_flag=True, help='Only use files from the root of a given directory, not entering any sub-directories.') # noqa @click.option('--log-file', type=click.Path(exists=True), help='Set the log file location.') @click.option('-l', '--log-level', help='Set the log level. Options: short, minimal, info and debug.') # noqa @click.option('--log-file', type=click.Path(exists=True), help='Set the log file location.') @click.option('-n', '--name', help="Set the episode's name.") @click.option('--no-cache', is_flag=True, help='Force all renames to ignore the cache.') @click.option('-o', '--output-format', help='Set the output format for the episodes being renamed.') @click.option('--organise/--no-organise', default=True, help='Organise renamed files into folders based on their show name and season number. Can be explicitly disabled.') # noqa @click.option('-p', '--partial', is_flag=True, help='Allow partial regex matching of the filename.') @click.option('-q', '--quiet', is_flag=True, help="Don't output logs to the command line") @click.option('-r', '--recursive', is_flag=True, help='Recursively lookup files in a given directory') # noqa @click.option('--rename-dir', type=click.Path(), help='The directory to move renamed files to, if not specified the working directory is used.') # noqa @click.option('--no-rename-dir', is_flag=True, default=False, help='Explicity tell Tv Renamr not to move renamed files. 
Used to override the config.') # noqa @click.option('--regex', help='The regular expression to use when extracting information from files.') # noqa @click.option('-s', '--season', help='Set the season number.') @click.option('--show', help="Set the show's name (will search for this name).") @click.option('--show-override', help="Override the show's name (only replaces the show's name in the final file)") # noqa @click.option('--specials', help='Set the show\'s specials folder (defaults to "Season 0")') @click.option('-t', '--the', is_flag=True, help="Set the position of 'The' in a show's name to the end of the show name") # noqa @click.argument('paths', nargs=-1, required=False, type=click.Path(exists=True)) def rename(config, canonical, debug, dry_run, episode, ignore_filelist, ignore_recursive, log_file, log_level, name, no_cache, output_format, organise, partial, quiet, recursive, rename_dir, no_rename_dir, regex, season, show, show_override, specials, the, paths): if debug: log_level = 10 start_logging(log_file, log_level, quiet) logger = functools.partial(log.log, level=26) if dry_run or debug: start_dry_run(logger) for current_dir, filename in build_file_list(paths, recursive, ignore_filelist): try: tv = TvRenamr(current_dir, debug, dry_run, no_cache) _file = File(**tv.extract_details_from_file(filename, user_regex=regex)) # TODO: Warn setting season & episode will override *all* episodes _file.user_overrides(show, season, episode) _file.safety_check() config = get_config(config) for episode in _file.episodes: canonical = config.get( 'canonical', _file.show_name, default=episode._file.show_name, override=canonical ) episode.title = tv.retrieve_episode_title(episode, canonical=canonical) show = config.get_output(_file.show_name, override=show_override) the = config.get('the', show=_file.show_name, override=the) _file.show_name = tv.format_show_name(show, the=the) _file.set_output_format(config.get( 'format', _file.show_name, default=_file.output_format, override=output_format )) organise = config.get( 'organise', _file.show_name, default=False, override=organise ) rename_dir = config.get( 'renamed', _file.show_name, default=current_dir, override=rename_dir ) specials_folder = config.get( 'specials_folder', _file.show_name, default='Season 0', override=specials, ) path = tv.build_path( _file, rename_dir=rename_dir, organise=organise, specials_folder=specials_folder, ) tv.rename(filename, path) except errors.NoNetworkConnectionException: if dry_run or debug: stop_dry_run(logger) sys.exit(1) except (AttributeError, errors.EmptyEpisodeTitleException, errors.EpisodeNotFoundException, errors.IncorrectCustomRegularExpressionSyntaxException, errors.InvalidXMLException, errors.MissingInformationException, errors.OutputFormatMissingSyntaxException, errors.PathExistsException, errors.ShowNotFoundException, errors.UnexpectedFormatException) as e: continue except Exception as e: if debug: # In debug mode, show the full traceback. raise for msg in e.args: log.critical('Error: %s', msg) sys.exit(1) # if we're not doing a dry run add a blank line for clarity if not (debug and dry_run): log.info('') if dry_run or debug: stop_dry_run(logger)
Python
0
@@ -3273,16 +3273,56 @@ ilelist, + #pylint: disable-msg=too-many-arguments %0A @@ -3379,16 +3379,56 @@ o_cache, + #pylint: disable-msg=too-many-arguments %0A @@ -3486,32 +3486,72 @@ ive, rename_dir, + #pylint: disable-msg=too-many-arguments %0A no_r @@ -3611,16 +3611,56 @@ ls, the, + #pylint: disable-msg=too-many-arguments %0A
b102a2769dc70deb2055a2d4ae0bf11f48c13f9d
add game window
core/core.py
core/core.py
Python
0.000001
@@ -0,0 +1,989 @@ +# -*- coding: utf-8 -*-%0Aimport pygame%0Afrom pygame.locals import *%0A%0Aclass App:%0A def __init__(self):%0A self._running = True%0A self._display_surf = None%0A self.size = self.weight, self.height = 1024, 576%0A%0A def on_init(self):%0A pygame.init()%0A self._display_surf = pygame.display.set_mode(self.size, pygame.HWSURFACE %7C pygame.DOUBLEBUF)%0A self._running = True%0A%0A def on_event(self, event):%0A if event.type == pygame.QUIT:%0A self._running = False%0A def on_loop(self):%0A pass%0A def on_render(self):%0A pass%0A def on_cleanup(self):%0A pygame.quit()%0A%0A def on_execute(self):%0A if self.on_init() == False:%0A self._running = False%0A%0A while( self._running ):%0A for event in pygame.event.get():%0A self.on_event(event)%0A self.on_loop()%0A self.on_render()%0A self.on_cleanup()%0A%0Aif __name__ == %22__main__%22 :%0A theApp = App()%0A theApp.on_execute()
fb6dd1a92471697b8665364dfaa7fedc519d00ed
Create properties.py
data/properties.py
data/properties.py
Python
0.000001
@@ -0,0 +1,967 @@ +import libtcodpy as libtcod%0Aclass Object():%0A def __init__(self, x, y, char, color, screen):%0A self.x = x%0A self.y = y%0A self.char = char%0A self.color = color%0A self.screen = screen%0A%0A%0A def draw_object(self):%0A #Set the color of the character and draw it%0A libtcod.console_set_default_foreground(self.screen, self.color)%0A libtcod.console_put_char(self.screen, self.x, self.y, self.char, libtcod.BKGND_NONE)%0A%0A def delete(self):%0A #Erase the char%0A libtcod.console_put_char(self.screen, self.x, self.y, self.char, libtcod.BKGND_NONE)%0A %0A%0A %0A%0A%0Aclass Tile():%0A #Properties of a map's tiles, theres not much to it like there is to Object%0A def __init__(self, blocked, blocked_sight):%0A self.blocked = blocked%0A self.blocked_sight = blocked_sight%0A%0A #blocked_sight's variable depends on blocked if its None%0A if blocked_sight == None: blocked_sight = blocked%0A %0A%0A
a2ba0c1658850064f55de1a99c3c2a49ef847b8d
Add join_by draft
drafts/join_by.py
drafts/join_by.py
Python
0
@@ -0,0 +1,920 @@ +def join_by(op, dicts, start=EMPTY):%0A dicts = list(dicts)%0A if not dicts:%0A return %7B%7D%0A elif len(dicts) == 1:%0A return dicts%5B0%5D%0A%0A result = %7B%7D%0A for d in dicts:%0A for k, v in iteritems(d):%0A if k in result:%0A result%5Bk%5D = op(result%5Bk%5D, v)%0A else:%0A result%5Bk%5D = v if start is EMPTY else op(start, v)%0A # result%5Bk%5D = v if start is EMPTY else start(v)%0A # result%5Bk%5D = v if start is EMPTY else op(start(), v)%0A # result%5Bk%5D = v if start is EMPTY else op(start() if callable(start) else start, v)%0A%0A return result%0A%0Ajoin_by(operator.__add__, dnfs, start=list)%0Ajoin_with(cat, dnfs)%0Ajoin_by(list.extend, dnfs, start=list)%0Ajoin_by(lambda c, _: c + 1, dnfs, start=lambda _: 1)%0Ajoin_by(lambda l, v: l + len(v), dnfs, start=len)%0A# join_by(list.append, dnfs, initial=%5B%5D)%0A%0Ajoin_by(lambda l, v: l + len(v), dnfs, 0)%0A
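The draft above leans on helpers it never defines (EMPTY, iteritems, cat, join_with, dnfs), and its single-dict branch returns the whole list rather than the dict. A self-contained Python 3 sketch of the same merge-by-operator idea; the sentinel name is a reconstruction, not the library's real API:

import operator

_EMPTY = object()  # stands in for the draft's undefined EMPTY sentinel

def join_by(op, dicts, start=_EMPTY):
    """Merge dicts, combining values of repeated keys with op."""
    dicts = list(dicts)
    if not dicts:
        return {}
    if len(dicts) == 1:
        return dicts[0]
    result = {}
    for d in dicts:
        for k, v in d.items():
            if k in result:
                result[k] = op(result[k], v)
            else:
                result[k] = v if start is _EMPTY else op(start, v)
    return result

# Example: sum values that share a key.
assert join_by(operator.add, [{'a': 1, 'b': 2}, {'a': 10}]) == {'a': 11, 'b': 2}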
79602383ece3835e6ed94d14f3254190104bd03d
Fix aliases with bash
thefuck/shells/bash.py
thefuck/shells/bash.py
import os from ..conf import settings from ..const import ARGUMENT_PLACEHOLDER from ..utils import memoize from .generic import Generic class Bash(Generic): def app_alias(self, alias_name): # It is VERY important to have the variables declared WITHIN the function return ''' function {name} () {{ TF_PREVIOUS=$(fc -ln -1); TF_CMD=$( TF_ALIAS={name} TF_SHELL_ALIASES=$(alias) PYTHONIOENCODING=utf-8 thefuck $TF_PREVIOUS {argument_placeholder} $@ ) && eval $TF_CMD; {alter_history} }} '''.format( name=alias_name, argument_placeholder=ARGUMENT_PLACEHOLDER, alter_history=('history -s $TF_CMD;' if settings.alter_history else '')) def _parse_alias(self, alias): name, value = alias.replace('alias ', '', 1).split('=', 1) if value[0] == value[-1] == '"' or value[0] == value[-1] == "'": value = value[1:-1] return name, value @memoize def get_aliases(self): raw_aliases = os.environ.get('TF_SHELL_ALIASES', '').split('\n') return dict(self._parse_alias(alias) for alias in raw_aliases if alias and '=' in alias) def _get_history_file_name(self): return os.environ.get("HISTFILE", os.path.expanduser('~/.bash_history')) def _get_history_line(self, command_script): return u'{}\n'.format(command_script) def how_to_configure(self): if os.path.join(os.path.expanduser('~'), '.bashrc'): config = '~/.bashrc' elif os.path.join(os.path.expanduser('~'), '.bash_profile'): config = '~/.bash_profile' else: config = 'bash config' return self._create_shell_configuration( content=u'eval $(thefuck --alias)', path=config, reload=u'source {}'.format(config))
Python
0.000001
@@ -412,16 +412,23 @@ +export TF_ALIAS @@ -455,16 +455,23 @@ +export TF_SHELL @@ -508,16 +508,23 @@ +export PYTHONIO
0d8bfef0a629f6f8fb07415df21812eb1d458cde
Remove unnecessary lines after Android gyp fix Review URL: https://codereview.appspot.com/6353066
gyp/bench.gyp
gyp/bench.gyp
# GYP file to build performance testbench. # { 'includes': [ 'apptype_console.gypi', ], 'targets': [ { 'target_name': 'bench', 'type': 'executable', 'include_dirs' : [ '../src/core', '../src/gpu', ], 'includes': [ 'bench.gypi' ], 'dependencies': [ 'core.gyp:core', 'effects.gyp:effects', 'gpu.gyp:gr', 'gpu.gyp:skgr', 'images.gyp:images', 'ports.gyp:ports', 'utils.gyp:utils', 'bench_timer', ], }, { 'target_name' : 'bench_timer', 'type': 'static_library', 'sources': [ '../bench/BenchTimer.h', '../bench/BenchTimer.cpp', '../bench/BenchSysTimer_mach.h', '../bench/BenchSysTimer_mach.cpp', '../bench/BenchSysTimer_posix.h', '../bench/BenchSysTimer_posix.cpp', '../bench/BenchSysTimer_windows.h', '../bench/BenchSysTimer_windows.cpp', '../bench/BenchGpuTimer_gl.h', '../bench/BenchGpuTimer_gl.cpp', ], 'include_dirs': [ '../src/core', '../src/gpu', ], 'dependencies': [ 'core.gyp:core', 'gpu.gyp:gr', ], 'conditions': [ [ 'skia_os != "mac"', { 'sources!': [ '../bench/BenchSysTimer_mach.h', '../bench/BenchSysTimer_mach.cpp', ], }], [ 'skia_os not in ["linux", "freebsd", "openbsd", "solaris", "android"]', { 'sources!': [ '../bench/BenchSysTimer_posix.h', '../bench/BenchSysTimer_posix.cpp', ], }], [ 'skia_os in ["linux", "freebsd", "openbsd", "solaris"]', { 'link_settings': { 'libraries': [ '-lrt', ], }, }], [ 'skia_os != "win"', { 'sources!': [ '../bench/BenchSysTimer_windows.h', '../bench/BenchSysTimer_windows.cpp', ], }], [ 'skia_os == "android"', { 'dependencies!': [ 'android_system.gyp:Android_EntryPoint', ], }], ], } ], } # Local Variables: # tab-width:2 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=2 shiftwidth=2:
Python
0.000264
@@ -2008,151 +2008,8 @@ %7D%5D,%0A - %5B 'skia_os == %22android%22', %7B%0A 'dependencies!': %5B%0A 'android_system.gyp:Android_EntryPoint',%0A %5D,%0A %7D%5D,%0A
235cc3a7529b36e11a7935e15c90f496210d7c31
implement method for generating request signature
scup/auth.py
scup/auth.py
Python
0
@@ -0,0 +1,228 @@ +import hashlib%0Aimport time%0A%0Adef get_request_signature(private_key):%0A%09current_time = int(time.time())%0A%0A%09message = '%7B%7D%7B%7D'.format(current_time, private_key)%0A%0A%09digest = hashlib.md5(message).hexdigest()%0A%0A%09return current_time, digest%0A
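Decoded, scup/auth.py signs a request by hashing the current Unix timestamp concatenated with the private key. The stored code (tab-indented, Python 2) passes a str to hashlib.md5, which Python 3 rejects, so a runnable sketch needs an explicit encode:

import hashlib
import time

def get_request_signature(private_key):
    current_time = int(time.time())
    message = '{}{}'.format(current_time, private_key)
    # md5 wants bytes on Python 3; the original passes a str.
    digest = hashlib.md5(message.encode('utf-8')).hexdigest()
    return current_time, digest

# Both values would typically accompany the API call.
timestamp, signature = get_request_signature('my-private-key')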
5834f2e259834b325cf076b36af634dc6b64f442
Add info if not parsed
intelmq/bots/parsers/generic/parser.py
intelmq/bots/parsers/generic/parser.py
from intelmq.lib.bot import Bot, sys
from intelmq.lib.message import Event
from intelmq.bots import utils
import re


class GenericBot(Bot):
    # Generic parser, will simply parse and add named group to event
    # for example if you have the regex :
    # '^\s*(?P<ip>(?:(?:\d){1,3}\.){3}\d{1,3})'
    # You will have an item 'ip' in your event.

    def process(self):
        report = self.receive_message()
        if report:
            rowcount = 0
            for row in report.split('\n'):  # For each line
                self.logger.debug(row)
                self.logger.debug(self.parameters.regex)
                event = Event()
                match = re.search(self.parameters.regex, row)
                if match:
                    for key in match.groupdict():
                        event.add(key, matchtuple[key])
                else:
                    continue  # skip lines without matching regex
                rowcount += 1

                # Get detail from parser parameters, will be nice to have it by
                # source parameters..
                event.add('feed', self.parameters.feed)
                event.add('feed_url', self.parameters.feed_url)
                event.add('type', self.parameters.type)

                event = utils.parse_source_time(event, "source_time")
                event = utils.generate_observation_time(event, "observation_time")
                event = utils.generate_reported_fields(event)

                self.send_message(event)
            self.logger.info("Processed %d event" % rowcount)
        self.acknowledge_message()


if __name__ == "__main__":
    bot = GenericBot(sys.argv[1])
    bot.start()
Python
0
@@ -389,16 +389,88 @@ ssage()%0A + self.logger.debug(%22Will apply regex %25s%22 %25 self.parameters.regex) %0A @@ -570,47 +570,8 @@ ine%0A - self.logger.debug(row)%0A @@ -1088,129 +1088,318 @@ rs.. -%0A event.add('feed', self.parameters.feed)%0A event.add('feed_url', self.parameters.feed_url)%0A + Avoid adding if parsed%0A if not 'feed' in match.groupdict():%0A event.add('feed', self.parameters.feed)%0A if not 'feed_url' in match.groupdict():%0A event.add('feed_url', self.parameters.feed_url)%0A if not 'type' in match.groupdict():%0A
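The change decoded above makes the bot fall back to its configured feed, feed_url, and type only when the regex's named groups did not already capture them. The pattern in isolation (the regex and defaults here are illustrative, not IntelMQ's):

import re

line = '1.2.3.4 malware-feed'
pattern = re.compile(r'(?P<ip>\d+\.\d+\.\d+\.\d+)\s+(?P<feed>\S+)')
defaults = {'feed': 'configured-feed',
            'feed_url': 'http://example.com/feed',
            'type': 'malware'}

event = {}
match = pattern.search(line)
if match:
    event.update(match.groupdict())
    for key, value in defaults.items():
        if key not in match.groupdict():  # as in the diff: parsed groups win
            event[key] = value
# event['feed'] stays 'malware-feed'; 'feed_url' and 'type' are defaults.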
9b5590458463744597da1769694e826ed9c27414
Comment failing doctests.
scikits/learn/utils/crossval.py
scikits/learn/utils/crossval.py
# Author: Alexandre Gramfort <[email protected]> # License: BSD Style. # $Id$ import exceptions import numpy as np def leave_one_out(n): """ Leave-One-Out cross validation: Provides train/test indexes to split data in train test sets Parameters =========== n: int Total number of elements Examples ======== >>> import numpy as np >>> from scikits.learn.utils import crossval >>> n_samples, n_features = 5, 10 >>> X = np.random.randn(n_samples, n_features) >>> loo = crossval.leave_one_out(n_samples) >>> for train_index, test_index in loo: ... print "TRAIN:", train_index, "TEST:", test_index ... Xtrain, Xtest, Ytrain, Ytest = split(train_index, test_index, X, y) ... print Xtrain, Xtest, Ytrain, Ytest """ for i in xrange(n): test_index = np.zeros(n, dtype=np.bool) test_index[i] = True train_index = np.logical_not(test_index) yield train_index, test_index def k_fold(n, k): """ K-Folds cross validation: Provides train/test indexes to split data in train test sets Parameters =========== n: int Total number of elements k: int number of folds Note ==== All the folds have size trunc(n/k), the last one has the complementary """ assert k>0, ValueError('cannot have k below 1') assert k<n, ValueError('cannot have k=%d greater than %d'% (k, n)) j = np.ceil(n/k) for i in xrange(k): test_index = np.zeros(n, dtype=np.bool) if i<k-1: test_index[i*j:(i+1)*j] = True else: test_index[i*j:] = True train_index = np.logical_not(test_index) yield train_index, test_index def split(train_indexes, test_indexes, *args): """ For each arg return a train and test subsets defined by indexes provided in train_indexes and test_indexes """ ret = [] for arg in args: arg_train = arg[train_indexes,:] arg_test = arg[test_indexes,:] ret.append(arg_train) ret.append(arg_test) return ret if __name__ == "__main__": print "Leave One Out crossvalidation" n_samples, n_features = 4, 2 X = np.random.randn(n_samples, n_features) y = np.random.randn(n_samples) print X loo = leave_one_out(n_samples) for train_index, test_index in loo: print "TRAIN:", train_index, "TEST:", test_index Xtrain, Xtest, Ytrain, Ytest = split(train_index, test_index, X, y) print Xtrain, Xtest, Ytrain, Ytest print "K-Fold crossvalidation" k = 2 kf = k_fold(n_samples, k) for train_index, test_index in kf: print "TRAIN:", train_index, "TEST:", test_index
Python
0
@@ -357,20 +357,59 @@ ======%0A + # commented doctest, see issue #34 %0A + # %3E%3E%3E imp @@ -423,24 +423,26 @@ py as np%0A + # %3E%3E%3E from sc @@ -474,24 +474,26 @@ crossval%0A + # %3E%3E%3E n_sampl @@ -514,24 +514,26 @@ = 5, 10%0A + # %3E%3E%3E X = np. @@ -567,24 +567,26 @@ eatures)%0A + # %3E%3E%3E loo = c @@ -621,16 +621,18 @@ les)%0A + # %3E%3E%3E for @@ -663,24 +663,26 @@ in loo:%0A + # ... prin @@ -725,24 +725,26 @@ st_index%0A + # ... Xtra @@ -810,16 +810,18 @@ , y)%0A + # ...
d046968c5b16239b4ce3fbe17b6359339f3e7b9b
Add vcf convertor
utils/vcf_convertor.py
utils/vcf_convertor.py
Python
0.000001
@@ -0,0 +1,596 @@ +#! -*- coding: utf-8 -*-%0A%0Aimport re%0Aimport json%0A%0Aperson_patten = re.compile(r'BEGIN:VCARD(.*?)END:VCARD', re.DOTALL)%0Afullname_patten = re.compile(r'FN:(.*?)%5Cn')%0Amobile_patten = re.compile(r':%5C+*?(%5Cd%7B9%7D%5Cd*?)%5Cn')%0A%0Af = open(r'iCloud vCard.vcf')%0Afc = f.read()%0Apeople = person_patten.findall(fc)%0A%0Anames = %7B%7D%0Afor p in people:%0A for i in fullname_patten.findall(p):%0A name = i%0A p = p.replace(%22-%22, %22%22)%0A for i in mobile_patten.findall(p):%0A if len(i) == 13 and i%5B:2%5D == %2286%22:%0A i = i%5B2:%5D%0A names%5Bi%5D = name%0A%0Afl = open(%22dump%22, %22w%22)%0Afl.write(json.dumps(names))%0Afl.close()%0A
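Decoded, the converter collects FN names and phone numbers from each BEGIN:VCARD/END:VCARD block and strips a leading 86 country code from 13-digit numbers. A runnable sketch on inline sample data (the hardcoded 'iCloud vCard.vcf' input and 'dump' output files are replaced by a string and a print; the sample cards are made up):

import json
import re

vcf = """BEGIN:VCARD
FN:Alice
TEL;CELL:+8613800138000
END:VCARD
BEGIN:VCARD
FN:Bob
TEL;CELL:021-555123456
END:VCARD
"""

person_pattern = re.compile(r'BEGIN:VCARD(.*?)END:VCARD', re.DOTALL)
fullname_pattern = re.compile(r'FN:(.*?)\n')
mobile_pattern = re.compile(r':\+*?(\d{9}\d*?)\n')

names = {}
for person in person_pattern.findall(vcf):
    name = None
    for candidate in fullname_pattern.findall(person):
        name = candidate                 # last FN: line wins, as in the diff
    person = person.replace('-', '')     # drop dashes before digit matching
    for number in mobile_pattern.findall(person):
        if len(number) == 13 and number.startswith('86'):
            number = number[2:]          # strip the country code
        names[number] = name

print(json.dumps(names))  # {"13800138000": "Alice", "021555123456": "Bob"}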
3a1b4ceb2ae989495d2453c612ac6645fdf59726
Create cisco_vlan_extract.py
cisco/cisco_vlan_extract.py
cisco/cisco_vlan_extract.py
Python
0.000049
@@ -0,0 +1,1467 @@ +from ciscoconfparse import CiscoConfParse as ccp%0A%0A%0Adef extract_vlan(vlans):%0A %22%22%22%0A Will convert ACTIVE vlans in the 'show vlan' command .....%0A %0A switch#show vlan%0A VLAN Name Status Ports%0A ---- -------------------------------- --------- -------------------------------%0A 1 default active Fa0/48%0A 2 AAAAA active%0A 3 BBBBB active%0A 4 CCCCC active Fa0/1, Fa0/2, Fa0/3, Fa0/4, Fa0/5, Fa0/6, Fa0/7%0A 5 DDDDD active%0A 6 EEEEE active%0A 7 FFFFF active Fa0/25, Fa0/26, Fa0/27, Fa0/28, Fa0/29, Fa0/30%0A 1002 fddi-default act/unsup%0A 1003 token-ring-default act/unsup%0A 1004 fddinet-default act/unsup%0A 1005 trnet-default act/unsup%0A %0A To configuration like this .....%0A vlan 2%0A name AAAAA%0A vlan 3%0A name BBBBB%0A vlan 4%0A name CCCCC%0A vlan 5%0A name DDDDD%0A vlan 6%0A name EEEEE%0A vlan 7%0A name FFFFF%0A %22%22%22%0A active_vlans = vlans.find_objects(%22active%22)%0A for i in active_vlans:%0A if not %22 %22.join(i.text.split()%5B0:1%5D) == %221%22:%0A print(%22vlan%22, %22 %22.join(i.text.split()%5B0:1%5D))%0A print(%22 name%22,%22 %22.join(i.text.split()%5B1:2%5D))%0A%0Aextract_vlan(ccp(%22show_vlan.txt%22))%0A
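The script above needs ciscoconfparse plus a show_vlan.txt capture; the conversion itself is simple line filtering. A dependency-free sketch over inline output (rows abbreviated from the docstring above):

show_vlan = """\
1    default                          active    Fa0/48
2    AAAAA                            active
4    CCCCC                            active    Fa0/1, Fa0/2
1002 fddi-default                     act/unsup
"""

for line in show_vlan.splitlines():
    if 'active' not in line:             # 'act/unsup' rows fall through here
        continue
    vlan_id, vlan_name = line.split()[:2]
    if vlan_id != '1':                   # the original skips the default VLAN
        print('vlan', vlan_id)
        print(' name', vlan_name)
# Prints: vlan 2 / name AAAAA, then vlan 4 / name CCCCC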
08d66a82ea47832654aa17f0323df6ce57691fcb
add setup.py
verdenskart/setup.py
verdenskart/setup.py
Python
0.000001
@@ -0,0 +1,252 @@ +#!/usr/bin/env python%0A%0Afrom setuptools import setup, find_packages%0A%0Asetup(%0A name=%22bokeh-worldmap%22,%0A version=%220.1.0%22,%0A packages=find_packages(%22src%22),%0A package_data=%7B%7D,%0A package_dir=%7B%22%22: %22src%22%7D,%0A entry_points=%7B%22console_scripts%22: %5B%5D%7D,%0A)%0A
421ba879a36c1bd817fa27aa780476d2008eb6d0
Revert "Add sleep between image detach"
fuel_health/tests/smoke/test_create_volume.py
fuel_health/tests/smoke/test_create_volume.py
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2013 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging import time from fuel_health import nmanager LOG = logging.getLogger(__name__) class VolumesTest(nmanager.SmokeChecksTest): @classmethod def setUpClass(cls): super(VolumesTest, cls).setUpClass() cls.smoke_flavor = cls._create_nano_flavor() def setUp(self): super(VolumesTest, self).setUp() if (not self.config.volume.cinder_node_exist and not self.config.volume.ceph_exist): self.fail('There are no cinder nodes or ceph storage for volume') if not self.config.compute.compute_nodes: self.fail('There are no compute nodes') @classmethod def tearDownClass(cls): super(VolumesTest, cls).tearDownClass() def _wait_for_volume_status(self, volume, status): self.status_timeout(self.volume_client.volumes, volume.id, status) def _wait_for_instance_status(self, server, status): self.status_timeout(self.compute_client.servers, server.id, status) def test_volume_create(self): """Create volume and attach it to instance Target component: Compute Scenario: 1. Create a new small-size volume. 2. Wait for volume status to become "available". 3. Check volume has correct name. 4. Create new instance. 5. Wait for "Active" status 6. Attach volume to an instance. 7. Check volume status is "in use". 8. Get information on the created volume by its id. 9. Detach volume from the instance. 10. Check volume has "available" status. 11. Delete volume. 12. Delete server. Duration: 200 s. """ msg_s1 = 'Volume was not created.' #Create volume volume = self.verify(120, self._create_volume, 1, msg_s1, "volume creation", self.volume_client) self.verify(200, self._wait_for_volume_status, 2, msg_s1, "volume becoming 'available'", volume, 'available') self.verify_response_true( volume.display_name.startswith('ost1_test-volume'), 'Step 3 failed: {msg}'.format(msg=msg_s1)) # create instance instance = self.verify(200, self._create_server, 4, "Instance creation failed. ", "server creation", self.compute_client) self.verify(200, self._wait_for_instance_status, 5, 'Instance status did not become "available".', "instance becoming 'available'", instance, 'ACTIVE') # Attach volume self.verify(120, self._attach_volume_to_instance, 6, 'Volume couldn`t be attached.', 'volume attachment', volume, instance.id) self.verify(180, self._wait_for_volume_status, 7, 'Attached volume status did not become "in-use".', "volume becoming 'in-use'", volume, 'in-use') self.attached = True # get volume details volume_details = self.verify(20, self.volume_client.volumes.get, 8, "Can not retrieve volume " "details. ", "retreiving volume details", volume.id) # detach volume self.verify(50, self._detach_volume, 9, 'Can not detach volume. ', "volume detachment", self.volume_client, volume) self.verify(120, self._wait_for_volume_status, 10, 'Volume status did not become "available".', "volume becoming 'available'", volume, 'available') # Sleeping due to non-syncronous procedure and potential race. 
time.sleep(5) self.verify(50, self.volume_client.volumes.delete, 11, 'Can not delete volume. ', "volume deletion", volume) self.verify(30, self._delete_server, 12, "Can not delete server. ", "server deletion", instance)
Python
0
@@ -631,20 +631,8 @@ ging -%0Aimport time %0A%0Afr @@ -4618,86 +4618,8 @@ e')%0A -%09# Sleeping due to non-syncronous procedure and potential race.%0A%09time.sleep(5) %0A
5bb7d25765655f83c42b5e7abc1093f7f85f7950
bump version to 0.8.16
mycroft/version/__init__.py
mycroft/version/__init__.py
# Copyright 2016 Mycroft AI, Inc.
#
# This file is part of Mycroft Core.
#
# Mycroft Core is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mycroft Core is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.
import json

from genericpath import exists, isfile
from mycroft.util.log import getLogger

__author__ = 'augustnmonteiro'

# The following lines are replaced during the release process.
# START_VERSION_BLOCK
CORE_VERSION_MAJOR = 0
CORE_VERSION_MINOR = 8
CORE_VERSION_BUILD = 15
# END_VERSION_BLOCK

CORE_VERSION_STR = (str(CORE_VERSION_MAJOR) + "." +
                    str(CORE_VERSION_MINOR) + "." +
                    str(CORE_VERSION_BUILD))
LOG = getLogger(__name__)


class VersionManager(object):
    __location = "/opt/mycroft/version.json"

    @staticmethod
    def get():
        if (exists(VersionManager.__location) and
                isfile(VersionManager.__location)):
            try:
                with open(VersionManager.__location) as f:
                    return json.load(f)
            except:
                LOG.error("Failed to load version from '%s'"
                          % VersionManager.__location)
        return {"coreVersion": None, "enclosureVersion": None}
Python
0
@@ -984,9 +984,9 @@ = 1 -5 +6 %0A# E
3088fcd2d42b4e59601c103cc01cec1d949f6f57
Improve OldPersian
ielex/lexicon/migrations/0093_fix_oldPersian.py
ielex/lexicon/migrations/0093_fix_oldPersian.py
Python
0
@@ -0,0 +1,1173 @@ +# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0Afrom django.db import migrations%0A%0A%0Adef forwards_func(apps, schema_editor):%0A '''%0A OldPersian doesn't have lexemes for some meanings.%0A This migration generates them.%0A '''%0A # Models to work with:%0A Language = apps.get_model('lexicon', 'Language')%0A MeaningList = apps.get_model('lexicon', 'MeaningList')%0A Lexeme = apps.get_model('lexicon', 'Lexeme')%0A # Data to work with:%0A target = Language.objects.get(ascii_name='OldPersian')%0A # Mapping meaning.id -%3E Lexeme%0A mIdLexemeMap = %7B%7D%0A for l in Lexeme.objects.filter(language=target).all():%0A mIdLexemeMap%5Bl.meaning_id%5D = l%0A # Searching for missing lexemes:%0A mList = MeaningList.objects.get(name='Jena200')%0A for m in mList.meanings.all():%0A if m.id not in mIdLexemeMap:%0A Lexeme.objects.create(%0A meaning=m,%0A language=target)%0A%0A%0Adef reverse_func(apps, schema_editor):%0A pass%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B('lexicon', '0092_set_cjc_reliabilities_high')%5D%0A%0A operations = %5B%0A migrations.RunPython(forwards_func, reverse_func),%0A %5D%0A
6516b73210a575376bc78005ae28c0e843303b24
add theano how-to-perform
Theano/how-to-perform-stencil-computations-element-wise-on-a-matrix-in-theano.py
Theano/how-to-perform-stencil-computations-element-wise-on-a-matrix-in-theano.py
Python
0.000071
@@ -0,0 +1,1177 @@ +import numpy as np%0Aimport theano%0Aimport theano.tensor as T%0Afrom theano.tensor.nnet import conv2d%0A%0A# original image 3D (3x3x4) (RGB Channel, height, width)%0Aimg = %5B%5B%5B1, 2, 3, 4%5D,%0A %5B1, 1, 3, 1%5D,%0A %5B1, 3, 1, 1%5D%5D,%0A%0A %5B%5B2, 2, 3, 4%5D,%0A %5B2, 2, 3, 2%5D,%0A %5B2, 3, 2, 2%5D%5D,%0A%0A %5B%5B3, 2, 3, 4%5D,%0A %5B3, 3, 3, 3%5D,%0A %5B3, 3, 3, 3%5D%5D%5D%0A%0A# separate and reshape each channel to 4D %0A# separated because convolution works on each channel only%0AR = np.asarray(%5B%5Bimg%5B0%5D%5D%5D, dtype='float32')%0AG = np.asarray(%5B%5Bimg%5B1%5D%5D%5D, dtype='float32')%0AB = np.asarray(%5B%5Bimg%5B2%5D%5D%5D, dtype='float32') %0A%0A# 4D kernel from the original : %5B1,0,1%5D%0A# rotated because convolution works only on column%0Akernel = np.asarray(%5B%5B%5B%5B1%5D,%5B0%5D,%5B1%5D%5D%5D%5D, dtype='float32')%0A%0A# theano convolution%0At_img = T.ftensor4(%22t_img%22)%0At_kernel = T.ftensor4(%22t_kernel%22)%0Aresult = conv2d(%0A input = t_img,%0A filters=t_kernel,%0A filter_shape=(1,1,1,3),%0A border_mode = 'half')%0Af = theano.function(%5Bt_img,t_kernel%5D,result)%0A%0A# compute each channel%0AR = f(R,kernel)%0AG = f(G,kernel)%0AB = f(B,kernel)%0A%0A# merge and reshape again%0Aimg = np.asarray(%5BR,G,B%5D)%0Aimg = np.reshape(img,(3,3,4))%0Aprint img
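The Theano answer decoded above applies the stencil [1, 0, 1] down each channel's columns via conv2d with zero ('half') padding, rotating the kernel because conv2d convolves along that axis. For reference only (not part of the stored answer), the same element-wise stencil in plain NumPy:

import numpy as np

img = np.array([[[1, 2, 3, 4], [1, 1, 3, 1], [1, 3, 1, 1]],
                [[2, 2, 3, 4], [2, 2, 3, 2], [2, 3, 2, 2]],
                [[3, 2, 3, 4], [3, 3, 3, 3], [3, 3, 3, 3]]],
               dtype=np.float32)  # (channel, height, width), as above

# out[c, i, j] = img[c, i-1, j] + img[c, i+1, j], zero-padded at the edges.
padded = np.pad(img, ((0, 0), (1, 1), (0, 0)), mode='constant')
out = padded[:, :-2, :] + padded[:, 2:, :]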
a99f0678815c2e998c25a0aaf9f2c79ad0d18610
Add package 'ui'
source/ui/__init__.py
source/ui/__init__.py
Python
0.000034
@@ -0,0 +1,76 @@ +# -*- coding: utf-8 -*-%0A%0A## %5Cpackage ui%0A%0A# MIT licensing%0A# See: LICENSE.txt%0A
00b995719aaf11c2d7c3126e29b94b74f0edf8d2
add test
osf_tests/test_downloads_summary.py
osf_tests/test_downloads_summary.py
Python
0.000002
@@ -0,0 +1,1058 @@ +# encoding: utf-8%0Aimport mock%0Aimport pytest%0Aimport pytz%0Aimport datetime%0A%0Afrom django.utils import timezone%0A%0Afrom addons.osfstorage import utils%0Afrom addons.osfstorage.tests.utils import StorageTestCase%0A%0Afrom osf_tests.factories import ProjectFactory%0A%0Afrom scripts.analytics.download_count_summary import DownloadCountSummary%0A%0A%[email protected]_db%0Aclass TestDownloadCount(StorageTestCase):%0A%0A def test_download_count(self):%0A # Keen does not allow same day requests so we have to do some time traveling to my birthday%0A timezone.now = mock.Mock(return_value=datetime.datetime(1991, 9, 25).replace(tzinfo=pytz.utc))%0A node = ProjectFactory()%0A%0A utils.update_analytics(node, 'fake id', %7B'contributors': node.contributors%7D)%0A%0A # Now back to the future, querying old date.%0A timezone.now = mock.Mock(return_value=datetime.datetime.now().replace(tzinfo=pytz.utc))%0A query_date = datetime.date(1991, 9, 25)%0A%0A event = DownloadCountSummary().get_events(query_date)%0A%0A assert event%5B0%5D%5B'files'%5D%5B'total'%5D == 1%0A
d764a483497afc5d029a82db14cc5cc88f45f4c0
Add an extension to allow for an addFixedIp action on instances
nova/api/openstack/contrib/multinic.py
nova/api/openstack/contrib/multinic.py
Python
0
@@ -0,0 +1,2771 @@ +# Copyright 2011 OpenStack LLC.%0A# All Rights Reserved.%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22); you may%0A# not use this file except in compliance with the License. You may obtain%0A# a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS, WITHOUT%0A# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the%0A# License for the specific language governing permissions and limitations%0A# under the License.%0A%0A%22%22%22The multinic extension.%22%22%22%0A%0Afrom webob import exc%0A%0Afrom nova import compute%0Afrom nova import log as logging%0Afrom nova.api.openstack import extensions%0Afrom nova.api.openstack import faults%0A%0A%0ALOG = logging.getLogger(%22nova.api.multinic%22)%0A%0A%0Aclass Multinic(extensions.ExtensionDescriptor):%0A def __init__(self, *args, **kwargs):%0A super(Multinic, self).__init__(*args, **kwargs)%0A self.compute_api = compute.API()%0A%0A def get_name(self):%0A return %22Multinic%22%0A%0A def get_alias(self):%0A return %22NMN%22%0A%0A def get_description(self):%0A return %22Multiple network support%22%0A%0A def get_namespace(self):%0A return %22http://docs.openstack.org/ext/multinic/api/v1.1%22%0A%0A def get_updated(self):%0A return %222011-06-09T00:00:00+00:00%22%0A%0A def get_actions(self):%0A actions = %5B%5D%0A%0A # Add the add_fixed_ip action%0A act = extensions.ActionExtension(%22servers%22, %22addFixedIp%22,%0A self._add_fixed_ip)%0A actions.append(act)%0A%0A # Add the remove_fixed_ip action%0A act = extensions.ActionExtension(%22servers%22, %22removeFixedIp%22,%0A self._remove_fixed_ip)%0A actions.append(act)%0A%0A return actions%0A%0A def _add_fixed_ip(self, input_dict, req, id):%0A %22%22%22Adds an IP on a given network to an instance.%22%22%22%0A try:%0A # Validate the input entity%0A if 'networkId' not in input_dict%5B'addFixedIp'%5D:%0A LOG.exception(_(%22Missing 'networkId' argument for addFixedIp%22))%0A return faults.Fault(exc.HTTPUnprocessableEntity())%0A%0A # Add the fixed IP%0A network_id = input_dict%5B'addFixedIp'%5D%5B'networkId'%5D%0A self.compute_api.add_fixed_ip(req.environ%5B'nova.context'%5D, id,%0A network_id)%0A except Exception, e:%0A LOG.exception(_(%22Error in addFixedIp %25s%22), e)%0A return faults.Fault(exc.HTTPBadRequest())%0A return exc.HTTPAccepted()%0A%0A def _remove_fixed_ip(self, input_dict, req, id):%0A # Not yet implemented%0A raise faults.Fault(exc.HTTPNotImplemented())%0A
c13d1347889cf574d3e6b9b835dadbca5fdc2d6c
Add wheel module for the salt key system
salt/wheel/key.py
salt/wheel/key.py
Python
0
@@ -0,0 +1,878 @@ +'''%0AWheel system wrapper for key system%0A'''%0A%0Aimport salt.key%0A%0Adef list_all():%0A '''%0A List the keys under a named status%0A '''%0A skey = salt.key.Key(__opts__)%0A return skey.list_all()%0A%0Adef accept(match):%0A '''%0A Accept keys based on a glob match%0A '''%0A skey = salt.key.Key(__opts__)%0A return skey.accept(match)%0A%0Adef delete(match):%0A '''%0A Delete keys based on a glob match%0A '''%0A skey = salt.key.Key(__opts__)%0A return skey.delete(match)%0A%0Adef reject(match):%0A '''%0A Delete keys based on a glob match%0A '''%0A skey = salt.key.Key(__opts__)%0A return skey.reject(match)%0A%0Adef key_str(match):%0A '''%0A Return the key strings%0A '''%0A skey = salt.key.Key(__opts__)%0A return skey.key_str(match)%0A%0Adef finger(match):%0A '''%0A Return the matching key fingerprints%0A '''%0A skey = salt.key.Key(__opts__)%0A return skey.finger(match)%0A
95a8ed6dcb19f322c9a14957da207efb8be10f5d
Customize makemessages to support ignoring fuzzy
hqscripts/management/commands/makemessages.py
hqscripts/management/commands/makemessages.py
Python
0
@@ -0,0 +1,1022 @@ +from django.core.management.commands import makemessages%0A%0A%0Aclass Command(makemessages.Command):%0A def add_arguments(self, parser):%0A super().add_arguments(parser)%0A parser.add_argument('--no-fuzzy', action='store_true', help='Remove fuzzy strings.')%0A%0A def handle(self, *args, **options):%0A no_fuzzy = options%5B'no_fuzzy'%5D%0A if no_fuzzy:%0A # The underlying parser only passes custom msgattrib_options if '--no-obsolete' is true,%0A # so we have to do a bit of hacking here%0A no_obsolete = options%5B'no_obsolete'%5D%0A if no_obsolete:%0A # If we are removing obsolete messages already, just add in removing fuzzy messages%0A self.msgattrib_options += %5B'--no-fuzzy'%5D%0A else:%0A # Otherwise, we need to fake obsolete messages while only actually removing fuzzy messages%0A options%5B'no_obsolete'%5D = True%0A self.msgattrib_options = %5B'--no-fuzzy'%5D%0A%0A super().handle(*args, **options)%0A
597a1c12223fec5deefcd31b3a00b06d1095b32d
Add check replication step
dbaas/workflow/steps/util/region_migration/check_replication.py
dbaas/workflow/steps/util/region_migration/check_replication.py
Python
0
@@ -0,0 +1,1109 @@ +# -*- coding: utf-8 -*-%0Aimport logging%0Afrom util import full_stack%0Afrom workflow.steps.util.base import BaseStep%0Afrom workflow.exceptions.error_codes import DBAAS_0020%0Afrom time import sleep%0ALOG = logging.getLogger(__name__)%0A%0A%0Aclass CheckReplication(BaseStep):%0A%0A def __unicode__(self):%0A return %22Checking replication...%22%0A%0A def do(self, workflow_dict):%0A try:%0A databaseinfra = workflow_dict%5B'databaseinfra'%5D%0A driver = databaseinfra.get_driver()%0A%0A instance = workflow_dict%5B'source_instances'%5D%5B0%5D.future_instance%0A%0A for attempt in range(0, 21):%0A LOG.info(%22Waiting 10s to check replication...%22)%0A sleep(10)%0A%0A if driver.is_replication_ok(instance):%0A return True%0A%0A except Exception:%0A traceback = full_stack()%0A%0A workflow_dict%5B'exceptions'%5D%5B'error_codes'%5D.append(DBAAS_0020)%0A workflow_dict%5B'exceptions'%5D%5B'traceback'%5D.append(traceback)%0A%0A return False%0A%0A def undo(self, workflow_dict):%0A LOG.info(%22Running undo...%22)%0A return True%0A
fb6c84e7703092f495324fe57041717403803e7f
Add scrape_symbols.py placeholder.
scrape_symbols.py
scrape_symbols.py
Python
0
@@ -0,0 +1,100 @@ +#!/usr/bin/env python%0A# encoding: utf-8%0Adef main():%0A pass%0A%0Aif __name__ == '__main__':%0A main()%0A
fe479bf2a8ec547922c6643bbdf0ba768eb79c9d
Add script to simulate multiple games
ludo/simulator.py
ludo/simulator.py
Python
0
@@ -0,0 +1,682 @@ +#!/usr/bin/env python3%0Afrom game import Game%0A%0Aprint(%22Welcome to a game of ludo!%22)%0A%0Aaverage_throw_counter = 0%0Amin_throws_per_game = 10000000%0Amax_throws_per_game = 0%0ANUM_GAMES = 100%0A%0Afor i in range(0, NUM_GAMES):%0A%0A game = Game()%0A throw_counter = 0%0A%0A while game.next_move():%0A throw_counter += 1%0A%0A average_throw_counter += throw_counter%0A if throw_counter %3C min_throws_per_game:%0A min_throws_per_game = throw_counter%0A if throw_counter %3E max_throws_per_game:%0A max_throws_per_game = throw_counter%0A%0A print(%22Game:%22, i+1)%0A%0Aprint(%22Average throws:%22, average_throw_counter/NUM_GAMES)%0Aprint(%22Min%22, min_throws_per_game)%0Aprint(%22Max%22, max_throws_per_game)%0A%0A%0A
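The simulator imports the pack's own game.Game, so it cannot run stand-alone. Here is the statistics loop it adds, with a stub game that ends after a random number of throws (the stub is not the real Ludo engine):

import random

class StubGame:
    """Stand-in for game.Game: next_move() returns False when the game ends."""
    def __init__(self):
        self.remaining = random.randint(50, 400)

    def next_move(self):
        self.remaining -= 1
        return self.remaining > 0

NUM_GAMES = 100
total, min_throws, max_throws = 0, float('inf'), 0
for _ in range(NUM_GAMES):
    game, throws = StubGame(), 0
    while game.next_move():
        throws += 1
    total += throws
    min_throws, max_throws = min(min_throws, throws), max(max_throws, throws)

print('Average throws:', total / NUM_GAMES)
print('Min', min_throws, 'Max', max_throws)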
b5b21a151b219ae5f9a017ea0bda95c1d0be92ca
Fix Csv validation
tools/telemetry/telemetry/csv_page_benchmark_results.py
tools/telemetry/telemetry/csv_page_benchmark_results.py
# Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. from telemetry.page_benchmark_results import PageBenchmarkResults class CsvPageBenchmarkResults(PageBenchmarkResults): def __init__(self, results_writer, output_after_every_page): super(CsvPageBenchmarkResults, self).__init__() self._results_writer = results_writer self._did_output_header = False self._header_names_written_to_writer = None self._output_after_every_page = output_after_every_page def DidMeasurePage(self): assert self.values_for_current_page, 'Failed to call WillMeasurePage' if not self._output_after_every_page: super(CsvPageBenchmarkResults, self).DidMeasurePage() return if not self._did_output_header: self._OutputHeader() else: self._ValidateOutputNamesForCurrentPage() self._OutputValuesForPage(self.values_for_current_page) super(CsvPageBenchmarkResults, self).DidMeasurePage() def PrintSummary(self, trace_tag): if not self._output_after_every_page: self._OutputHeader() for page_values in self.all_values_for_all_pages: self._OutputValuesForPage(page_values) super(CsvPageBenchmarkResults, self).PrintSummary(trace_tag) def _ValidateOutputNamesForCurrentPage(self): assert self._did_output_header current_page_measurement_names = \ self.values_for_current_page.measurement_names if self._header_names_written_to_writer == current_page_measurement_names: return assert False, """To use CsvPageBenchmarkResults, you must add the same result names for every page. In this case, first page output: %s Thus, all subsequent pages must output this as well. Instead, the current page output: %s Change your test to produce the same thing each time, or modify MultiPageBenchmark.results_are_the_same_on_every_page to return False. """ % (repr(self._header_names_written_to_writer), repr(current_page_measurement_names)) def _OutputHeader(self): assert not self._did_output_header all_measurement_names = list( self.all_measurements_that_have_been_seen.keys()) all_measurement_names.sort() self._did_output_header = True self._header_names_written_to_writer = list(all_measurement_names) row = ['url'] for measurement_name in all_measurement_names: measurement_data = \ self.all_measurements_that_have_been_seen[measurement_name] row.append('%s (%s)' % (measurement_name, measurement_data['units'])) self._results_writer.writerow(row) def _OutputValuesForPage(self, page_values): row = [page_values.page.url] for measurement_name in self._header_names_written_to_writer: value = page_values.FindValueByMeasurementName(measurement_name) if value: row.append('%s' % value.output_value) else: row.append('-') self._results_writer.writerow(row)
Python
0.000011
@@ -1443,24 +1443,28 @@ = %5C%0A +set( self.values_ @@ -1501,22 +1501,106 @@ ames +) %0A -if self._ +header_names_written_to_writer = %5C%0A set(self._header_names_written_to_writer)%0A if head @@ -2053,22 +2053,16 @@ %25 (repr( -self._ header_n
a8f172752a72d93537820322b9ce62b601be6c5f
Fix cpplint warning.
script/cpplint.py
script/cpplint.py
#!/usr/bin/env python

import fnmatch
import os
import subprocess
import sys

IGNORE_FILES = [
  'app/win/resource.h',
  'browser/atom_application_mac.h',
  'browser/atom_application_delegate_mac.h',
  'browser/native_window_mac.h',
  'browser/ui/cocoa/event_processing_window.h',
  'browser/ui/cocoa/atom_menu_controller.h',
  'browser/ui/cocoa/nsalert_synchronous_sheet.h',
  'common/api/api_messages.cc',
  'common/api/api_messages.h',
  'common/atom_version.h',
  'common/swap_or_assign.h',
]

SOURCE_ROOT = os.path.dirname(os.path.dirname(__file__))


def main():
  os.chdir(SOURCE_ROOT)
  files = list_files(['app', 'browser', 'common', 'renderer'],
                     ['*.cc', '*.h'])
  call_cpplint(list(set(files) - set(IGNORE_FILES)))


def list_files(directories, filters):
  matches = []
  for directory in directories:
    for root, _, filenames, in os.walk(directory):
      for f in filters:
        for filename in fnmatch.filter(filenames, f):
          matches.append(os.path.join(root, filename))
  return matches


def call_cpplint(files):
  cpplint = os.path.join(SOURCE_ROOT, 'vendor', 'depot_tools', 'cpplint.py')
  rules = '--filter=-build/header_guard,-build/include_what_you_use'
  subprocess.check_call([sys.executable, cpplint, rules] + files)


if __name__ == '__main__':
  sys.exit(main())
Python
0
@@ -92,32 +92,8 @@ = %5B%0A - 'app/win/resource.h',%0A 'b @@ -194,32 +194,70 @@ _window_mac.h',%0A + 'browser/resources/win/resource.h',%0A 'browser/ui/co
4c5e4cb960a266482dac21eaeb0b568359c58b39
Add py-backcall (#8701)
var/spack/repos/builtin/packages/py-backcall/package.py
var/spack/repos/builtin/packages/py-backcall/package.py
Python
0.000001
@@ -0,0 +1,1549 @@ +##############################################################################%0A# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.%0A# Produced at the Lawrence Livermore National Laboratory.%0A#%0A# This file is part of Spack.%0A# Created by Todd Gamblin, [email protected], All rights reserved.%0A# LLNL-CODE-647188%0A#%0A# For details, see https://github.com/spack/spack%0A# Please also see the NOTICE and LICENSE files for our notice and the LGPL.%0A#%0A# This program is free software; you can redistribute it and/or modify%0A# it under the terms of the GNU Lesser General Public License (as%0A# published by the Free Software Foundation) version 2.1, February 1999.%0A#%0A# This program is distributed in the hope that it will be useful, but%0A# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF%0A# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and%0A# conditions of the GNU Lesser General Public License for more details.%0A#%0A# You should have received a copy of the GNU Lesser General Public%0A# License along with this program; if not, write to the Free Software%0A# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA%0A##############################################################################%0Afrom spack import *%0A%0A%0Aclass PyBackcall(PythonPackage):%0A %22%22%22Specifications for callback functions passed in to an API%22%22%22%0A%0A homepage = %22https://github.com/takluyver/backcall%22%0A url = %22https://pypi.io/packages/source/b/backcall/backcall-0.1.0.tar.gz%22%0A%0A version('0.1.0', '87ce0c7839808e6a3427d57df6a792e7')%0A
7975ef9f34cc578de968e1a1c8e6f731c164641a
Create 1.5_countstrings.py
CrackingCodingInterview/1.5_countstrings.py
CrackingCodingInterview/1.5_countstrings.py
Python
0.001266
@@ -0,0 +1,699 @@ +%22%22%22%0Agiven a string, return a string counting all the occurences %0Aof each character if the count %3E 1%0A%22%22%22%0A%0Adef compress(string_to_compress):%0A%0A if len(string_to_compress) %3C 2%0A return string_to_compress%0A%0A groups = %5B%5D%0A previous_character = string_to_compress%5B0%5D%0A counter = 1%0A%0A for c in string_to_compress%5B1:%5D:%0A if c == previous_character:%0A counter += 1%0A else:%0A groups.append(previous_character + str(counter))%0A previous_character = c%0A counter = 1%0A groups.append(c + str(counter))%0A result = ''.join(groups)%0A if len(result) %3C len(string_to_compress):%0A return result%0A else:%0A return string_to_compress%0A
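As stored, the diff will not parse: the length guard is missing its colon, and the final groups.append(c + str(counter)) reads the loop variable where previous_character is clearer. A corrected, runnable version of the run-length compression:

def compress(string_to_compress):
    """Return 'a2b1c5'-style encoding only if it is actually shorter."""
    if len(string_to_compress) < 2:
        return string_to_compress
    groups = []
    previous_character = string_to_compress[0]
    counter = 1
    for c in string_to_compress[1:]:
        if c == previous_character:
            counter += 1
        else:
            groups.append(previous_character + str(counter))
            previous_character = c
            counter = 1
    groups.append(previous_character + str(counter))  # flush the last run
    result = ''.join(groups)
    return result if len(result) < len(string_to_compress) else string_to_compress

assert compress('aabcccccaaa') == 'a2b1c5a3'
assert compress('abc') == 'abc'  # no gain, so the original comes back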
c89cce1a47c1e379958d7cced624ec0317cd3407
Add demo for non-blocking with poll().
examples/demo3.py
examples/demo3.py
Python
0
@@ -0,0 +1,759 @@ +import os%0Aimport sys%0Asys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))%0Aimport logging%0Aimport threading%0Aimport xmpp2%0Aimport time%0Aimport select%0Afrom xmpp2 import XML%0A%0A# non-blocking, poll example.%0AUSERNAME = 'yourusername'%0APASSWORD = 'yourpassword'%0ASERVER = 'example.com'%0A%0Alogging.basicConfig(level=logging.DEBUG)%0Alogging.getLogger('xmpp2.xml.handler').setLevel(logging.INFO)%0A%0Ac = xmpp2.Client(SERVER, stream_log_level=xmpp2.LOG_NONE)%0Ac.connect()%0Ac.auth(USERNAME, password=PASSWORD)%0Ac.write(XML.presence.add(XML.priority.add(1)))%0Ac.setblocking(False)%0A%0Apo = select.poll()%0Apo.register(c, select.POLLIN)%0A%0Awhile True:%0A for fd, event in po.poll():%0A msg = c.gen.next()%0A if msg:%0A sys.stdout.write(msg.pretty_print() + '%5Cn')%0A
bb1ce480184d4e78f121f9e473e58f47b80de53a
Create FirstLinuxFile.py
FirstLinuxFile.py
FirstLinuxFile.py
Python
0
@@ -0,0 +1,11 @@ +#%EF%BC%81/usr/bin%0A
f724f5b488f23a6ceb2314aa18933b5fac3f5aab
Add courseware migration.
lms/djangoapps/courseware/migrations/0013_auto_20191001_1858.py
lms/djangoapps/courseware/migrations/0013_auto_20191001_1858.py
Python
0
@@ -0,0 +1,556 @@ +# -*- coding: utf-8 -*-%0A# Generated by Django 1.11.25 on 2019-10-01 18:58%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('courseware', '0012_adjust_fields'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterModelOptions(%0A name='coursedynamicupgradedeadlineconfiguration',%0A options=%7B%7D,%0A ),%0A migrations.AlterModelOptions(%0A name='orgdynamicupgradedeadlineconfiguration',%0A options=%7B%7D,%0A ),%0A %5D%0A
1e65555a08ff3ee1a06e92d9dd054abf3cfaf711
Add a migration to update to final tree fields
media_tree/migrations/0003_alter_tree_fields.py
media_tree/migrations/0003_alter_tree_fields.py
Python
0
@@ -0,0 +1,909 @@ +# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import models, migrations%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('media_tree', '0002_mptt_to_treebeard'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='filenode',%0A name='depth',%0A field=models.PositiveIntegerField(db_index=True),%0A ),%0A migrations.AlterField(%0A model_name='filenode',%0A name='lft',%0A field=models.PositiveIntegerField(db_index=True),%0A ),%0A migrations.AlterField(%0A model_name='filenode',%0A name='rgt',%0A field=models.PositiveIntegerField(db_index=True),%0A ),%0A migrations.AlterField(%0A model_name='filenode',%0A name='tree_id',%0A field=models.PositiveIntegerField(db_index=True),%0A ),%0A %5D%0A
fb5f6bf999b2cd8b674bc2c89f74f1413fc8ee1e
Add command line interface to play
command_line_tic_tac_toe.py
command_line_tic_tac_toe.py
Python
0.000001
@@ -0,0 +1,2710 @@ +#!/usr/bin/env python3%0A%0Aimport cmd%0Afrom tictactoe.ai_player import AIPlayer%0Afrom tictactoe.human_player import HumanPlayer%0Afrom tictactoe.game_controller import GameController%0Afrom tictactoe.board_stringification import BoardStringification%0A%0Aclass CommandLineTicTacToe(cmd.Cmd):%0A def __init__(self,%0A intro=%22Tic Tac Toe CLI. Type help for help.%5Cn%5CnHuman. You are X. Good luck. Your move%5Cn%5Cn%22,%0A prompt=%22%E2%86%92 %22):%0A cmd.Cmd.__init__(self)%0A self.intro = intro%0A self.prompt = prompt%0A self._human = HumanPlayer(%22X%22, self._notify_move)%0A self._ai = AIPlayer(%22O%22, %22X%22)%0A self._controller = GameController(self._human, self._ai, self._won_notification, self._draw_notification)%0A%0A def _won_notification(self):%0A print(%22Game over. It was won%5Cn%5Cn%22)%0A self._print_board()%0A self.do_reset(None)%0A%0A def _draw_notification(self):%0A print(%22Game over. It was a draw%5Cn%5Cn%22)%0A self._print_board()%0A self.do_reset(None)%0A %0A def do_end(self, args):%0A return True%0A%0A def help_end(self):%0A print(%22End session%22)%0A%0A do_EOF = do_end%0A%0A help_EOF = help_end%0A%0A def do_reset(self, args):%0A self.do_human_start(None)%0A%0A def help_reset(self):%0A print(%22Reset the current game%22)%0A%0A def do_move(self, args):%0A print(%22Move passed in is: %7B0%7D%22.format(args))%0A try:%0A self._controller.place_move(self._human, int(args))%0A except ValueError as e:%0A print(%22Sorry, can't make that move: %7B0%7D%22.format(e.args%5B0%5D))%0A %0A%0A def help_move(self):%0A print(%22move x: Make a move at position x on the board%22)%0A%0A def do_show_board(self, args):%0A print(%22Current game state%5Cn%22)%0A self._print_board()%0A%0A def help_show_board(self):%0A print(%22Shows the current state of the game%22)%0A%0A def do_ai_start(self, args):%0A self._controller = GameController(self._ai, self._human, self._won_notification, self._draw_notification)%0A self._controller.notify_play()%0A%0A def help_ai_start(self):%0A print(%22Initiate a new game where the AI starts%22)%0A%0A def do_human_start(self, args):%0A self._controller = GameController(self._human, self._ai, self._won_notification, self._draw_notification)%0A self._controller.notify_play()%0A%0A def help_human_start(self):%0A print(%22Initiate a new game where the AI starts%22)%0A %0A def _notify_move(self):%0A print(%22Human, your move:%5Cn%22)%0A self._print_board()%0A %0A def _print_board(self):%0A print(BoardStringification().print_game_positions(self._controller._board))%0A %0Aif __name__ == '__main__':%0A cli = CommandLineTicTacToe()%0A cli.cmdloop()%0A
6d59e6d37d6f33f3513a1c6b1cb7d0d9062f391e
Create ClassesandInstances.py
EmployeeManagementSystem/Findings/ClassesandInstances.py
EmployeeManagementSystem/Findings/ClassesandInstances.py
Python
0
@@ -0,0 +1,1195 @@ +#Creating and instantiating python classes%0A#classes - they allow us to logically group data(attributes) and functions (methods)%0A'''class Employee:%0A pass%0Aprint (%22Class (Blueprint) vs Instance%22)%0Aemp1 = Employee()%0Aemp2 = Employee()%0Aprint (emp1)%0Aprint (emp2)%0A%0Aprint (%22instance variables contains data unique to each instance%22)%0Aemp1.first ='Manoj'%0Aemp1.last = 'Putchala'%0Aemp1.email = '[email protected]'%0Aemp1.pay = 5000%0A%0Aemp2.first ='Lalitha'%0Aemp2.last = 'Putchala'%0Aemp2.email = '[email protected]'%0Aemp2.pay = 6000%0A%0Aprint (emp1.email)%0Aprint (emp2.email)%0A%0A'''%0Aclass Employee:%0A #Constructor or Initializer #Instance is called as self(by default should be used)%0A #Name, email and pay are attributes%0A def __init__(self,first,last, pay):%0A self.fname = first%0A self.lname = last%0A self.epay = pay%0A self.email = first+'.'+last+'@company.com'%0A%0A def fullname(self): #method for code reuse #self should not be forgotten (one common mistake)%0A return '%7B%7D %7B%7D'.format(emp1.fname,self.lname)%0A%0A%0Aemp1 = Employee('Manoj','Kumar',100000)%0Aprint (emp1.epay)%0Aprint (emp1.fullname())%0A%0Aprint (Employee.fullname(emp1)) # another way of calling the instance using class%0A
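The tutorial file decoded above flags the trap itself ('self should not be forgotten'), yet its fullname method reads emp1.fname instead of self.fname, so every instance would report emp1's first name. A corrected sketch of the class:

class Employee:
    def __init__(self, first, last, pay):
        self.fname = first
        self.lname = last
        self.epay = pay
        self.email = first + '.' + last + '@company.com'

    def fullname(self):
        # Use self, not a specific instance, so any employee works.
        return '{} {}'.format(self.fname, self.lname)

emp1 = Employee('Manoj', 'Kumar', 100000)
emp2 = Employee('Lalitha', 'Putchala', 60000)
print(emp1.fullname())          # Manoj Kumar
print(Employee.fullname(emp2))  # called through the class: Lalitha Putchala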
ec22c2d82ff4f045b992014d17ada850359c2ab6
change folder layout
patterning_algorithm/color_halftone.py
patterning_algorithm/color_halftone.py
Python
0.000001
@@ -0,0 +1,2211 @@ +# This program takes a raster color image and produces its raster color halftone using patterning algorithm .%0A# Split the image into C, M, Y, K.%0A# Rotate each separated image by 0, 15, 30, and 45 degrees respectively.%0A# Take the half-tone of each image (dot size will be proportional to the intensity).%0A# Rotate back each half-toned image.%0A# Now you have your colour separated images. The rotation step reduces %0A# dot alignment issues (which would mess everything up), and things like Moire pattern%0A# effects will be reasonably minimized.%0A%0A%0Aimport numpy as np%0Afrom PIL import Image%0Afrom patterning_clustered_dot import intensity, patterning%0A%0Adef gcr(im, percentage):%0A # basic %22Gray Component Replacement%22 function. Returns a CMYK image with %0A # percentage gray component removed from the CMY halftones and put in the%0A # K halftone, ie. for percentage=100, (41, 100, 255, 0) %3E%3E (0, 59, 214, 41)%0A cmyk_im = im.convert('CMYK')%0A if not percentage:%0A return cmyk_im%0A cmyk_im = cmyk_im.split()%0A cmyk = %5B%5D%0A for i in range(4):%0A cmyk.append(cmyk_im%5Bi%5D.load())%0A for x in range(im.size%5B0%5D):%0A for y in range(im.size%5B1%5D):%0A gray = min(cmyk%5B0%5D%5Bx,y%5D, cmyk%5B1%5D%5Bx,y%5D, cmyk%5B2%5D%5Bx,y%5D) * percentage / 100%0A for i in range(3):%0A cmyk%5Bi%5D%5Bx,y%5D = cmyk%5Bi%5D%5Bx,y%5D - gray%0A cmyk%5B3%5D%5Bx,y%5D = gray%0A return Image.merge('CMYK', cmyk_im)%0A%0Adef color_halftoning_with_rotation(cmyk,increment_in_angle):%0A dots=%5B%5D %0A angle=0%0A for i in range(4):%0A channel = Image.fromarray(patterning(cmyk%5Bi%5D.rotate(angle,expand=1))).convert('L')%0A channel = channel.rotate(-angle,expand=1)%0A width_half, height_half = channel.size%0A xx = (width_half-cmyk%5Bi%5D.size%5B0%5D*3) / 2%0A yy = (height_half-cmyk%5Bi%5D.size%5B1%5D*3) / 2%0A channel = channel.crop((xx, yy, xx + cmyk%5Bi%5D.size%5B0%5D*3, yy + cmyk%5Bi%5D.size%5B1%5D*3))%0A dots.append(channel)%0A angle += increment_in_angle%0A return dots%0A%0Adef main():%0A fname = 'tree.jpg'%0A image = Image.open(fname)%0A image = gcr(image,100)%0A cmyk = image.split() %0A dots = color_halftoning_with_rotation(cmyk,15)%0A new_cmyk = Image.merge('CMYK',dots)%0A new_cmyk.save(%22output.jpg%22)%0A new_cmyk.show()%0Aif __name__==%22__main__%22:%0A main()%0A
eced1499c4b82ce83f954a0364b02f2116a11326
Add quick verification checker.
src/Scripts/verify.py
src/Scripts/verify.py
Python
0
@@ -0,0 +1,1126 @@ +# Take a ground truth file produced by the verifier and a match file and compare them.%0A# Output is in fully normalized format, the same as VerifyCommand.cpp produces.%0A#%0A%0A# TODO: remove hardcoded paths.%0A%0A# file format:%0A# term,docId,%5B0-3%5D%0A# 0: true positive%0A# 1: false postive%0A# 2: false negative%0A# 3: unverified%0A%0Afrom collections import defaultdict%0Aimport csv%0A%0Atrue_matches = defaultdict(set)%0A%0Awith open(%22/tmp/groundTruth.csv%22) as f:%0A reader = csv.reader(f)%0A for row in reader:%0A if (len(row) == 3 and (row%5B2%5D == '0' or row%5B2%5D == '2')):%0A true_matches%5Brow%5B0%5D%5D.add(row%5B1%5D)%0A%0Awith open(%22/tmp/unknowns.csv%22) as f:%0A reader = csv.reader(f)%0A for row in reader:%0A # TODO: assert that value is '3'%0A #%0A # TODO: handle false negatives. Could keep a counter of how many matches%0A # we've seen and compare, then iterate over the set in the rare instance%0A # we see a false negative.%0A if (len(row) == 3):%0A if row%5B1%5D in true_matches%5Brow%5B0%5D%5D:%0A print(row%5B0%5D + %22,%22 + row%5B1%5D + %22,0%22)%0A else:%0A print(row%5B0%5D + %22,%22 + row%5B1%5D + %22,1%22)%0A
7d82a39b23c0e821b47732b826cfa655facfe330
reorder sections: `outputs` directly after `build`
conda_smithy/lint_recipe.py
conda_smithy/lint_recipe.py
# -*- coding: utf-8 -*- from __future__ import unicode_literals import io import itertools import os import re import jinja2 import ruamel.yaml from conda_build.metadata import ensure_valid_license_family EXPECTED_SECTION_ORDER = ['package', 'source', 'build', 'requirements', 'outputs', 'test', 'app', 'about', 'extra'] REQUIREMENTS_ORDER = ['build', 'run'] class NullUndefined(jinja2.Undefined): def __unicode__(self): return self._undefined_name def __getattr__(self, name): return '{}.{}'.format(self, name) def __getitem__(self, name): return '{}["{}"]'.format(self, name) def get_section(parent, name, lints): section = parent.get(name, {}) if not isinstance(section, dict): lints.append('The "{}" section was expected to be a dictionary, but ' 'got a {}.'.format(name, type(section).__name__)) section = {} return section def lintify(meta, recipe_dir=None): lints = [] major_sections = list(meta.keys()) # If the recipe_dir exists (no guarantee within this function) , we can # find the meta.yaml within it. meta_fname = os.path.join(recipe_dir or '', 'meta.yaml') source_section = get_section(meta, 'source', lints) build_section = get_section(meta, 'build', lints) requirements_section = get_section(meta, 'requirements', lints) about_section = get_section(meta, 'about', lints) extra_section = get_section(meta, 'extra', lints) # 1: Top level meta.yaml keys should have a specific order. section_order_sorted = sorted(major_sections, key=EXPECTED_SECTION_ORDER.index) if major_sections != section_order_sorted: section_order_sorted_str = map(lambda s: "'%s'" % s, section_order_sorted) section_order_sorted_str = ", ".join(section_order_sorted_str) section_order_sorted_str = "[" + section_order_sorted_str + "]" lints.append('The top level meta keys are in an unexpected order. ' 'Expecting {}.'.format(section_order_sorted_str)) # 2: The about section should have a home, license and summary. for about_item in ['home', 'license', 'summary']: # if the section doesn't exist, or is just empty, lint it. if not about_section.get(about_item, ''): lints.append('The {} item is expected in the about section.' ''.format(about_item)) # 3: The recipe should have some maintainers. if not extra_section.get('recipe-maintainers', []): lints.append('The recipe could do with some maintainers listed in ' 'the `extra/recipe-maintainers` section.') # 4: The recipe should have some tests. if 'test' not in major_sections: test_files = ['run_test.py', 'run_test.sh', 'run_test.bat', 'run_test.pl'] a_test_file_exists = (recipe_dir is not None and any(os.path.exists(os.path.join(recipe_dir, test_file)) for test_file in test_files)) if not a_test_file_exists: lints.append('The recipe must have some tests.') # 5: License cannot be 'unknown.' license = about_section.get('license', '').lower() if 'unknown' == license.strip(): lints.append('The recipe license cannot be unknown.') # 6: Selectors should be in a tidy form. if recipe_dir is not None and os.path.exists(meta_fname): bad_selectors = [] # Good selectors look like ".*\s\s#\s[...]" good_selectors_pat = re.compile(r'(.+?)\s{2,}#\s\[(.+)\](?(2).*)$') with io.open(meta_fname, 'rt') as fh: for selector_line in selector_lines(fh): if not good_selectors_pat.match(selector_line): bad_selectors.append(selector_line) if bad_selectors: lints.append('Selectors are suggested to take a ' '``<two spaces>#<one space>[<expression>]`` form.') # 7: The build section should have a build number. 
if build_section.get('number', None) is None: lints.append('The recipe must have a `build/number` section.') # 8: The build section should be before the run section in requirements. requirements_order_sorted = sorted(requirements_section, key=REQUIREMENTS_ORDER.index) if list(requirements_section.keys()) != requirements_order_sorted: lints.append('The `requirements/build` section should be defined ' 'before the `requirements/run` section.') # 9: Files downloaded should have a hash. if ('url' in source_section and not ({'sha1', 'sha256', 'md5'} & set(source_section.keys()))): lints.append('When defining a source/url please add a sha256, sha1 ' 'or md5 checksum (sha256 preferably).') # 10: License should not include the word 'license'. license = about_section.get('license', '').lower() if 'license' in license.lower(): lints.append('The recipe `license` should not include the word ' '"License".') # 11: There should be one empty line at the end of the file. if recipe_dir is not None and os.path.exists(meta_fname): with io.open(meta_fname, 'r') as f: lines = f.read().split('\n') # Count the number of empty lines from the end of the file empty_lines = itertools.takewhile(lambda x: x == '', reversed(lines)) end_empty_lines_count = len(list(empty_lines)) if end_empty_lines_count > 1: lints.append('There are {} too many lines. ' 'There should be one empty line at the end of the ' 'file.'.format(end_empty_lines_count - 1)) elif end_empty_lines_count < 1: lints.append('There are too few lines. There should be one empty ' 'line at the end of the file.') # 12: License family must be valid (conda-build checks for that) try: ensure_valid_license_family(meta) except RuntimeError as e: lints.append(str(e)) return lints def selector_lines(lines): # Using the same pattern defined in conda-build (metadata.py), # we identify selectors. sel_pat = re.compile(r'(.+?)\s*(#.*)?\[(.+)\](?(2).*)$') for line in lines: line = line.rstrip() if line.lstrip().startswith('#'): # Don't bother with comment only lines continue m = sel_pat.match(line) if m: m.group(3) yield line def main(recipe_dir): recipe_dir = os.path.abspath(recipe_dir) recipe_meta = os.path.join(recipe_dir, 'meta.yaml') if not os.path.exists(recipe_dir): raise IOError('Feedstock has no recipe/meta.yaml.') env = jinja2.Environment(undefined=NullUndefined) with io.open(recipe_meta, 'rt') as fh: content = env.from_string(''.join(fh)).render(os=os) meta = ruamel.yaml.load(content, ruamel.yaml.RoundTripLoader) results = lintify(meta, recipe_dir) return results
Python
0
@@ -261,26 +261,21 @@ uild', ' -requiremen +outpu ts',%0A @@ -298,21 +298,26 @@ ' -outpu +requiremen ts', 'te
b1ef133904540b7f49e22ac52a0f844963be829e
Add basic test for discovery loader
nose2/tests/functional/test_discovery_loader.py
nose2/tests/functional/test_discovery_loader.py
Python
0
@@ -0,0 +1,954 @@ +from nose2.tests._common import FunctionalTestCase, support_file%0Afrom nose2 import events, loader, session%0Afrom nose2.plugins.loader.discovery import DiscoveryLoader%0A%0A%0Aclass Watcher(events.Plugin):%0A def __init__(self):%0A self.called = %5B%5D%0A%0A def loadTestsFromModule(self, event):%0A self.called.append(event)%0A%0A%0Aclass DiscoveryFunctionalTest(FunctionalTestCase):%0A def setUp(self):%0A self.session = session.Session()%0A self.plug = DiscoveryLoader(session=self.session)%0A self.loader = loader.PluggableTestLoader(self.session)%0A%0A def test_createTests_hook(self):%0A self.plug.start_dir = support_file('scenario/tests_in_package')%0A watcher = Watcher(session=self.session)%0A watcher.register()%0A event = events.CreateTestsEvent(self.loader, None, None)%0A result = self.session.hooks.createTests(event)%0A assert isinstance(result, self.loader.suiteClass)%0A assert watcher.called%0A%0A
2de3ab69c0725312663ecd94378c5b267a6c5ab1
Add graph_data.py with a graph_ratings function
graph_data.py
graph_data.py
Python
0.000053
@@ -0,0 +1,1150 @@ +%22%22%22Graph properties and patterns of the raw data%0A%0A.. moduleauthor:: Jan Van Bruggen %[email protected]%3E%0A%22%22%22%0Aimport matplotlib.pyplot as plt%0A%0A%0Adef graph_ratings():%0A num_points = 1e5%0A ratings = rating_counts('data/mu/all.dta', num_points)%0A rating_numbers = sorted(ratings.keys())%0A x = %5Bi - 0.4 for i in rating_numbers%5D%0A y = %5Bratings%5Bi%5D for i in rating_numbers%5D%0A plt.bar(x, y, width=0.8)%0A plt.title('Number of Ratings by Rating (%7B:n%7D points)'.format(num_points))%0A plt.xlabel('Rating')%0A plt.xlim(-0.4, 5.4)%0A plt.ylabel('Number of Ratings')%0A plt.show()%0A%0A%0Adef rating_counts(data_file_name, num_points=float('inf'), rating_column=3):%0A ratings = %7B%7D%0A count = 0%0A with open(data_file_name, 'r') as data_file:%0A for line in data_file:%0A count += 1%0A if count %3E num_points:%0A break%0A values = line.split()%0A rating = int(values%5Brating_column%5D)%0A try:%0A ratings%5Brating%5D += 1%0A except KeyError:%0A ratings%5Brating%5D = 1%0A return ratings%0A%0A%0Adef run():%0A graph_ratings()%0A%0A%0Aif __name__ == '__main__':%0A run()%0A
7a861623987225bd786301dfe6dea78173ddaf1a
Create generator.py
Testing_Hadoop/generator.py
Testing_Hadoop/generator.py
Python
0.000001
@@ -0,0 +1,240 @@ +import time%0Astart_time = time.time()%0Afo = open(%22hadoop_test_data.txt%22, %22wb%22)%0Afor i in range(0,9):%0A for i in range(0,10000000):%0A fo.write(%22Hadoop %22);%0Afo.close()%0Aprint(%22--- %25s seconds ---%22 %25 (time.time() - start_time))%0A
963aa3fd9830d1a4817a26a2e8a5676174e30d19
Add new migration
planner/migrations/0005_auto_20150711_1117.py
planner/migrations/0005_auto_20150711_1117.py
Python
0
@@ -0,0 +1,524 @@ +# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import models, migrations%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('planner', '0004_auto_20150616_1926'),%0A %5D%0A%0A operations = %5B%0A migrations.RenameField(%0A model_name='route',%0A old_name='destination',%0A new_name='end',%0A ),%0A migrations.RenameField(%0A model_name='route',%0A old_name='origin',%0A new_name='start',%0A ),%0A %5D%0A
ea9b6920c88ac40a72aadd70199a52f27a1c097e
Create RespostaListar.py
backend/Models/Predio/RespostaListar.py
backend/Models/Predio/RespostaListar.py
Python
0
@@ -0,0 +1,242 @@ +from Framework.Resposta import Resposta%0Afrom Models.Predio.Predio import Predio as ModelPredio%0Aclass RespostaListar(Resposta):%0A%0A%09def __init__(self,predios):%0A%09%09self.corpo = %5B%5D%0A%09%09for predio in predios:%0A%09%09%09self.corpo.append(ModelPredio(predio))%0A
87804aef17874339e7b58df0c3bcb29338fa412a
add country regions, including Minsk
belarus_region_borders_include_minsk.py
belarus_region_borders_include_minsk.py
Python
0.999999
@@ -0,0 +1,880 @@ +from _helpers import cursor_wrap, dump%0A%0A%0A@cursor_wrap%0Adef main(cursor):%0A sql = %22%22%22%0A SELECT r.osm_id, c.name AS country, r.name AS region, ST_AsGeoJSON(r.way)%0A FROM osm_polygon c%0A LEFT JOIN osm_polygon r ON ST_Contains(c.way, r.way)%0A WHERE c.osm_id = -59065 AND r.admin_level = '4'%0A AND r.osm_id IN (-59189, -59506, -59161, -59275, -59162)%0A%0A UNION%0A%0A SELECT -59752, FIRST(c.name) AS country, FIRST(r.name) AS region, ST_AsGeoJSON(ST_Union(r.way))%0A FROM osm_polygon c%0A LEFT JOIN osm_polygon r ON ST_Contains(c.way, r.way)%0A WHERE c.osm_id = -59065 AND r.admin_level = '4'%0A AND r.osm_id IN (-59752, -59195)%0A %22%22%22%0A cursor.execute(sql)%0A dump(__file__, sorted(cursor.fetchall(), key=lambda item: item%5B1:3%5D),%0A ('osmid', 'country', 'region', 'geojson'))%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
b5bc7827fb2452e82789129b918861157010c58e
Create pokebot.py
pokebot.py
pokebot.py
Python
0.000005
@@ -0,0 +1,1925 @@ +#!/usr/bin/python3%0A#%0A# Author: Luke%0A%0Aimport time, ts3, sys, traceback%0A%0AUSER = 'serveradmin' # Query user%0APASS = '' # Query Password%0AHOST = 'localhost' # Query Server-host%0APORT = '10011' # Query Server-Port%0ASID = 1 # Serveradmin sid (dont touch)%0A%0Adef usage():%0A print ('%5Cn./Poke-bot.py %3CName%3E %3Chow many times%3E %3CMessage%3E%5Cn')%0A sys.exit(0)%0A%0Adef Poke(ts3conn,target,timesMany,msg):%0A try: %0A clientlist = ts3conn.clientlist()%0A clientlist = %5Bclient for client in clientlist %5C%0A if client%5B%22client_type%22%5D != %221%22%5D%0A%0A for client in clientlist: %0A clid = client%5B'clid'%5D%0A nickname = client%5B'client_nickname'%5D %0A if str(nickname) == str(target):%0A print (' %5CnFound target',target,'%5Cn%5CnPoking now!...%5Cn')%0A for i in range(int(timesMany)):%0A time.sleep(0.5)%0A ts3conn.clientpoke(clid=clid, msg=msg)%0A sys.exit(0)%0A sys.exit(0)%0A %0A except KeyboardInterrupt:%0A print (' %5CnExiting...%5Cn')%0A except Exception:%0A traceback.print_exc(file=sys.stdout)%0A sys.exit(0) %0A%0A%0Adef main(target,timesMany,message):%0A with ts3.query.TS3Connection(HOST,PORT) as ts3conn:%0A try:%0A ts3conn.login(client_login_name=USER, client_login_password=PASS)%0A ts3conn.use(sid=SID)%0A Poke(ts3conn,target,timesMany,message)%0A%0A except ts3.query.TS3QueryError as err:%0A if err.resp.error%5B%22id%22%5D == %22520%22:%0A print ('%5CnWrong Username Or Password!%5Cn')%0A sys.exit(0)%0A%0A %0Aif __name__ == '__main__':%0A try:%0A if len(sys.argv) != 4:%0A usage() %0A int(sys.argv%5B2%5D)%0A main(sys.argv%5B1%5D,sys.argv%5B2%5D,sys.argv%5B3%5D)%0A %0A except ValueError:%0A print ('%5CnSecond Arg %5C''+sys.argv%5B2%5D+'%5C' Must Be Integer Value!%5Cn')%0A usage() %0A %0A
4f87a0e144bf738e523cd1f8d914f39090275fee
add review status to individuals
xbrowse_server/base/migrations/0008_individual_review_status.py
xbrowse_server/base/migrations/0008_individual_review_status.py
Python
0
@@ -0,0 +1,645 @@ +# -*- coding: utf-8 -*-%0A# Generated by Django 1.9.4 on 2016-10-05 09:07%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations, models%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('base', '0007_auto_20160826_1327'),%0A %5D%0A%0A operations = %5B%0A migrations.AddField(%0A model_name='individual',%0A name='review_status',%0A field=models.CharField(blank=True, choices=%5B(b'A', b'Accepted'), (b'E', b'Accepted - Exome'), (b'G', b'Accepted - Genome'), (b'R', b'Not Accepted'), (b'N', b'See Notes'), (b'H', b'Hold')%5D, default=b'', max_length=1, null=True),%0A ),%0A %5D%0A
85b9d1eed3aea2ed56b85819f6e2269aef9dd128
Add MemAvailable to default keys.
src/collectors/memory/memory.py
src/collectors/memory/memory.py
# coding=utf-8 """ This class collects data on memory utilization Note that MemFree may report no memory free. This may not actually be the case, as memory is allocated to Buffers and Cache as well. See [this link](http://www.linuxatemyram.com/) for more details. #### Dependencies * /proc/meminfo or psutil """ import diamond.collector import diamond.convertor import os try: import psutil except ImportError: psutil = None _KEY_MAPPING = [ 'MemTotal', 'MemFree', 'Buffers', 'Cached', 'Active', 'Dirty', 'Inactive', 'Shmem', 'SwapTotal', 'SwapFree', 'SwapCached', 'VmallocTotal', 'VmallocUsed', 'VmallocChunk', 'Committed_AS', ] class MemoryCollector(diamond.collector.Collector): PROC = '/proc/meminfo' def get_default_config_help(self): config_help = super(MemoryCollector, self).get_default_config_help() config_help.update({ 'detailed': 'Set to True to Collect all the nodes', }) return config_help def get_default_config(self): """ Returns the default collector settings """ config = super(MemoryCollector, self).get_default_config() config.update({ 'path': 'memory', # Collect all the nodes or just a few standard ones? # Uncomment to enable # 'detailed': 'True' }) return config def collect(self): """ Collect memory stats """ if os.access(self.PROC, os.R_OK): file = open(self.PROC) data = file.read() file.close() for line in data.splitlines(): try: name, value, units = line.split() name = name.rstrip(':') value = int(value) if (name not in _KEY_MAPPING and 'detailed' not in self.config): continue for unit in self.config['byte_unit']: value = diamond.convertor.binary.convert(value=value, oldUnit=units, newUnit=unit) self.publish(name, value, metric_type='GAUGE') # TODO: We only support one unit node here. Fix it! break except ValueError: continue return True else: if not psutil: self.log.error('Unable to import psutil') self.log.error('No memory metrics retrieved') return None phymem_usage = psutil.phymem_usage() virtmem_usage = psutil.virtmem_usage() units = 'B' for unit in self.config['byte_unit']: value = diamond.convertor.binary.convert( value=phymem_usage.total, oldUnit=units, newUnit=unit) self.publish('MemTotal', value, metric_type='GAUGE') value = diamond.convertor.binary.convert( value=phymem_usage.available, oldUnit=units, newUnit=unit) self.publish('MemAvailable', value, metric_type='GAUGE') value = diamond.convertor.binary.convert( value=phymem_usage.free, oldUnit=units, newUnit=unit) self.publish('MemFree', value, metric_type='GAUGE') value = diamond.convertor.binary.convert( value=virtmem_usage.total, oldUnit=units, newUnit=unit) self.publish('SwapTotal', value, metric_type='GAUGE') value = diamond.convertor.binary.convert( value=virtmem_usage.free, oldUnit=units, newUnit=unit) self.publish('SwapFree', value, metric_type='GAUGE') # TODO: We only support one unit node here. Fix it! break return True return None
Python
0
@@ -451,16 +451,36 @@ ING = %5B%0A + 'MemAvailable',%0A 'Mem
34bc4b9e5731c94ae4655deb338d67aa3f9a1f63
Create project.py
project.py
project.py
Python
0.000001
@@ -0,0 +1,1665 @@ +from ggame import App, RectangleAsset, ImageAsset, SoundAsset, Sprite, Sound%0Afrom ggame import LineStyle, Color%0A%0ASCREEN_WIDTH = 640%0ASCREEN_HEIGHT = 480%0A%0Agreen = Color(0x00ff00, 1)%0Ablack = Color(0, 1)%0Anoline = LineStyle(0, black)%0Abg_asset = RectangleAsset(SCREEN_WIDTH, SCREEN_HEIGHT, noline, green)%0Abg = Sprite(bg_asset, (0,0))%0A%0A# Sounds%0Apew1_asset = SoundAsset(%22sounds/pew1.mp3%22)%0Apew1 = Sound(pew1_asset)%0Apop_asset = SoundAsset(%22sounds/reappear.mp3%22)%0Apop = Sound(pop_asset)%0A# A ball! This is already in the ggame-tutorials repository%0Aball_asset = ImageAsset(%22images/orb-150545_640.png%22)%0Aball = Sprite(ball_asset, (0, 0))%0A# Original image is too big. Scale it to 1/10 its original size%0Aball.scale = 0.1%0Aball.y = 200%0A# custom attributes%0Aball.dir = 1%0Aball.go = True%0A# Sounds%0Apew1_asset = SoundAsset(%22sounds/pew1.mp3%22)%0Apew1 = Sound(pew1_asset)%0Apop_asset = SoundAsset(%22sounds/reappear.mp3%22)%0Apop = Sound(pop_asset)%0A%0A%0Adef reverse(b):%0A b.dir *= -1%0A pop.play()%0A%0A# Set up function for handling screen refresh%0Adef step():%0A if ball.go:%0A ball.x += ball.dir%0A if ball.x + ball.width %3E SCREEN_WIDTH or ball.x %3C 0:%0A ball.x -= ball.dir%0A reverse(ball)%0A%0A# Handle the space key%0Adef spaceKey(event):%0A ball.go = not ball.go%0A%0A# Handle the %22reverse%22 key%0Adef reverseKey(event):%0A reverse(ball)%0A%0A# Handle the mouse click%0Adef mouseClick(event):%0A ball.x = event.x%0A ball.y = event.y%0A pew1.play()%0A%0Amyapp = App(SCREEN_WIDTH, SCREEN_HEIGHT)%0A# Set up event handlers for the app%0Amyapp.listenKeyEvent('keydown', 'space', spaceKey)%0Amyapp.listenKeyEvent('keydown', 'r', reverseKey)%0Amyapp.listenMouseEvent('click', mouseClick)%0A%0Amyapp.run(step)%0A
681cc0a4160373fe82de59946b52e0e21611af84
Print out all links on a page
linkLister.py
linkLister.py
Python
0
@@ -0,0 +1,263 @@ +import requests%0Aimport re%0A%0Aurl = raw_input(%22Enter URL with http or https prefix : %22 )%0Aprint url%0Awebsite= requests.get(url)%0A%0Ahtml = website.text%0Aprint html%0Alinklist = re.findall('%22((http%7Cftp)s?://.*?)%22',html)%0Aprint linklist%0Afor link in linklist:%0A print link%5B0%5D%0A
6d4c3b77c9f0b4889ad5265113d9a87a0dc88377
Add missing space in "be used"
src/ggrc/converters/errors.py
src/ggrc/converters/errors.py
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file> # Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file> # Created By: [email protected] # Maintained By: [email protected] WRONG_FILE_TYPE = (u"Line {line}: Wrong file type. Only .csv files are" " supported. Please upload a .csv file.") MISSING_COLUMN = (u"Line {line}: Missing mandatory column{s} {column_names}," " when adding object.") MISSING_CUSTOM_ATTRIBUTE_COLUMN = (u"Line {line}: Missing custom column" " {column_name}, when adding object.") WRONG_OBJECT_TYPE = "Line {line}: Unknown object type '{object_name}'" UNKNOWN_COLUMN = (u"Line {line}: Attribute '{column_name}' does not" " exist. Column will be ignored.") DELETE_UNKNOWN_OBJECT = (u"Line {line}: Object '{slug}' does not exist, so it" " can't be deleted.") DUPLICATE_VALUE = (u"Line {line}: {column_name} '{title}' already exists." "Record will be ignored.") DUPLICATE_VALUE_IN_CSV = (u"Lines {line_list} have same {column_name}" " '{value}'. Line{s} {ignore_lines} will be" " ignored.") MAP_UNMAP_CONFLICT = (u"Line {line}: Object '{slug}' scheduled for mapping and" " unmapping at the same time. Mapping rule update will" " be ignored.") UNKNOWN_OBJECT = (u"Line {line}: {object_type} '{slug}' doesn't exist, so it" " can't be mapped/unmapped.") WHITESPACE_WARNING = (u"Line {line}: Field {column_name} contains multiple" "spaces together, that will be merged into one.") UNKNOWN_USER_WARNING = (u"Line {line}: Specified user '{email}' does not exist." " That user will be ignored.") UNKNOWN_USER_ERROR = (u"Specified user '{email}' does not exist. That user will" " be ignored.") OWNER_MISSING = (u"Line {line}: Owner field does not contain a valid owner." " You will be assigned as object owner.") WRONG_MULTI_VALUE = (u"Line {line}: {column_name} contains invalid line. The" " value '{value}' will be ignored.") WRONG_VALUE = (u"Line {line}: {column_name} contains invalid data. The value" " will be ignored.") WRONG_VALUE_ERROR = (u"Line {line}: {column_name} contains invalid data. The" " line will be ignored.") WRONG_REQUIRED_VALUE = (u"Line {line}: Required field {column_name} contains" " invalid data '{value}'. The default value will be" "used.") MISSING_VALUE_ERROR = (u"Line {line}: Field {column_name} is required. The line" " will be ignored.") MAPPING_PERMISSION_ERROR = (u"Line {line}: You don't have permission to" " map/unmap this record. Value {value} will be" " ignored.") PERMISSION_ERROR = (u"Line {line}: You don't have permission to update/delete" " this record.") MAPPING_PERMISSION_ERROR = (u"Line {line}: You don't have permission to update" " mappings for {object_type}: {title} ({slug}).") DELETE_NEW_OBJECT_ERROR = (u"Line {line}: Tried to create and delete the same" " object {object_type}: {slug} in one import.") DELETE_CASCADE_ERROR = (u"Line {line}: Cannot delete object {object_type}:" " {slug} without deleting other objects") UNKNOWN_ERROR = "Line {line}: Import failed due to unknown error."
Python
0.00469
@@ -2687,16 +2687,17 @@ %22 + used.%22)%0A
51f8b228ff1096769a06b47d026e81a166503a82
add missing unit tests for previous commit
pymatgen/util/tests/test_decorators.py
pymatgen/util/tests/test_decorators.py
Python
0
@@ -0,0 +1,1309 @@ +import unittest%0Afrom pymatgen.util.decorators import lru_cache%0A%0A%0Aclass TestLRUCache(unittest.TestCase):%0A def test_function(self):%0A @lru_cache(2)%0A def cached_func(a, b):%0A return a + b%0A%0A #call a few times to get some stats%0A self.assertEqual(cached_func(1, 2), 3)%0A self.assertEqual(cached_func(3, 2), 5)%0A self.assertEqual(cached_func(3, 2), 5)%0A self.assertEqual(cached_func(1, 2), 3)%0A self.assertEqual(cached_func(4, 2), 6)%0A self.assertEqual(cached_func(4, 2), 6)%0A self.assertEqual(cached_func(3, 2), 5)%0A self.assertEqual(cached_func(1, 2), 3)%0A%0A self.assertEqual(cached_func.cache_info().hits, 3)%0A self.assertEqual(cached_func.cache_info().misses, 5)%0A%0A def test_class_method(self):%0A class TestClass():%0A @lru_cache(10)%0A def cached_func(self, x):%0A return x%0A%0A a = TestClass()%0A b = TestClass()%0A%0A self.assertEqual(a.cached_func(1), 1)%0A self.assertEqual(b.cached_func(2), 2)%0A self.assertEqual(b.cached_func(3), 3)%0A self.assertEqual(a.cached_func(3), 3)%0A self.assertEqual(a.cached_func(1), 1)%0A%0A self.assertEqual(a.cached_func.cache_info().hits, 1)%0A self.assertEqual(a.cached_func.cache_info().misses, 4)%0A
61a6f6468462ed5db6c8e6c55bf29f0c503ff899
add solution for H-Index
algorithms/hIndex/hIndex.py
algorithms/hIndex/hIndex.py
Python
0.000002
@@ -0,0 +1,374 @@ +class Solution(object):%0A def hIndex(self, citations):%0A %22%22%22%0A :type citations: List%5Bint%5D%0A :rtype: int%0A %22%22%22%0A n = len(citations)%0A c = collections.Counter(%5Bmin(x, n) for x in citations%5D)%0A s = reduce(lambda a, x: a + %5Ba%5B-1%5D + c%5Bx%5D%5D, reversed(range(n)), %5Bc%5Bn%5D%5D)%0A return next((n-i for i, v in enumerate(s) if v %3E= n-i), 0)%0A
c2f0f5184665250949c32d16db0b521c357e3aa7
Add solution to linkedListCycle problem.
python/src/linkedListCycle/linkedListCycle.py
python/src/linkedListCycle/linkedListCycle.py
Python
0
@@ -0,0 +1,1775 @@ +# Given a linked list, determine if it has a cycle in it.%0A%0A# Follow up:%0A# Can you solve it without using extra space?%0A%0A# Definition for singly-linked list.%0Aclass ListNode:%0A def __init__(self, x):%0A self.val = x%0A self.next = None%0A%0Aclass Solution:%0A # @param head, a ListNode%0A # @return a boolean%0A def hasCycle0(self, head):%0A %22%22%22Solving the problem with a visited array: O(n%5E2) run time and%0A O(n) memory.%22%22%22%0A visited = %5B%5D%0A while head is not None:%0A if head in visited:%0A return True%0A visited.append(head)%0A head = head.next%0A return False %0A%0A def hasCycle1(self, head):%0A %22%22%22Solving the problem iteratively with the tortise and the hare%0A pointers: O(n) run time and O(1) memory.%22%22%22%0A if head is None or head.next is None:%0A return False%0A%0A tortise = head.next%0A hare = head.next.next%0A%0A while hare is not None and hare.next is not None:%0A if tortise == hare:%0A return True%0A else:%0A tortise = tortise.next%0A hare = hare.next.next%0A return False%0A%0A def hasCycle(self, head):%0A %22%22%22Solving the problem recursively with the tortise and the hare%0A pointers: O(n) run time and O(1) memory.%22%22%22%0A if head is None or head.next is None:%0A return False%0A else:%0A return self.hasCycleRecurse(head.next, head.next.next)%0A%0A def hasCycleRecurse(self, tortise, hare):%0A %22%22%22Used in above recursive solution.%22%22%22%0A if hare is None or hare.next is None:%0A return False%0A elif tortise == hare:%0A return True%0A else:%0A return self.hasCycleRecurse(tortise.next, hare.next.next)%0A
2ab5d0bfdfe90279f3fffeeb51882cdbcb4e9135
add genesis unit tests
tests/unit/modules/genesis_test.py
tests/unit/modules/genesis_test.py
Python
0.000001
@@ -0,0 +1,2276 @@ +# -*- coding: utf-8 -*-%0A'''%0A :codeauthor: :email:%60Rupesh Tare %[email protected]%3E%60%0A'''%0A%0A# Import Salt Testing Libs%0Afrom salttesting import TestCase, skipIf%0Afrom salttesting.mock import (%0A MagicMock,%0A patch,%0A NO_MOCK,%0A NO_MOCK_REASON%0A)%0A%0A# Import Salt Libs%0Afrom salt.modules import genesis%0A%0A%0A# Globals%0Agenesis.__grains__ = %7B%7D%0Agenesis.__salt__ = %7B%7D%0Agenesis.__context__ = %7B%7D%0Agenesis.__opts__ = %7B%7D%0A%0A%0A@skipIf(NO_MOCK, NO_MOCK_REASON)%0Aclass GenesisTestCase(TestCase):%0A '''%0A Test cases for salt.modules.genesis%0A '''%0A def test_bootstrap(self):%0A '''%0A Test for Create an image for a specific platform.%0A '''%0A mock = MagicMock(return_value=False)%0A with patch.dict(genesis.__salt__, %7B'file.directory_exists': mock%7D):%0A mock = MagicMock(side_effect=Exception('foo'))%0A with patch.dict(genesis.__salt__, %7B'file.mkdir': mock%7D):%0A self.assertEqual(genesis.bootstrap('platform', 'root'),%0A %7B'Error': %22Exception('foo',)%22%7D)%0A%0A with patch.object(genesis, '_bootstrap_yum', return_value='A'):%0A self.assertEqual(genesis.bootstrap('rpm', 'root', 'dir1'), 'A')%0A%0A with patch.object(genesis, '_bootstrap_deb', return_value='A'):%0A self.assertEqual(genesis.bootstrap('deb', 'root', 'dir1'), 'A')%0A%0A with patch.object(genesis, '_bootstrap_pacman', return_value='A'):%0A self.assertEqual(genesis.bootstrap('pacman', 'root', 'dir1'), 'A')%0A%0A def test_avail_platforms(self):%0A '''%0A Test for Return which platforms are available%0A '''%0A self.assertFalse(genesis.avail_platforms()%5B'deb'%5D)%0A%0A def test_pack(self):%0A '''%0A Test for Pack up a directory structure, into a specific format%0A '''%0A with patch.object(genesis, '_tar', return_value='tar'):%0A self.assertEqual(genesis.pack('name', 'root'), None)%0A%0A def test_unpack(self):%0A '''%0A Test for Unpack an image into a directory structure%0A '''%0A with patch.object(genesis, '_untar', return_value='untar'):%0A self.assertEqual(genesis.unpack('name', 'root'), None)%0A%0A%0Aif __name__ == '__main__':%0A from integration import run_tests%0A run_tests(GenesisTestCase, needs_daemon=False)%0A
4ec7abe5df2bdd4a68528fc9af14288b57fd72cc
add integration unit test on Session
tests_with_openerp/test_session.py
tests_with_openerp/test_session.py
Python
0
@@ -0,0 +1,779 @@ +from unittest import TestCase%0Afrom anybox.recipe.odoo.runtime.session import Session%0Afrom openerp.tests.common import get_db_name%0A%0A%0Aclass SessionTestCase(TestCase):%0A%0A def setUp(self):%0A super(SessionTestCase, self).setUp()%0A self.session = Session(None, None, parse_config=False)%0A%0A def open_session(self):%0A self.session.open(db=get_db_name())%0A%0A def test_env_after_install_module(self):%0A self.open_session()%0A self.assertAdminPresentWithV8API()%0A self.session.install_modules(%5B'decimal_precision'%5D)%0A self.assertAdminPresentWithV8API()%0A%0A def assertAdminPresentWithV8API(self):%0A self.assertEqual(%0A u%22Administrator%22,%0A self.session.env%5B'res.users'%5D.search(%5B('login', '=', 'admin')%5D).name%0A )%0A
da1bda146b4762bc572cb28da30cfb09b1d083aa
add hikvision (#243)
netdisco/discoverables/hikvision.py
netdisco/discoverables/hikvision.py
Python
0
@@ -0,0 +1,393 @@ +%22%22%22Discover Hikvision cameras.%22%22%22%0Afrom . import MDNSDiscoverable%0A%0A%0Aclass Discoverable(MDNSDiscoverable):%0A %22%22%22Add support for discovering Hikvision cameras.%22%22%22%0A%0A def __init__(self, nd):%0A %22%22%22Initialize Hikvision camera discovery.%22%22%22%0A super(Discoverable, self).__init__(nd, '_http._tcp.local.')%0A%0A def get_entries(self):%0A return self.find_by_device_name('HIKVISION')%0A
f7b2b511bd6cca122782b39c9eb75ed4a4736717
add benchmark
test/benchmark.py
test/benchmark.py
Python
0.000002
@@ -0,0 +1,277 @@ +import urllib2%0Aimport json%0A%0Aurl = %22http://localhost:3000/api?package=com.whatsapp%22%0A%0Afor i in range(5):%0A print 'Downloading '+ str(i)%0A res = urllib2.urlopen(url).read()%0A file = %22data-%22+str(i)+%22.json%22%0A with open(file, 'w') as outfile:%0A json.dump(res, outfile)%0A
27622185e04bb652284597783287262e23bafa7d
Add minimal test case (failing)
plenum/test/node_request/test_apply_stashed_partially_ordered.py
plenum/test/node_request/test_apply_stashed_partially_ordered.py
Python
0.000001
@@ -0,0 +1,2407 @@ +import pytest%0A%0Afrom plenum.common.constants import DOMAIN_LEDGER_ID%0Afrom plenum.common.startable import Mode%0Afrom plenum.common.txn_util import reqToTxn%0Afrom plenum.test.delayers import cDelay%0Afrom plenum.test.helper import sdk_get_and_check_replies, sdk_send_random_requests, logger%0Afrom plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data%0Afrom plenum.test.stasher import delay_rules%0Afrom plenum.test.test_node import getNonPrimaryReplicas%0Afrom stp_core.loop.eventually import eventually%0A%0ATOTAL_REQUESTS = 10%0A%0A%[email protected](scope=%22module%22)%0Adef tconf(tconf):%0A old_max_batch_wait = tconf.Max3PCBatchWait%0A old_max_batch_size = tconf.Max3PCBatchSize%0A tconf.Max3PCBatchWait = 1000%0A tconf.Max3PCBatchSize = TOTAL_REQUESTS%0A yield tconf%0A tconf.Max3PCBatchWait = old_max_batch_wait%0A tconf.Max3PCBatchSize = old_max_batch_size%0A%0A%0Adef test_apply_stashed_partially_ordered(looper,%0A txnPoolNodeSet,%0A sdk_pool_handle,%0A sdk_wallet_client):%0A test_node = getNonPrimaryReplicas(txnPoolNodeSet)%5B0%5D.node%0A test_stasher = test_node.nodeIbStasher%0A ledger_size = max(node.domainLedger.size for node in txnPoolNodeSet)%0A%0A def check_pool_ordered_some_requests():%0A assert max(node.domainLedger.size for node in txnPoolNodeSet) %3E ledger_size%0A%0A def check_test_node_has_stashed_ordered_requests():%0A assert len(test_node.stashedOrderedReqs) %3E 0%0A%0A with delay_rules(test_stasher, cDelay()):%0A reqs = sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, TOTAL_REQUESTS)%0A looper.run(eventually(check_pool_ordered_some_requests))%0A test_node.mode = Mode.syncing%0A%0A looper.run(eventually(check_test_node_has_stashed_ordered_requests))%0A%0A req_idr = test_node.stashedOrderedReqs%5B0%5D.reqIdr%0A req_idr = req_idr%5B:len(req_idr) // 2%5D%0A assert len(req_idr) %3E 1%0A%0A ledger_info = test_node.ledgerManager.getLedgerInfoByType(DOMAIN_LEDGER_ID)%0A for id in req_idr:%0A txn = reqToTxn(test_node.requests%5Bid%5D.finalised)%0A ledger_info.ledger.add(txn)%0A ledger_info.postTxnAddedToLedgerClbk(DOMAIN_LEDGER_ID, txn)%0A%0A test_node.mode = Mode.participating%0A test_node.processStashedOrderedReqs()%0A%0A ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)%0A%0A sdk_get_and_check_replies(looper, reqs)%0A
cc7eb329a7d132947861ca1f2d4713cba1e4274a
Add tests!
test_processor.py
test_processor.py
Python
0
@@ -0,0 +1,2628 @@ +from ivl_enums import IvlElabType, IvlPortType, IvlDataDirection%0Afrom parsers import parse_modules_and_elabs%0Afrom utils import IvlNetManager%0A%0Aimport pytest%0Aimport sure # noqa%0A%0A%[email protected]_fixture%0Adef read_netlist():%0A # Read a netlist and parse it into modules and elabs.%0A # Create a new net manager.%0A with open('test.netlist') as f:%0A test_netlist = f.read()%0A net_manager = IvlNetManager()%0A modules, elabs = parse_modules_and_elabs(test_netlist, net_manager)%0A yield (modules, elabs, net_manager)%0A%0A%0Adef test_counts(read_netlist):%0A # Make sure the right number of things are produced%0A modules, elabs, net_manager = read_netlist%0A len(modules).should.be.equal(6)%0A len(elabs).should.be.equal(27)%0A%0A%0Adef test_types(read_netlist):%0A modules, elabs, net_manager = read_netlist%0A # Make sure the right types appear%0A len(%5Bm for m in modules if m.xtype == 'tff'%5D).should.be.equal(3)%0A net_part_selects = %5Be for e in elabs if%0A e.xtype is IvlElabType.net_part_select%5D%0A len(net_part_selects).should.be.equal(18)%0A posedges = %5Be for e in elabs if e.xtype is IvlElabType.posedge%5D%0A len(posedges).should.be.equal(3)%0A logics = %5Be for e in elabs if e.xtype is IvlElabType.logic%5D%0A len(logics).should.be.equal(6)%0A%0A%0Adef test_ports(read_netlist):%0A # Make sure ports are generated properly%0A modules, elabs, net_manager = read_netlist%0A tb = %5Bm for m in modules if m.xtype == 'bargraph_testbench'%5D%5B0%5D%0A len(tb.ports).should.be.equal(3)%0A regs = %5Bp for p in tb.ports if p.xtype is IvlPortType.reg%5D%0A len(regs).should.be.equal(1)%0A wires = %5Bp for p in tb.ports if p.xtype is IvlPortType.wire%5D%0A len(wires).should.be.equal(2)%0A%0A%0Adef test_local_ports(read_netlist):%0A # Check for generation of local wire-type ports%0A modules, elabs, net_manager = read_netlist%0A bg = %5Bm for m in modules if m.xtype == 'bargraph3'%5D%5B0%5D%0A local_ports = %5Bp for p in bg.ports if p.is_local%5D%0A len(local_ports).should.be.equal(15)%0A%0A%0Adef test_port_types(read_netlist):%0A # Check for proper port typing%0A modules, elabs, net_manager = read_netlist%0A tff = %5Bm for m in modules if m.xtype == 'tff'%5D%5B0%5D%0A inputs = %5Bp for p in tff.ports if%0A p.direction is IvlDataDirection.input%5D%0A len(inputs).should.be.equal(2)%0A outputs = %5Bp for p in tff.ports if%0A p.direction is IvlDataDirection.output%5D%0A len(outputs).should.be.equal(2)%0A%0A%0Adef test_nets(read_netlist):%0A # Check for proper net generation%0A modules, elabs, net_manager = read_netlist%0A to_bg = net_manager.get_net('0x7fbd08d0a950')%0A len(to_bg.members).should.be.equal(3)%0A
823d10795b22b751647e79e77eecd381cf7a809d
create test file
test_threetaps.py
test_threetaps.py
Python
0.000001
@@ -0,0 +1,139 @@ +#!/usr/bin/env python%0A# -*- coding: utf-8 -*-%0A%0A%22%22%22Tests for threetaps.%22%22%22%0A%0Aimport unittest%0A%0Aif __name__ == '__main__':%0A unittest.main()%0A
f9fd2e3dcc4c25fd7561f8898e3845992553a8a8
add wrapper script to launch tests
tests/run.py
tests/run.py
Python
0.000001
@@ -0,0 +1,316 @@ +#!/usr/bin/python%0A%0Aimport os%0A%0Aroot = os.path.join(os.path.dirname(__file__), '..')%0Aprog = os.path.join(os.path.dirname(__file__), 'qdjango-tests')%0A%0Apath = %5B%5D%0Afor component in %5B'db', 'http', 'script'%5D:%0A path.append(os.path.join(root, 'src', component))%0A%0Aos.system(%22LD_LIBRARY_PATH=%25s %25s%22 %25 (':'.join(path), prog))%0A
e793612623653ae75df641eedfb2dc96e5230e4d
Set the user id in al linking
src/satosa/account_linking.py
src/satosa/account_linking.py
""" An account linking module for the satosa proxy """ import json import logging import requests from jwkest.jwk import rsa_load, RSAKey from jwkest.jws import JWS from satosa.exception import SATOSAAuthenticationError from satosa.internal_data import InternalResponse from satosa.logging import satosa_logging from satosa.response import Redirect LOGGER = logging.getLogger(__name__) class AccountLinkingModule(object): """ Module for handling account linking and recovery. Uses an external account linking service """ STATE_KEY = "ACCOUNT_LINKING_KEY" def __init__(self, config, callback_func): """ :type config: satosa.satosa_config.SATOSAConfig :type callback_func: (satosa.context.Context, satosa.internal_data.InternalResponse) -> satosa.response.Response :param config: The SATOSA proxy config :param callback_func: Callback function when the linking is done """ self.config = config self.callback_func = callback_func self.enabled = \ "ACCOUNT_LINKING" in config and ("enable" not in config.ACCOUNT_LINKING or config.ACCOUNT_LINKING["enable"]) if self.enabled: self.proxy_base = config.BASE self.al_rest_uri = config.ACCOUNT_LINKING["rest_uri"] self.al_redirect = config.ACCOUNT_LINKING["redirect"] self.endpoint = config.ACCOUNT_LINKING["endpoint"] self.verify_ssl = True if "verify_ssl" not in config.ACCOUNT_LINKING else \ config.ACCOUNT_LINKING["verify_ssl"] _bkey = rsa_load(config.CONSENT["sign_key"]) self.sign_key = RSAKey().load_key(_bkey) self.sign_key.use = "sig" LOGGER.info("Account linking is active") else: LOGGER.info("Account linking is not active") def _handle_al_response(self, context): """ Endpoint for handling account linking service response :type context: satosa.context.Context :rtype: satosa.response.Response :param context: The current context :return: response """ saved_state = context.state.get(AccountLinkingModule.STATE_KEY) internal_response = InternalResponse.from_dict(saved_state) return self.manage_al(context, internal_response) def manage_al(self, context, internal_response): """ Manage account linking and recovery :type context: satosa.context.Context :type internal_response: satosa.internal_data.InternalResponse :rtype: satosa.response.Response :param context: :param internal_response: :return: response """ if not self.enabled: return self.callback_func(context, internal_response) issuer = internal_response.auth_info.issuer id = internal_response.get_user_id() status_code, message = self._get_uuid(context, issuer, id) if status_code == 200: satosa_logging(LOGGER, logging.INFO, "issuer/id pair is linked in AL service", context.state) internal_response.user_id = message return self.callback_func(context, internal_response) return self._approve_new_id(context, internal_response, message) def _approve_new_id(self, context, internal_response, ticket): """ Redirect the user to approve the new id :type context: satosa.context.Context :type internal_response: satosa.internal_data.InternalResponse :type ticket: str :rtype: satosa.response.Redirect :param context: The current context :param internal_response: The internal response :param ticket: The ticket given by the al service :return: A redirect to approve the new id linking """ satosa_logging(LOGGER, logging.INFO, "A new ID must be linked by the AL service", context.state) context.state.add(AccountLinkingModule.STATE_KEY, internal_response.to_dict()) return Redirect("%s/%s" % (self.al_redirect, ticket)) def _get_uuid(self, context, issuer, id): """ Ask the account linking service for a uuid. 
If the given issuer/id pair is not linked, then the function will return a ticket. This ticket should be used for linking the issuer/id pair to the user account :type context: satosa.context.Context :type issuer: str :type id: str :rtype: (int, str) :param context: The current context :param issuer: the issuer used for authentication :param id: the given id :return: response status code and message (200, uuid) or (400, ticket) """ data = {"idp": issuer, "id": id, "redirect_endpoint": "%s/account_linking/%s" % (self.proxy_base, self.endpoint)} jws = self._to_jws(data) try: request = "{}/get_id?jwt={}".format(self.al_rest_uri, jws) response = requests.get(request, verify=self.verify_ssl) except ConnectionError as con_exc: msg = "Could not connect to account linking service" satosa_logging(LOGGER, logging.CRITICAL, msg, context.state, exc_info=True) raise SATOSAAuthenticationError(context.state, msg) from con_exc if response.status_code != 200 and response.status_code != 400: msg = "Got status code '%s' from account linking service" % (response.status_code) satosa_logging(LOGGER, logging.CRITICAL, msg, context.state) raise SATOSAAuthenticationError(context.state, msg) return response.status_code, response.text def _to_jws(self, data): """ Converts data to a jws :type data: Any :rtype: str :param data: Data to be converted to jws :return: a jws """ algorithm = "RS256" _jws = JWS(json.dumps(data), alg=algorithm) return _jws.sign_compact([self.sign_key]) def register_endpoints(self): """ Register consent module endpoints :rtype: list[(srt, (satosa.context.Context) -> Any)] :return: A list of endpoints bound to a function """ return [("^account_linking/%s?(.*)$" % self.endpoint, self._handle_al_response)]
Python
0.000001
@@ -3202,25 +3202,28 @@ nse. +set_ user_id - = +( message +) %0A
e8a6c0adc3aa77f8e0b1399fe076b43720acb823
Test the API can run
tests/test_api.py
tests/test_api.py
Python
0
@@ -0,0 +1,433 @@ +# -*- coding: utf-8 -*-%0A%0Aimport subprocess%0Aimport requests%0Afrom unittest import TestCase%0Afrom nose.tools import assert_equal%0A%0A%0Aclass Test(TestCase):%0A%0A def setUp(self):%0A self.process = subprocess.Popen(%22openfisca-serve%22)%0A%0A def tearDown(self):%0A self.process.terminate()%0A%0A def test_response(self):%0A assert_equal(%0A requests.get(%22http://localhost:2000%22).status_code,%0A 200%0A )%0A
690c08b2b35df2d81dc0977d8bd593c45806e1c2
Add dumb log view test cases
tests/test_log.py
tests/test_log.py
Python
0
@@ -0,0 +1,305 @@ +# -*- coding: utf-8 -*-%0Afrom __future__ import absolute_import, unicode_literals%0Afrom flask import url_for%0A%0A%0Adef test_view_build_log(test_client):%0A test_client.get(url_for('log.build_log', sha='123456'))%0A%0A%0Adef test_view_lint_log(test_client):%0A test_client.get(url_for('log.lint_log', sha='123456'))%0A