column        dtype          stats
commit        stringlengths  40 – 40
subject       stringlengths  1 – 3.25k
old_file      stringlengths  4 – 311
new_file      stringlengths  4 – 311
old_contents  stringlengths  0 – 26.3k
lang          stringclasses  3 values
proba         float64        0 – 1
diff          stringlengths  0 – 7.82k
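Each multi-line cell in this export (old_contents, diff) is stored percent-encoded on a single line — %0A for newlines, %22 for quotes, %5B/%5D for brackets, and so on — and the diff hunk headers appear to count characters rather than lines (e.g. @@ -0,0 +1,887 @@ for an 887-character file addition). The rows below are shown with those cells decoded. A minimal sketch of the decoding step, using only the standard library; the record literal is illustrative, with values taken from the flush.py row below:

from urllib.parse import unquote

def decode_cell(cell: str) -> str:
    """Turn a percent-encoded dataset cell back into multi-line text
    (%0A -> newline, %22 -> '"', %5B -> '[', ...)."""
    return unquote(cell)

# Illustrative record shaped like the schema above (fields abridged).
record = {
    "commit": "90c7f90a8d409fd68ebe20ed4ac35fd378abfee5",
    "subject": "Create flush.py",
    "new_file": "flush.py",
    "lang": "Python",
    "proba": 0.000004,
    "diff": "@@ -0,0 +1,163 @@ +f = open('out.log', 'w+')%0Af.write('output is ')%0A",
}
print(decode_cell(record["diff"]))
# @@ -0,0 +1,163 @@ +f = open('out.log', 'w+')
# f.write('output is ')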
commit: 6da928b7e113e30af0da0aa5b18d48c9584a631d
subject: add script
old_file: ditto.py
new_file: ditto.py
lang: Python
proba: 0.000001
diff:
@@ -0,0 +1,887 @@
+#!/usr/local/bin/python3
"""
The purpose of this script is to update dot files somewhere. It works in the
following way. Two locations are set

dothome : ($HOME)
    absolute path to the set the dotfiles

dotarchive : ($HOME/.dotarchive)
    absolute path to the dot files (usually some git archive)

Then symlinks are made from dothome to dotarchive. Simple as that.
"""


def main():
    # import os
    # dothome = os.path.expanduser('~')
    # dotarchive = os.path.join(dothome, '.dotarchive')

    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("dothome",
                        help="absolute path to the dotfiles")
    parser.add_argument("dotarchive",
                        help="absolute path to the dotfile archive")
    args = parser.parse_args()

    print(args.dothome)
    print(args.dotarchive)

if __name__ == "__main__":
    main()
commit: 0d0bf5b67f432fd4ee182b9026ea6e319babf9bd
subject: Create ChamBus_create_database.py
old_file: ChamBus_create_database.py
new_file: ChamBus_create_database.py
lang: Python
proba: 0.000003
diff:
@@ -0,0 +1,885 @@
+# coding: utf-8

# https://github.com/ChamGeeks/GetAroundChamonix/blob/master/www/js/services/TripPlanner.js

import datetime, os, requests, sqlite3

db_filename = 'ChamBus.db'
db_url = 'https://chx-transit-db.herokuapp.com/api/export/sql'

if os.path.exists(db_filename):
    exit(db_filename + ' already exists. Rename or delete it and rerun this script.')

print('Initializing {}...'.format(db_filename))
start = datetime.datetime.now()
with sqlite3.connect(db_filename) as conn:
    print('Reading sql commands from: {} ...'.format(db_url))
    cursor = conn.executescript(requests.get(db_url).text)
    print('Database tables are:')
    cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
    print('\n'.join(sorted(x[0] for x in cursor.fetchall())))
    conn.commit()
print('Elapsed time: {}'.format(datetime.datetime.now() - start))
print('=====\nDone.')
commit: a7b31346835c8fdd1724432596650a6de137fe3f
subject: test read_meta
old_file: test/Python/test_Func.py
new_file: test/Python/test_Func.py
lang: Python
proba: 0.000001
diff:
@@ -0,0 +1,434 @@
+import os, sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../bin')))

from file_def import read_meta
import unittest

class BasicTestSuite(unittest.TestCase):
    def test_read_meta(self):
        meta_file = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../data/SraRunTable.txt'))
        meta_dic=read_meta(meta_file, 0, 25)
        print(meta_dic)

if __name__ == '__main__':
    unittest.main()
commit: c8d48e9996f048b1844258ef427c4359645521c6
subject: Create solution.py
old_file: leetcode/easy/length_of_last_word/py/solution.py
new_file: leetcode/easy/length_of_last_word/py/solution.py
lang: Python
proba: 0.000018
diff:
@@ -0,0 +1,250 @@
+class Solution(object):
    def lengthOfLastWord(self, s):
        """
        :type s: str
        :rtype: int
        """
        words = s.split()

        if len(words) > 0:
            return len(words[-1])

        return 0
commit: a84f965e16e68cb8973d6cc91fbacec56bb92a64
subject: add lottery.py
old_file: ext/lottery.py
new_file: ext/lottery.py
lang: Python
proba: 0.999723
diff:
@@ -0,0 +1,1774 @@
+import decimal
import logging

import discord
from discord.ext import commands

from .common import Cog

log = logging.getLogger(__name__)

PERCENTAGE_PER_TAXBANK = (0.2 / 100)
TICKET_PRICE = 20


class Lottery(Cog):
    """Weekly lottery.

    The lottery works with you buying a 20JC lottery ticket.
    Every Saturday, a winner is chosen from the people
    who bought a ticket.

    The winner gets 0.2% of money from all taxbanks.
    """
    def __init__(self, bot):
        super().__init__(bot)
        self.ticket_coll = self.config.jose_db['lottery']

    @commands.group(aliases=['l'], invoke_without_command=True)
    async def lottery(self, ctx):
        """Show current lottery state.

        A read 'j!help Lottery' is highly recommended.
        """
        amount = decimal.Decimal(0)
        async for account in self.jcoin.all_accounts('taxbank'):
            amount += PERCENTAGE_PER_TAXBANK * account['amount']

        await ctx.send('Next saturday you have a chance to win: '
                       f'`{amount:.2}JC`')

    @lottery.command()
    async def users(self, ctx):
        """Show the users that are in the current lottery."""
        em = discord.Embed()

        users = []
        async for ticket in self.ticket_coll.find():
            users.append(f'<@{ticket["user_id"]}>')

        em.add_field(name='Users', value='\n'.join(users))
        await ctx.send(embed=em)

    @lottery.command()
    async def enter(self, ctx, amount: decimal.Decimal):
        """Enter the weekly lottery."""
        await ctx.send('not implemented yet')
        # Check if the user is in jose guild
        # Pay 20jc to jose
        # put user in ticket collection
        # send message to #lottery-log


def setup(bot):
    bot.add_cog(Lottery(bot))
commit: 210429b1acbb099479c06f5bd4ceddfabfa6ee5c
subject: Create qualysguard_remediation_ignore_non-running_kernels.py
old_file: qualysguard_remediation_ignore_non-running_kernels.py
new_file: qualysguard_remediation_ignore_non-running_kernels.py
lang: Python
proba: 0.000002
diff:
@@ -0,0 +1,22 @@
+#!/usr/bin/env python
commit: c92954f240ef990eae06967c12426367f0eb6319
subject: Add migration
old_file: readthedocs/donate/migrations/0003_add-impressions.py
new_file: readthedocs/donate/migrations/0003_add-impressions.py
lang: Python
proba: 0.000002
diff:
@@ -0,0 +1,919 @@
+# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('donate', '0002_dollar-drop-choices'),
    ]

    operations = [
        migrations.CreateModel(
            name='SupporterImpressions',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('date', models.DateField(verbose_name='Date')),
                ('offers', models.IntegerField(default=0, verbose_name='Offer')),
                ('views', models.IntegerField(default=0, verbose_name='View')),
                ('clicks', models.IntegerField(default=0, verbose_name='Clicks')),
                ('promo', models.ForeignKey(related_name='impressions', blank=True, to='donate.SupporterPromo', null=True)),
            ],
        ),
    ]
commit: 9cdf52e71fa31c52335f9cb9c6cdb542d658dfb1
subject: update conv tests
old_file: tests/keras_contrib/layers/test_convolutional.py
new_file: tests/keras_contrib/layers/test_convolutional.py
old_contents:
import pytest
import numpy as np
import itertools
from numpy.testing import assert_allclose
from keras.utils.test_utils import layer_test, keras_test
from keras.utils.conv_utils import conv_input_length
from keras import backend as K
from keras_contrib import backend as KC
from keras_contrib.layers import convolutional, pooling
from keras.models import Sequential

# TensorFlow does not support full convolution.
if K.backend() == 'theano':
    _convolution_border_modes = ['valid', 'same']
else:
    _convolution_border_modes = ['valid', 'same']


@keras_test
def test_deconvolution_3d():
    nb_samples = 6
    nb_filter = 4
    stack_size = 2
    kernel_dim1 = 12
    kernel_dim2 = 10
    kernel_dim3 = 8

    for batch_size in [None, nb_samples]:
        for border_mode in _convolution_border_modes:
            for subsample in [(1, 1, 1), (2, 2, 2)]:
                if border_mode == 'same' and subsample != (1, 1, 1):
                    continue

                dim1 = conv_input_length(kernel_dim1, 7, border_mode, subsample[0])
                dim2 = conv_input_length(kernel_dim2, 5, border_mode, subsample[1])
                dim3 = conv_input_length(kernel_dim3, 3, border_mode, subsample[2])
                layer_test(convolutional.Deconvolution3D,
                           kwargs={'filters': nb_filter,
                                   'kernel_size': (7, 5, 3),
                                   'output_shape': (batch_size, nb_filter, dim1, dim2, dim3),
                                   'padding': border_mode,
                                   'strides': subsample,
                                   'data_format': 'channels_first'},
                           input_shape=(nb_samples, stack_size, kernel_dim1, kernel_dim2, kernel_dim3),
                           fixed_batch_size=True)
                layer_test(convolutional.Deconvolution3D,
                           kwargs={'filters': nb_filter,
                                   'kernel_size': (7, 5, 3),
                                   'output_shape': (batch_size, nb_filter, dim1, dim2, dim3),
                                   'padding': border_mode,
                                   'strides': subsample,
                                   'data_format': 'channels_first',
                                   'W_regularizer': 'l2',
                                   'b_regularizer': 'l2',
                                   'activity_regularizer': 'activity_l2'},
                           input_shape=(nb_samples, stack_size, kernel_dim1, kernel_dim2, kernel_dim3),
                           fixed_batch_size=True)
                layer_test(convolutional.Deconvolution3D,
                           kwargs={'filters': nb_filter,
                                   'kernel_size': (7, 5, 3),
                                   'output_shape': (nb_filter, dim1, dim2, dim3),
                                   'padding': border_mode,
                                   'strides': subsample,
                                   'data_format': 'channels_first',
                                   'W_regularizer': 'l2',
                                   'b_regularizer': 'l2',
                                   'activity_regularizer': 'activity_l2'},
                           input_shape=(nb_samples, stack_size, kernel_dim1, kernel_dim2, kernel_dim3))


@keras_test
def test_cosineconvolution_2d():
    nb_samples = 2
    nb_filter = 2
    stack_size = 3
    nb_row = 10
    nb_col = 6

    if K.backend() == 'theano':
        data_format = 'channels_first'
    elif K.backend() == 'tensorflow':
        data_format = 'channels_last'

    for border_mode in _convolution_border_modes:
        for subsample in [(1, 1), (2, 2)]:
            for use_bias_mode in [True, False]:
                if border_mode == 'same' and subsample != (1, 1):
                    continue

                layer_test(convolutional.CosineConvolution2D,
                           kwargs={'filters': nb_filter,
                                   'kernel_size': (3, 3),
                                   'padding': border_mode,
                                   'strides': subsample,
                                   'use_bias': use_bias_mode,
                                   'data_format': data_format},
                           input_shape=(nb_samples, nb_row, nb_col, stack_size))
                layer_test(convolutional.CosineConvolution2D,
                           kwargs={'filters': nb_filter,
                                   'kernel_size': (3, 3),
                                   'padding': border_mode,
                                   'strides': subsample,
                                   'use_bias': use_bias_mode,
                                   'data_format': data_format,
                                   'kernel_regularizer': 'l2',
                                   'bias_regularizer': 'l2',
                                   'activity_regularizer': 'l2'},
                           input_shape=(nb_samples, nb_row, nb_col, stack_size))

    if data_format == 'channels_first':
        X = np.random.randn(1, 3, 5, 5)
        input_dim = (3, 5, 5)
        W0 = X[:, :, ::-1, ::-1]
    elif data_format == 'channels_last':
        X = np.random.randn(1, 5, 5, 3)
        input_dim = (5, 5, 3)
        W0 = X[0, :, :, :, None]

    model = Sequential()
    model.add(convolutional.CosineConvolution2D(1, (5, 5), use_bias=True,
                                                input_shape=input_dim, data_format=data_format))
    model.compile(loss='mse', optimizer='rmsprop')
    W = model.get_weights()
    W[0] = W0
    W[1] = np.asarray([1.])
    model.set_weights(W)
    out = model.predict(X)
    assert_allclose(out, np.ones((1, 1, 1, 1), dtype=K.floatx()), atol=1e-5)

    model = Sequential()
    model.add(convolutional.CosineConvolution2D(1, (5, 5), use_bias=False,
                                                input_shape=input_dim, data_format=data_format))
    model.compile(loss='mse', optimizer='rmsprop')
    W = model.get_weights()
    W[0] = -2 * W0
    model.set_weights(W)
    out = model.predict(X)
    assert_allclose(out, -np.ones((1, 1, 1, 1), dtype=K.floatx()), atol=1e-5)


@keras_test
def test_sub_pixel_upscaling():
    nb_samples = 2
    nb_row = 16
    nb_col = 16

    for scale_factor in [2, 3, 4]:
        input_data = np.random.random((nb_samples, 4 * (scale_factor ** 2), nb_row, nb_col))

        if K.image_data_format() == 'tf':
            input_data = input_data.transpose((0, 2, 3, 1))

        input_tensor = K.variable(input_data)
        expected_output = K.eval(KC.depth_to_space(input_tensor, scale=scale_factor))

        layer_test(convolutional.SubPixelUpscaling,
                   kwargs={'scale_factor': scale_factor},
                   input_data=input_data,
                   expected_output=expected_output,
                   expected_output_dtype=K.floatx())


if __name__ == '__main__':
    pytest.main([__file__])
lang: Python
proba: 0
diff:
@@ -6397,10 +6397,21 @@
 == '
+channels_las
 t
-f
 ':
commit: dc7cf288c5c5c9733a59184770fbaa26db036833
subject: Add basic tests for custom_urls system
old_file: tests/unit_project/test_core/test_custom_urls.py
new_file: tests/unit_project/test_core/test_custom_urls.py
lang: Python
proba: 0
diff:
@@ -0,0 +1,1587 @@
+# -*- coding: utf-8 -*-
from djangosanetesting import UnitTestCase

from django.http import Http404

from ella.core.custom_urls import DetailDispatcher

# dummy functions to register as views
def view(request, bits, context):
    return request, bits, context

def custom_view(request, context):
    return request, context


class TestCustomUrlsDispatcher(UnitTestCase):
    def setUp(self):
        self.dispatcher = DetailDispatcher()

        self.context = {'object': self}
        self.request = object()

    def test_no_extension(self):
        self.assert_raises(Http404, self.dispatcher._get_view, 'start', self)

    def test_register_global_extension(self):
        self.dispatcher.register('start', view)
        self.assert_equals(view, self.dispatcher._get_view('start', self))

    def test_register_extension_for_model(self):
        self.dispatcher.register('another_start', view, model=self.__class__)
        self.assert_equals(view, self.dispatcher._get_view('another_start', self.__class__))

    def test_register_extension_for_model_not_work_for_other_models(self):
        self.dispatcher.register('start', view, model=self.__class__)
        self.assert_raises(Http404, self.dispatcher._get_view, 'start', object())

    def test_no_custom_view(self):
        self.assert_raises(Http404, self.dispatcher._get_custom_detail_view, self.__class__)

    def test_register_custom_view(self):
        self.dispatcher.register_custom_detail(self.__class__, custom_view)
        self.assert_equals(custom_view, self.dispatcher._get_custom_detail_view(self.__class__))
commit: 861120c5ba7e6e126cac13497a489bc035d27026
subject: add partition show
old_file: bin/partition_show.py
new_file: bin/partition_show.py
lang: Python
proba: 0
diff:
@@ -0,0 +1,1208 @@
+#!/usr/bin/python
import datetime
import MySQLdb
import json
import os

CONFIG_FILE="partition.json"

# -----------------------------------
def config_read(filename):
    config = json.load(open(filename))
    return config

# -----------------------------------

def date_show_all_partitions(conn, tablename):
    lists = []
    infotable = "information_schema.PARTITIONS"
    sql = "SELECT PARTITION_NAME FROM "+ infotable +" WHERE TABLE_NAME='"+ tablename +"' ORDER BY PARTITION_NAME desc;"
    cur = conn.cursor()
    cur.execute(sql)
    res = cur.fetchall()
    for row in res:
        lists.append(row[0])
    cur.close()
    return lists

def partition_exec(conn, table):
    lists = date_show_all_partitions(conn, table)
    for v in lists:
        if v == "pmax":
            continue
        print table + ":" + v

def main():
    path = os.path.join(os.path.join(os.path.dirname(__file__), ".."), "config");
    conf = config_read(os.path.join(path, CONFIG_FILE))
    myconf = conf["MYSQL"]
    conn = MySQLdb.connect(host=myconf["HOST"], db=myconf["DB"], user=myconf["USER"], passwd=myconf["PASS"])

    for table in conf["TABLES"]:
        partition_exec(conn, table)

    conn.close()

main()
commit: c50a7189e730fc3e95eb209eed00ebdcd7001bde
subject: Create ImgurStorage.py
old_file: ImgurStorage.py
new_file: ImgurStorage.py
lang: Python
proba: 0.000001
diff:
@@ -0,0 +1,2414 @@
+import base64
import os
import tempfile

from django.core.exceptions import SuspiciousFileOperation
from django.core.files import File
from django.utils._os import safe_join
import requests

from django.core.files.storage import Storage
from imgurpython import ImgurClient

class ImgurStorage(Storage):
    """
    Uses the Imgur cloud service to store images.
    Great for Heroku

    This is just a gist, needs some work.
    """
    client_id = "LOL"
    client_secret = "LOL"
    access_token = "LOL"
    refresh_token = "LOL"

    def upload(self, path):
        return self.client.upload_from_path(path)

    def __init__(self):
        super(ImgurStorage, self).__init__()
        self.client = ImgurClient(self.client_id, self.client_secret, self.access_token, self.refresh_token)

    def _open(self, name, mode='rb'):
        file_url = "http://i.imgur.com/{0}.png".format(name)
        r = requests.get(file_url)
        f = tempfile.NamedTemporaryFile(delete=False)
        for chunk in r.iter_content(chunk_size=512 * 1024):
            if chunk:  # filter out keep-alive new chunks
                f.write(chunk)
        f.close()
        return File(f)

    def uploaded_path(self, name):
        try:
            path = safe_join(self.location, name)
        except ValueError:
            raise SuspiciousFileOperation("Attempted access to '%s' denied." % name)
        return os.path.normpath(path)

    def get_available_name(self, name):
        return name

    def _save(self, name, content):
        """
        Saves new content to the file specified by name. The content should be
        a proper File object or any python file-like object, ready to be read
        from the beginning.
        """
        # Get the proper name for the file, as it will actually be saved.
        if name is None:
            name = content.name

        if not hasattr(content, 'chunks'):
            content = File(content)

        content.open()

        data = {
            'image': base64.b64encode(content.read()),
            'type': 'base64',
            'meta': {}
        }
        ret = self.client.make_request('POST', 'upload', data, True)
        content.close()
        return ret["id"]

    def url(self, name):
        return "http://i.imgur.com/{0}.png".format(name)

    def get_valid_name(self, name):
        return name

    def exists(self, name):
        return True
commit: c153bc9422308599d1354abf782273ca7bd78952
subject: Add a few unit tests for libvirt_conn.
old_file: nova/tests/virt_unittest.py
new_file: nova/tests/virt_unittest.py
lang: Python
proba: 0
diff:
@@ -0,0 +1,3084 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2010 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from nova import flags
from nova import test
from nova.virt import libvirt_conn

FLAGS = flags.FLAGS


class LibvirtConnTestCase(test.TrialTestCase):
    def test_get_uri_and_template(self):
        class MockDataModel(object):
            def __init__(self):
                self.datamodel = { 'name' : 'i-cafebabe',
                                   'memory_kb' : '1024000',
                                   'basepath' : '/some/path',
                                   'bridge_name' : 'br100',
                                   'mac_address' : '02:12:34:46:56:67',
                                   'vcpus' : 2 }

        type_uri_map = { 'qemu' : ('qemu:///system',
                                   [lambda s: '<domain type=\'qemu\'>' in s,
                                    lambda s: 'type>hvm</type' in s,
                                    lambda s: 'emulator>/usr/bin/kvm' not in s]),
                         'kvm' : ('qemu:///system',
                                  [lambda s: '<domain type=\'kvm\'>' in s,
                                   lambda s: 'type>hvm</type' in s,
                                   lambda s: 'emulator>/usr/bin/qemu<' not in s]),
                         'uml' : ('uml:///system',
                                  [lambda s: '<domain type=\'uml\'>' in s,
                                   lambda s: 'type>uml</type' in s]),
                       }

        for (libvirt_type,(expected_uri, checks)) in type_uri_map.iteritems():
            FLAGS.libvirt_type = libvirt_type
            conn = libvirt_conn.LibvirtConnection(True)

            uri, template = conn.get_uri_and_template()
            self.assertEquals(uri, expected_uri)

            for i, check in enumerate(checks):
                xml = conn.toXml(MockDataModel())
                self.assertTrue(check(xml), '%s failed check %d' % (xml, i))

        # Deliberately not just assigning this string to FLAGS.libvirt_uri and
        # checking against that later on. This way we make sure the
        # implementation doesn't fiddle around with the FLAGS.
        testuri = 'something completely different'
        FLAGS.libvirt_uri = testuri
        for (libvirt_type,(expected_uri, checks)) in type_uri_map.iteritems():
            FLAGS.libvirt_type = libvirt_type
            conn = libvirt_conn.LibvirtConnection(True)
            uri, template = conn.get_uri_and_template()
            self.assertEquals(uri, testuri)
commit: 07500dbd92aa15540ddf77b96a7072c5f66d34b2
subject: Add files via upload
old_file: heat_map.py
new_file: heat_map.py
lang: Python
proba: 0
diff:
@@ -0,0 +1,643 @@
+# -*- coding: utf-8 -*-
"""
Created on Wed Jun 21 17:27:18 2017

@author: DWyatt
"""
import pandas as pd
import seaborn as sns
import sys

df_train = pd.read_csv('train.csv')
target = 'SalePrice'
variables = [column for column in df_train.columns if column!=target]

corr = df_train.corr()
sns_heat= sns.heatmap(corr, square=True)
fig = sns_heat.get_figure()
fig.savefig('heat.png')
print([target])
print(variables)
#sys.exit()

#sns_pair = sns.pairplot(df_train,
                        #x_vars=['SalePrice'],
                        #y_vars=['LotFrontage', 'Neighborhood'])
#fig = sns_pair.get_figure()
#fig.savefig('pair.png')
commit: e755977ee0ada391149e55d3331bf2ffe045d243
subject: Add a build configuration test for zlib, for #187
old_file: examples/tests/test_build_config.py
new_file: examples/tests/test_build_config.py
lang: Python
proba: 0
diff:
@@ -0,0 +1,1791 @@
+#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vi:ts=4:et

import pycurl
import zlib
try:
    from io import BytesIO
except ImportError:
    try:
        from cStringIO import StringIO as BytesIO
    except ImportError:
        from StringIO import StringIO as BytesIO

c = pycurl.Curl()
c.setopt(c.URL, 'http://pycurl.sourceforge.net')
#c.setopt(c.ENCODING, 'deflate')
c.setopt(c.HTTPHEADER, ['Accept-Encoding: deflate'])
body = BytesIO()
c.setopt(c.WRITEFUNCTION, body.write)
encoding_found = False
def header_function(header):
    global encoding_found
    if header.decode('iso-8859-1').lower().startswith('content-encoding: deflate'):
        encoding_found = True
c.setopt(c.HEADERFUNCTION, header_function)
c.perform()
assert encoding_found
print('Server supports deflate encoding')
encoded = body.getvalue()
# should not raise exceptions
zlib.decompress(encoded, -zlib.MAX_WBITS)
print('Server served deflated body')

c.reset()
c.setopt(c.URL, 'http://pycurl.sourceforge.net')
c.setopt(c.ENCODING, 'deflate')
body = BytesIO()
c.setopt(c.WRITEFUNCTION, body.write)
encoding_found = False
def header_function(header):
    global encoding_found
    if header.decode('iso-8859-1').lower().startswith('content-encoding: deflate'):
        encoding_found = True
c.setopt(c.HEADERFUNCTION, header_function)
c.perform()
assert encoding_found
print('Server claimed deflate encoding as expected')
# body should be decoded
encoded = body.getvalue()
if '<html' in encoded.decode('iso-8859-1').lower():
    print('Curl inflated served body')
else:
    fail = False
    try:
        zlib.decompress(encoded, -zlib.MAX_WBITS)
        print('Curl did not inflate served body')
        fail = True
    except:
        print('Weird')
        fail = True
    if fail:
        assert False

c.close()
commit: 9f1c5612c717bac3690d093a27a0a362ff4793b4
subject: add parameters class for fitting data
old_file: nsls2/fitting/parameters.py
new_file: nsls2/fitting/parameters.py
lang: Python
proba: 0
diff:
@@ -0,0 +1,1305 @@
+# Copyright (c) Brookhaven National Lab 2O14
# All rights reserved
# BSD License
# See LICENSE for full text
# @author: Li Li ([email protected])
# created on 07/20/2014




class ParameterBase(object):
    """
    base class to save data structure
    for each fitting parameter
    """
    def __init__(self):
        self.val = None
        self.min = None
        self.max = None
        return


class Parameters(object):

    def __init__(self):
        self.p_dict = {}
        return

    def add(self, **kwgs):
        if kwgs.has_key('name'):
            self.p_dict[kwgs['name']] = ParameterBase()

            if kwgs.has_key('val'):
                self.p_dict[kwgs['name']].val = kwgs['val']

            if kwgs.has_key('min'):
                self.p_dict[kwgs['name']].min = kwgs['min']

            if kwgs.has_key('max'):
                self.p_dict[kwgs['name']].max = kwgs['max']

        else:
            print "please define parameter name first."
            print "please define parameters as %s, %s, %s, %s" \
                %('name', 'val', 'min', 'max')

        return


    def __getitem__(self, name):
        return self.p_dict[name]


    def all(self):
        return self.p_dict
commit: 17de6f90ce081984cab528526fcf9d9e7008be14
subject: Create beta_scraping_get_users_honor.py
old_file: Solutions/beta/beta_scraping_get_users_honor.py
new_file: Solutions/beta/beta_scraping_get_users_honor.py
lang: Python
proba: 0.000001
diff:
@@ -0,0 +1,514 @@
+from bs4 import BeautifulSoup as BS
from urllib.request import urlopen

Url = 'https://www.codewars.com/users/leaderboard'

def get_honor(username):
    html = urlopen(Url).read().decode('utf-8')
    soup = BS(html, 'html.parser')

    for i in soup.find_all('tr'):
        try:
            a = str(i).split('</td>')
            user = a[0][19:(a[0].find('>')-1)]
            if user == username:
                return int(a[-2][4:])
        except:
            continue
    return "Username not found!"
commit: 8c737c22ae5d896f5445995660d664d959ce1c08
subject: add ctc reader
old_file: fluid/ocr_recognition/ctc_reader.py
new_file: fluid/ocr_recognition/ctc_reader.py
lang: Python
proba: 0
diff:
@@ -0,0 +1,2085 @@
+import os
import cv2
import numpy as np

from paddle.v2.image import load_image


class DataGenerator(object):
    def __init__(self):
        pass

    def train_reader(self, img_root_dir, img_label_list):
        '''
        Reader interface for training.

        :param img_root_dir: The root path of the image for training.
        :type file_list: str

        :param img_label_list: The path of the <image_name, label> file for training.
        :type file_list: str

        '''
        # sort by height, e.g. idx
        img_label_lines = []
        for line in open(img_label_list):
            # h, w, img_name, labels
            items = line.split(' ')
            idx = "{:0>5d}".format(int(items[0]))
            img_label_lines.append(idx + ' ' + line)
        img_label_lines.sort()

        def reader():
            for line in img_label_lines:
                # h, w, img_name, labels
                items = line.split(' ')[1:]

                assert len(items) == 4

                label = [int(c) for c in items[-1].split(',')]

                img = load_image(os.path.join(img_root_dir, items[2]))
                img = np.transpose(img, (2, 0, 1))
                #img = img[np.newaxis, ...]

                yield img, label

        return reader

    def test_reader(self, img_root_dir, img_label_list):
        '''
        Reader interface for inference.

        :param img_root_dir: The root path of the images for training.
        :type file_list: str

        :param img_label_list: The path of the <image_name, label> file for testing.
        :type file_list: list
        '''

        def reader():
            for line in open(img_label_list):
                # h, w, img_name, labels
                items = line.split(' ')

                assert len(items) == 4

                label = [int(c) for c in items[-1].split(',')]

                img = load_image(os.path.join(img_root_dir, items[2]))
                img = np.transpose(img, (2, 0, 1))
                #img = img[np.newaxis, ...]

                yield img, label

        return reader
commit: 90c7f90a8d409fd68ebe20ed4ac35fd378abfee5
subject: Create flush.py
old_file: flush.py
new_file: flush.py
lang: Python
proba: 0.000004
diff:
@@ -0,0 +1,163 @@
+f = open('out.log', 'w+')
f.write('output is ')

# some work
s = 'OK.'
f.write(s)
f.write('\n')
f.flush()

# some other work
f.write('done\n')
f.flush()
f.close()
commit: ea11ae8919139eae8eaa6b9b1dfe256726d3c584
subject: Copy SBSolarcell tests into individual file
old_file: test/test_SBSolarcell.py
new_file: test/test_SBSolarcell.py
lang: Python
proba: 0
diff:
@@ -0,0 +1,2565 @@
+# -*- coding: utf-8 -*-
import numpy as np
import ibei
from astropy import units
import unittest

temp_sun = 5762.
temp_earth = 288.
bandgap = 1.15

input_params = {"temp_sun": temp_sun,
                "temp_planet": temp_earth,
                "bandgap": bandgap,
                "voltage": 0.5,}


class CalculatorsReturnUnits(unittest.TestCase):
    """
    Tests units of the calculator methods returned values.
    """
    def setUp(self):
        """
        Initialize SBSolarcell object from input_params
        """
        self.solarcell = ibei.SQSolarcell(input_params)

    def test_calc_blackbody_radiant_power_density(self):
        """
        calc_blackbody_radiant_power_density should return value with unit of W m^-2.
        """
        tested_unit = self.solarcell.calc_blackbody_radiant_power_density().unit
        target_unit = units.Unit("W/m2")
        self.assertEqual(tested_unit, target_unit)

    def test_calc_power_density(self):
        """
        calc_power_density should return value with unit of W m^-2.
        """
        tested_unit = self.solarcell.calc_power_density().unit
        target_unit = units.Unit("W/m2")
        self.assertEqual(tested_unit, target_unit)

    def test_calc_power_density_zero_bandgap(self):
        """
        calc_power_density should return value with unit of W m^-2.
        """
        self.solarcell.bandgap = 0
        tested_unit = self.solarcell.calc_power_density().unit
        target_unit = units.Unit("W/m2")
        self.assertEqual(tested_unit, target_unit)


class CalculatorsReturnType(unittest.TestCase):
    """
    Tests type of the calculator methods returned values.
    """
    def setUp(self):
        """
        Initialize SBSolarcell object from input_params
        """
        self.solarcell = ibei.SQSolarcell(input_params)

    def test_calc_efficiency(self):
        """
        calc_power_density should return value with unit of W m^-2.
        """
        self.assertIsInstance(self.solarcell.calc_efficiency(), float)


class CalculatorsReturnValue(unittest.TestCase):
    """
    Tests special values of the calculator methods.
    """
    def setUp(self):
        """
        Initialize SBSolarcell object from input_params
        """
        self.solarcell = ibei.SQSolarcell(input_params)

    def test_calc_power_density(self):
        """
        calc_power_density should return 0 when bandgap = 0.
        """
        self.solarcell.bandgap = 0
        self.assertEqual(0, self.solarcell.calc_power_density())


if __name__ == "__main__":
    pass
commit: a973b1daca340031c671070e0f102a6114f58fab
subject: add files
old_file: mysite/wordclips/ventriloquy/test_ventriloquy.py
new_file: mysite/wordclips/ventriloquy/test_ventriloquy.py
lang: Python
proba: 0.000002
diff:
@@ -0,0 +1,1640 @@
+from django.test import TestCase
from wordclips.ventriloquy.ventriloquy import Ventriloquy
from wordclips.models import Wordclip


class VentriloquyTestCase(TestCase):
    def setUp(self):
        self.ventriloquy = Ventriloquy()
        # Put dummy object in databse for testing purpose
        Wordclip.objects.create(name="how")
        Wordclip.objects.create(name="are")
        Wordclip.objects.create(name="you")
        Wordclip.objects.create(name="people")


    def test_found_in_db(self):

        err, lst = self.ventriloquy.check_words(["how", "are", "you"])
        o1 = Wordclip.objects.get(name="how")
        o2 = Wordclip.objects.get(name="are")
        o3 = Wordclip.objects.get(name="you")
        self.assertEqual(err, 0)
        self.assertEqual(lst, [o1, o2, o3])

    def test_not_found_in_db(self):
        """
        Test objects not being found in the database,
        the first word that can not be found will be returned
        """
        err, lst = self.ventriloquy.check_words(["how", "shooot"])
        self.assertEqual(err, -1)
        self.assertEqual(lst, "shooot")



    def test_creating_audio_success(self):
        """
        Test audio being successfully created
        """
        err, lst = self.ventriloquy.create_audio(["how", "are", "you", "people"])
        self.assertEqual(err, 0)
        self.assertEqual(lst, [])

    def test_creating_audio_failed(self):
        """
        Test audio created failed
        """
        err, lst = self.ventriloquy.create_audio(["how", "are", "you", "people", "damn", "it"])
        self.assertEqual(err, -1)
        self.assertEqual(lst, "damn")
commit: 8fd466ecd16db736177104902eb84f661b2b62cc
subject: Create sitemap for google news
old_file: opps/sitemaps/googlenews.py
new_file: opps/sitemaps/googlenews.py
lang: Python
proba: 0.000095
diff:
@@ -0,0 +1,873 @@
+#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.contrib.sitemaps import GenericSitemap
from django.contrib.sites.models import Site


class GoogleNewsSitemap(GenericSitemap):
    # That's Google News limit. Do not increase it!
    limit = 1000
    sitemap_template = 'sitemap_googlenews.xml'

    def get_urls(self, page=1, site=None):
        if site is None:
            site = Site.objects.get_current()
        sup = super(GoogleNewsSitemap, self)
        old_urls = sup.get_urls(page, site)
        urls = []
        for item in self.paginator.page(page).object_list:
            for url in old_urls:
                loc = "http://%s%s" % (site.domain, self.location(item))
                if url.get('location') == loc:
                    old_urls.remove(url)
                    url['item'] = item
                    urls.append(url)
        return urls
commit: 2a106a12db2a59ccb0517a13db67b35f475b3ef5
subject: Add args to survey_data url
old_file: apps/survey/urls.py
new_file: apps/survey/urls.py
old_contents:
from django.conf.urls.defaults import *

from . import views

urlpatterns = patterns('',
    url(r'^profile/$', views.profile_index, name='survey_profile'),
    url(r'^profile/electric/$', views.profile_electric, name='survey_profile_electric'),
    url(r'^main/$', views.main_index),
    url(r'^group_management/$', views.group_management, name='group_management'),
    url(r'^survey_management/$', views.survey_management, name='survey_management'),
    url(r'^survey_data/$', views.survey_data, name='survey_management'),
    url(r'^thanks_profile/$', views.thanks_profile, name='profile_thanks'),
    url(r'^$', views.index, name='survey_index'),
)
lang: Python
proba: 0.000002
diff:
@@ -472,54 +472,178 @@
 ata/
-$', views.survey_data, name='survey_management
+(?P<survey_shortname>.+)/(?P<id>\d+)/$', views.survey_data, name='survey_data'),
    #url(r'^survey_data/(?P<survey_shortname>.+)/$', views.survey_data, name='survey_data
 '),
commit: 8822eba1c4351f8cc575fdb33c15bcd6a27bf21c
subject: allow for nodePort to be None in case of ClusterIP
old_file: kubernetes/models/v1/ServicePort.py
new_file: kubernetes/models/v1/ServicePort.py
old_contents:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE.md', which is part of this source code package.
#

from kubernetes.utils import is_valid_string


class ServicePort(object):

    VALID_PROTOCOLS = ['TCP', 'UDP']

    def __init__(self, name=None, model=None):
        super(ServicePort, self).__init__()

        self._name = None
        self._protocol = None
        self._port = None
        self._target_port = None
        self._node_port = None

        if name is not None:
            self.name = name
        if model is not None:
            if 'name' in model:
                self.name = model['name']
            if 'protocol' in model:
                self.protocol = model['protocol']
            if 'port' in model:
                self.port = model['port']
            if 'targetPort' in model:
                self.target_port = model['targetPort']
            if 'nodePort' in model:
                self.node_port = model['nodePort']

    # ------------------------------------------------------------------------------------- name

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, name=None):
        if not is_valid_string(name):
            raise SyntaxError('ServicePort: name: [ {} ] is invalid.'.format(name))
        self._name = name

    # ------------------------------------------------------------------------------------- protocol

    @property
    def protocol(self):
        return self._protocol

    @protocol.setter
    def protocol(self, protocol=None):
        if not is_valid_string(protocol) or protocol.upper() not in ServicePort.VALID_PROTOCOLS:
            raise SyntaxError('ServicePort: protocol: [ {} ] is invalid.'.format(protocol))
        self._protocol = protocol.upper()

    # ------------------------------------------------------------------------------------- port

    @property
    def port(self):
        return self._port

    @port.setter
    def port(self, port=None):
        if isinstance(port, str) and port.isdigit():
            port = int(port)
        if not isinstance(port, int):
            raise SyntaxError('ServicePort: port: [ {} ] is invalid.'.format(port))
        self._port = port

    # ------------------------------------------------------------------------------------- targetPort

    @property
    def target_port(self):
        return self._target_port

    @target_port.setter
    def target_port(self, port=None):
        msg = 'ServicePort: target_port: [ {} ] is invalid.'.format(port)
        try:
            p = int(port)
        except ValueError:
            if not is_valid_string(port):
                raise SyntaxError(msg)
            p = port
        except TypeError:
            raise SyntaxError(msg)
        self._target_port = p

    # ------------------------------------------------------------------------------------- nodePort

    @property
    def node_port(self):
        return self._node_port

    @node_port.setter
    def node_port(self, port=None):
        if isinstance(port, str) and port.isdigit():
            port = int(port)
        if not isinstance(port, int):
            raise SyntaxError('ServicePort: node_port: [ {} ] is invalid.'.format(port))
        self._node_port = port

    # ------------------------------------------------------------------------------------- serialize

    def serialize(self):
        data = {}
        if self.name is not None:
            data['name'] = self.name
        if self.protocol is not None:
            data['protocol'] = self.protocol
        if self.port is not None:
            data['port'] = self.port
        if self.target_port is not None:
            data['targetPort'] = self.target_port
        if self.node_port is not None:
            data['nodePort'] = self.node_port
        return data
lang: Python
proba: 0.000398
diff:
@@ -3093,32 +3093,53 @@
 ne):
         if 
+port is not None and 
 isinstance(port,
@@ -3196,32 +3196,53 @@
 ort)
         if 
+port is not None and 
 not isinstance(p
commit: f3a6281098b11ddd353a394d914186d5c7683f9b
subject: add jupyter module
old_file: rasa/jupyter.py
new_file: rasa/jupyter.py
lang: Python
proba: 0
diff:
@@ -0,0 +1,1320 @@
+import pprint as pretty_print
from typing import Any, Dict, Text, TYPE_CHECKING
from rasa_core.utils import print_success, print_error

if TYPE_CHECKING:
    from rasa_core.agent import Agent
    from rasa_core.interpreter import NaturalLanguageInterpreter


def pprint(object: Any):
    pretty_print.pprint(object, indent=2)


def chat(model: Text = None, agent: 'Agent' = None,
         interpreter: 'NaturalLanguageInterpreter' = None):

    if model:
        from rasa.run import create_agent
        agent = create_agent(model)
    elif agent and interpreter:
        agent.set_interpreter(interpreter)
    else:
        print_error("You either have to define a model path or an agent and "
                    "an interpreter.")

    print("Your bot is ready to talk! Type your messages here or send '/stop'.")
    while True:
        message = input()
        if message == '/stop':
            break

        for response in agent.handle_text(message):
            _display_bot_response(response)


def _display_bot_response(response: Dict):
    from IPython.display import Image, display

    for response_type, value in response.items():
        if response_type == 'text':
            print_success(value)

        if response_type == 'image':
            image = Image(url=value)
            display(image,)
commit: d8407723f9bf40ca166e5471e76c03c257bc71f9
subject: Add lc208_implement_trie_prefix_tree.py
old_file: lc208_implement_trie_prefix_tree.py
new_file: lc208_implement_trie_prefix_tree.py
lang: Python
proba: 0.000001
diff:
@@ -0,0 +1,1397 @@
+"""Leetcode 208. Implement Trie (Prefix Tree)
Medium

URL: https://leetcode.com/problems/implement-trie-prefix-tree/

Implement a trie with insert, search, and startsWith methods.

Example:
Trie trie = new Trie();
trie.insert("apple");
trie.search("apple");   // returns true
trie.search("app");     // returns false
trie.startsWith("app"); // returns true
trie.insert("app");
trie.search("app");     // returns true

Note:
You may assume that all inputs are consist of lowercase letters a-z.
All inputs are guaranteed to be non-empty strings.

Your Trie object will be instantiated and called as such:
obj = Trie()
obj.insert(word)
param_2 = obj.search(word)
param_3 = obj.startsWith(prefix)
"""

class Trie(object):
    def __init__(self):
        """
        Initialize your data structure here.
        """
        pass

    def insert(self, word):
        """
        Inserts a word into the trie.
        :type word: str
        :rtype: None
        """
        pass

    def search(self, word):
        """
        Returns if the word is in the trie.
        :type word: str
        :rtype: bool
        """
        pass

    def startsWith(self, prefix):
        """
        Returns if there is any word in the trie that starts with the given prefix.
        :type prefix: str
        :rtype: bool
        """
        pass


def main():
    pass


if __name__ == '__main__':
    main()
commit: 1b00a597d8145b2df05054fef8d072d452209463
subject: Make SurfaceHandler (for sfc data)
old_file: src/data/surface.py
new_file: src/data/surface.py
lang: Python
proba: 0
diff:
@@ -0,0 +1,2243 @@
+from glob import glob
# Third-party modules
import pandas as pd
# Hand-made modules
from base import LocationHandlerBase

SFC_REGEX_DIRNAME = "sfc[1-5]"
KWARGS_READ_CSV_SFC_MASTER = {
    "index_col": 0,
}
KWARGS_READ_CSV_SFC_LOG = {
    "index_col": 0,
    "na_values": ['', '　']
}


class SurfaceHandler(LocationHandlerBase):
    def __init__(self,
                 sfc_master_filepath,
                 sfc_file_prefix="sfc_",
                 sfc_file_suffix=".tsv"):
        super().__init__(sfc_master_filepath, **KWARGS_READ_CSV_SFC_MASTER)
        self.sfc_file_prefix = sfc_file_prefix
        self.sfc_file_suffix = sfc_file_suffix
        self.SFC_REGEX_DIRNAME = SFC_REGEX_DIRNAME

    def read_tsv(self, path_or_buf):
        df_ret = pd.read_csv(path_or_buf, **self.gen_read_csv_kwargs(KWARGS_READ_CSV_SFC_LOG))
        df_ret.index = self.parse_datetime(pd.Series(df_ret.index).apply(str))
        return df_ret

    def to_tsv(self, df, path_or_buf, **kwargs):
        df.to_csv(path_or_buf, **self.gen_to_csv_kwargs(kwargs))

    def gen_filepath_list(self, aid_list):
        sfc_regex_filepath_list = [
            self.path.join(
                self.INTERIM_DATA_BASEPATH,
                self.SFC_REGEX_DIRNAME,
                self.sfc_file_prefix + str(aid) + self.sfc_file_suffix
            ) for aid in aid_list
        ]

        return [
            sfc_file \
            for sfc_regex_filepath in sfc_regex_filepath_list \
            for sfc_file in glob(sfc_regex_filepath)
        ]

    def retrive_data(self, filepath_list, name_list):
        if len(filepath_list) < 1:
            raise ValueError("Empty list ?")

        df_ret = self.read_tsv(filepath_list[0])
        df_ret.columns = [str(col_name) + '_' + name_list[0] for col_name in df_ret.columns]

        if len(filepath_list) > 1:
            for filepath, name in zip(filepath_list[1:], name_list[1:]):
                df_ret = df_ret.merge(
                    self.read_tsv(filepath),
                    how="outer",
                    left_index=True,
                    right_index=True,
                    suffixes=(".", "_{}".format(name))
                )

        return df_ret


if __name__ == '__main__':
    print("Surface!")
commit: c025cd6649e2326ade7b81df8408c4363fdb2050
subject: add music handler
old_file: app/music_handler.py
new_file: app/music_handler.py
lang: Python
proba: 0.000001
diff:
@@ -0,0 +1,419 @@
+#-*- coding:utf-8 -*-
from tools.httptools import Route
from models import Music

@Route.get("/music")
def get_music_handler(app):
	ret={};
	ret['code']=200
	ret['msg']='ok'
	ret['type']=3
	ret['data']=[
	    {'music_name':'CountrintStars','music_url':'http://7xs7oc.com1.z0.glb.clouddn.com/music%2FJason%20Chen%20-%20Counting%20Stars.mp3'},
	]
	return ret

@Route.post("/music")
def post_music_handler(app):
	return 'ok'
commit: 3091555ca7fc421f886a1df1ac28f677feb70a53
subject: Add default value for the fields object and field of the social network app model
old_file: app/migrations/0006_auto_20150825_1513.py
new_file: app/migrations/0006_auto_20150825_1513.py
lang: Python
proba: 0
diff:
@@ -0,0 +1,757 @@
+# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('app', '0005_auto_20150819_1054'),
    ]

    operations = [
        migrations.AlterField(
            model_name='socialnetworkapp',
            name='field_real_time_updates',
            field=models.CharField(default=b'feed', max_length=50, null=True, blank=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='socialnetworkapp',
            name='object_real_time_updates',
            field=models.CharField(default=b'page', max_length=100, null=True, blank=True),
            preserve_default=True,
        ),
    ]
commit: f05bd26c7a275c38c092c821e5ef62284c36e783
subject: Test transormation matrices
old_file: test/test_interpolate.py
new_file: test/test_interpolate.py
lang: Python
proba: 0
diff:
@@ -0,0 +1,707 @@
+import pywt
import sys
import numpy as np
from scipy.ndimage.interpolation import affine_transform
sys.path.insert(0, '../mlp_test')
from data_utils import load_mnist
from skimage import transform as tf



test_data = load_mnist()[2]

chosen_index = 7

test_x_chosen = test_data[0][chosen_index]
test_y_chosen = test_data[1][chosen_index]

transm = np.eye(28, k=0) + np.eye(28, k=1)

pic_arr = test_x_chosen.reshape((28, 28))

pic_trans = np.dot(pic_arr, transm)

import matplotlib.pyplot as plt
import matplotlib.cm as cm

plt.subplot(2 , 1, 1)
plt.imshow(pic_arr, cmap = cm.Greys_r,interpolation='nearest')
plt.subplot(2 , 1, 2)
plt.imshow(pic_trans, cmap = cm.Greys_r,interpolation='nearest')
plt.show()
commit: 47cbcf130e76604ed93306f02fc2221a276d3bbf
subject: Split out
old_file: pentai/gui/spacer.py
new_file: pentai/gui/spacer.py
lang: Python
proba: 0.000462
diff:
@@ -0,0 +1,102 @@
+from kivy.uix.widget import Widget

class HSpacer(Widget):
    pass

class VSpacer(Widget):
    pass
commit: e37a616d23805ced7250d4cdd6422751d8ae5143
subject: Add populate_anticrispr.py
old_file: phageAPI/populate_anticrispr.py
new_file: phageAPI/populate_anticrispr.py
lang: Python
proba: 0
diff:
@@ -0,0 +1,1615 @@
+#! /usr/bin/env python

import os
from Bio import SeqIO
import textwrap


def populate(sequences, AntiCRISPR):
    for seq in sequences:
        spacer, _ = AntiCRISPR.objects.get_or_create(
            accession=seq.name,
            sequence=str(seq.seq))
        spacer.save()


def main():
    import argparse

    parser = argparse.ArgumentParser(description=textwrap.dedent("""\
        Import anticrispr sequences into the API DB.

        To use, first get the list of accession numbers from
        https://www.nature.com/articles/nmicrobiol201685. This list is
        available locally in `data/antiCRISPR_accessions.txt`,

        The script `acc2gb.py` can then be used to download the antiCRISPR
        protein sequence in fasta format, assuming you have NICB access:

            cat data/antiCRISPR_accessions.txt | python acc2gb.py [email protected] protein fasta > anticrispr.txt

        Finally, populate the database with the accession numbers in the
        accession field and the sequences in the sequence field:

            cd phageAPI
            populate_anticrispr.py ../anticrispr.txt
        """),
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('sequences', metavar='FILE', nargs=1,
                        help='path to sequences file, in fasta format')
    args = parser.parse_args()

    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'phageAPI.settings')
    import django
    django.setup()
    from restapi.models import AntiCRISPR

    populate(SeqIO.parse(args.sequences[0], 'fasta'), AntiCRISPR)


if __name__ == '__main__':
    main()
commit: 333cbe13d8104934a924f223427fa06a60a8b080
subject: Create php4dvd_1.py
old_file: php4dvd/php4dvd_1.py
new_file: php4dvd/php4dvd_1.py
lang: Python
proba: 0.000013
diff:
@@ -0,0 +1,1591 @@
+# -*- coding: utf-8 -*-
from selenium import webdriver
import unittest

class Untitled(unittest.TestCase):
    def setUp(self):
        self.driver = webdriver.Firefox()
        self.driver.implicitly_wait(30)
        self.base_url = "http://localhost:8080/"
        self.verificationErrors = []
        self.accept_next_alert = True

    def test_untitled(self):
        driver = self.driver
        driver.get(self.base_url + "/php4dvd/")
        driver.find_element_by_id("username").clear()
        driver.find_element_by_id("username").send_keys("admin")
        driver.find_element_by_name("password").clear()
        driver.find_element_by_name("password").send_keys("admin")
        driver.find_element_by_name("submit").click()

    def is_element_present(self, how, what):
        try:
            self.driver.find_element(by=how, value=what)
        except NoSuchElementException, e: return False
        return True

    def is_alert_present(self):
        try:
            self.driver.switch_to_alert()
        except NoAlertPresentException, e: return False
        return True

    def close_alert_and_get_its_text(self):
        try:
            alert = self.driver.switch_to_alert()
            alert_text = alert.text
            if self.accept_next_alert:
                alert.accept()
            else:
                alert.dismiss()
            return alert_text
        finally: self.accept_next_alert = True

    def tearDown(self):
        self.driver.quit()
        self.assertEqual([], self.verificationErrors)

if __name__ == "__main__":
    unittest.main()
commit: d894e39e0280aaa45cef914f2202e978797b26fb
subject: Update and rename 2 to 28.py
old_file: exercises/28.py
new_file: exercises/28.py
lang: Python
proba: 0.0001
diff:
@@ -0,0 +1,212 @@
+'''
Write a function find_longest_word()
that takes a list of words and returns
the length of the longest one.
Use only higher order functions.
'''


def find_longest_word(lst):
    return len(max(lst, key=len))
commit: 4d9da24a0356bc63be6ab06eb084f97116d1dac4
subject: should be functioning
old_file: cogs/packages.py
new_file: cogs/packages.py
lang: Python
proba: 0.999455
diff:
@@ -0,0 +1,653 @@
+import discord
from cogs import common

class Packages:
    def __init__(self, bot):
        self.bot = bot
    def pkg_url(pkg):
        """Returns the URL for JSON data about a package on PyPI."""
        return f'https://pypi.python.org/pypi/{pkg}/json'

    @commands.command()
    async def pypi(self, ctx, pkg: str):
        async with ctx.bot.session.get(pkg_url(pkg), headers=common.user_agent) as ps:
            pjson = await ps.json()
        pkg_s = discord.Embed(title=f'PyPI stats for {pkg}', colour=0x690E8)
        pkg_s.add_field(name='Version', value=pjson['info']['version'])
        await ctx.send(embed=pkg)
commit: 183b0c573478ff5e2480758abec629ddce4f0766
subject: Create missing migration for model Meta changes in 9d1e29150407e906bc651a8249c53e5e6d1fb1e7.
old_file: atmo/jobs/migrations/0035_auto_20170529_1424.py
new_file: atmo/jobs/migrations/0035_auto_20170529_1424.py
lang: Python
proba: 0
diff:
@@ -0,0 +1,450 @@
+# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-29 14:24
from __future__ import unicode_literals

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('jobs', '0034_auto_20170529_1424'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='sparkjobrun',
            options={'get_latest_by': 'created_at', 'ordering': ['-created_at']},
        ),
    ]
commit: b674ff31ab846bc4c11b615ad7f738ff176d5f96
subject: Add /team test
old_file: picoCTF-web/tests/api/functional/v1/test_team.py
new_file: picoCTF-web/tests/api/functional/v1/test_team.py
lang: Python
proba: 0
diff:
@@ -0,0 +1,1582 @@
+"""Tests for the /api/v1/team endpoints."""

from common import ( # noqa (fixture)
  ADMIN_DEMOGRAPHICS,
  clear_db,
  client,
  decode_response,
  get_csrf_token,
  register_test_accounts,
  TEACHER_DEMOGRAPHICS,
  USER_DEMOGRAPHICS,
  get_conn
)


def test_get_my_team(client):
    """Tests the /team endpoint."""
    clear_db()
    register_test_accounts()
    client.post('/api/v1/user/login', json={
        'username': USER_DEMOGRAPHICS['username'],
        'password': USER_DEMOGRAPHICS['password']
    })

    expected_fields = {
        'achievements': [],
        'affiliation': 'Sample School',
        'competition_active': False,
        'country': 'US',
        'eligible': True,
        'flagged_submissions': [],
        'max_team_size': 1,
        'progression': [],
        'score': 0,
        'size': 1,
        'solved_problems': [],
        'team_name': 'sampleuser'
    }
    expected_member_fields = {
        'affiliation': 'None',
        'country': 'US',
        'email': '[email protected]',
        'firstname': 'Sample',
        'lastname': 'User',
        'username': 'sampleuser',
        'usertype': 'student'
    }
    res = client.get('/api/v1/team')
    assert res.status_code == 200
    for k, v in expected_fields.items():
        assert res.json[k] == v

    assert len(res.json['members']) == 1
    for k, v in expected_member_fields.items():
        assert res.json['members'][0][k] == v

    db = get_conn()
    uid = db.users.find_one({'username': USER_DEMOGRAPHICS['username']})['uid']
    assert res.json['members'][0]['uid'] == uid
commit: d8c8287cce7ddc48f4ea271a54bd6efa8dcabe66
subject: Create OutputNeuronGroup_multiple_outputs_1.py
old_file: examples/OutputNeuronGroup_multiple_outputs_1.py
new_file: examples/OutputNeuronGroup_multiple_outputs_1.py
lang: Python
proba: 0
diff:
@@ -0,0 +1,2856 @@
+'''
Example of a spike receptor (only receives spikes)

In this example spikes are received and processed creating a raster plot at the end of the simulation.

'''

from brian import *
import numpy

from brian_multiprocess_udp import BrianConnectUDP

# The main function with the NeuronGroup(s) and Synapse(s) must be named "main_NeuronGroup".
# It will receive two objects: input_Neuron_Group and the simulation_clock. The input_Neuron_Group
# will supply the input spikes to the network. The size of the spike train received equals NumOfNeuronsInput.
# The size of the output spike train equals NumOfNeuronsOutput and must be the same size of the NeuronGroup who is
# going to interface with the rest of the system to send spikes.
# The function must return all the NeuronGroup objects and all the Synapse objects this way:
# ([list of all NeuronGroups],[list of all Synapses])
# and the FIRST (index 0) NeuronGroup of the list MUST be the one where the OUTPUT spikes will be taken by the simulation.
#
# Here is also possible to use "dummy" NeuronGroups only to receive and/or send spikes.

my_neuron_input_number = 45

def main_NeuronGroup(input_Neuron_Group, simulation_clock):
    print "main_NeuronGroup!" #DEBUG!

    simclock = simulation_clock

    Nr=NeuronGroup(my_neuron_input_number, model='v:1', reset=0, threshold=0.5, clock=simclock)
    Nr.v=0

    # SYNAPSES BETWEEN REAL NEURON NETWORK AND THE INPUT
    Syn_iNG_Nr=Synapses(input_Neuron_Group, Nr, model='w:1', pre='v+=w', clock=simclock)

    Syn_iNG_Nr[:,:]='i==j'

    print "Total Number of Synapses:", len(Syn_iNG_Nr) #DEBUG!

    Syn_iNG_Nr.w=1

    MExt=SpikeMonitor(Nr) # Spikes sent by UDP

    Mdummy=SpikeMonitor(input_Neuron_Group) # Spikes received by UDP

    return ([Nr],[Syn_iNG_Nr],[MExt,Mdummy])

def post_simulation_function(input_NG, simulation_NG, simulation_SYN, simulation_MN):
    """
    input_NG: the neuron group that receives the input spikes
    simulation_NG: the neuron groups list passed to the system by the user function (main_NeuronGroup)
    simulation_SYN: the synapses list passed to the system by the user function (main_NeuronGroup)
    simulation_MN: the monitors list passed to the system by the user function (main_NeuronGroup)

    This way it is possible to plot, save or do whatever you want with these objects after the end of the simulation!
    """
    pass
    figure()
    raster_plot(simulation_MN[1])
    title("Spikes Received by UDP")
    show(block=True)
    # savefig('output.pdf')




if __name__=="__main__":

    my_simulation = BrianConnectUDP(main_NeuronGroup, NumOfNeuronsInput=my_neuron_input_number, post_simulation_function=post_simulation_function,
        input_addresses=[("127.0.0.1", 10101, my_neuron_input_number)], simclock_dt=1, inputclock_dt=2, TotalSimulationTime=10000, sim_repetitions=0, brian_address=2)
commit: c70a127e17286f18e8d2d46bdc2e5ec6b0c55d0d
subject: Add script to output statistics on body part emotion pairs
old_file: generate_body_part_emotion_pairs.py
new_file: generate_body_part_emotion_pairs.py
lang: Python
proba: 0
diff:
@@ -0,0 +1,3033 @@
+"""Find known body parts in sentences with predicted label 'Lichaamsdeel'.

Extended body parts are saved to new text files.

Usage: python classify_body_parts.py <json file with body part mapping> <dir
with input texts> <dir for output texts>
"""
import os
import codecs
import argparse
import json
import copy
from collections import Counter
from count_labels import load_data
from emotools.heem_utils import heem_body_part_labels, heem_emotion_labels
from count_labels import corpus_metadata
from genre2period import print_results_line_period

def get_emotion_body_part_pairs(file_name):
    # load data set
    X_data, Y_data = load_data(file_name)

    Y = [s.split('_') for s in Y_data]

    emotions2body = {}
    emotions = Counter()

    for labelset in Y:
        body_parts = [lb for lb in labelset if lb in heem_body_part_labels]
        emotion_lbls = [lb for lb in labelset if lb in heem_emotion_labels]

        if body_parts and emotion_lbls:
            for em in emotion_lbls:
                for bp in body_parts:
                    if not emotions2body.get(em):
                        emotions2body[em] = Counter()
                    emotions2body[em][bp] += 1
                emotions[em] += 1
    return emotions, emotions2body


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('file', help='csv file containing corpus metadata')
    parser.add_argument('input_dir', help='the directory where the input text '
                        'files can be found.')
    args = parser.parse_args()

    f_name = args.file
    input_dir = args.input_dir

    text2period, text2year, text2genre, period2text, genre2text = \
        corpus_metadata(f_name)

    # statistics for entire corpus
    global_emotions = Counter()
    emotion_body_pairs = Counter()
    period_counters = {}

    # process texts
    text_files = [t for t in os.listdir(input_dir) if t.endswith('.txt')]
    for text_file in text_files:
        text_id = text_file.replace('.txt', '')
        in_file = os.path.join(input_dir, text_file)

        period = text2period.get(text_id)

        emotions, emotions2body = get_emotion_body_part_pairs(in_file)

        global_emotions.update(emotions)

        for em, body_counter in emotions2body.iteritems():
            if not period_counters.get(em):
                period_counters[em] = {}
            if not period_counters.get(em).get(period):
                period_counters[em][period] = Counter()
            period_counters[em][period].update(body_counter)

    for em, freq in global_emotions.most_common():
        print '{}\t{}'.format(em, freq)
        print 'Body part\tRenaissance\tClassisim\tEnlightenment\tNone\tTotal'

        merged_body_parts = Counter()
        for c in period_counters.get(em):
            merged_body_parts.update(period_counters.get(em).get(c))

        for label, freq in merged_body_parts.most_common():
            print print_results_line_period(label, period_counters.get(em))

        print
        print
19ee2fbee238e94b7944154d692a9e488ee19a79
Add basic opps database configuration
opps/db/conf.py
opps/db/conf.py
Python
0
@@ -0,0 +1,557 @@ +#!/usr/bin/env python%0A# -*- coding: utf-8 -*-%0Afrom django.conf import settings%0A%0Afrom appconf import AppConf%0A%0A%0Aclass OppsDataBaseConf(AppConf):%0A%0A HOST = getattr(settings, 'OPPS_DB_HOSR', None)%0A USER = getattr(settings, 'OPPS_DB_USER', None)%0A PASSWORD = getattr(settings, 'OPPS_DB_PASSWORD', None)%0A PORT = getattr(settings, 'OPPS_DB_PORT', None)%0A NAME = getattr(settings, 'OPPS_DB_NAME', None)%0A TYPE = getattr(settings, 'OPPS_DB_TYPE', None)%0A OPTION = getattr(settings, 'OPPS_BD_OPTION', None)%0A%0A class Meta:%0A prefix = 'opps_db'%0A
d1c791ccf5b2873bbc248c9b079a5b68159ffb50
Add ECM Keys script
python/ecep/portal/management/commands/update_ecm.py
python/ecep/portal/management/commands/update_ecm.py
Python
0
@@ -0,0 +1,849 @@ +import csv%0Aimport os%0Aimport re%0A%0Afrom django.core.management.base import NoArgsCommand%0Afrom django.conf import settings%0A%0Afrom portal.models import Location%0A%0A%0Aclass Command(NoArgsCommand):%0A %22%22%22%0A Import Cleaned Site Name, Address, and ECM Keys%0A %22%22%22%0A%0A def handle(self, *args, **options):%0A%0A with open('master-list.csv', 'rb') as master:%0A reader = csv.DictReader(master)%0A%0A for row in reader:%0A try:%0A l = Location.objects.get(pk=int(row%5B'Portal ID'%5D))%0A l.site_name = row%5B'Master Site Name'%5D%0A l.address = row%5B'Master Address'%5D%0A l.ecm_key = row%5B'ECM Key'%5D%0A l.save()%0A print l.site_name%0A %0A except:%0A print %22Ruh roh!%22%0A continue%0A%0A
8ed1fccb2a1d72815bde93b19d45069e59db0900
add force404 sample
force404.py
force404.py
Python
0
@@ -0,0 +1,273 @@ +# -*- coding:utf-8 -*-%0D%0Afrom bottle import route, run, abort, error%0D%0A%0D%0A@route(%22/%22)%0D%0Adef top():%0D%0A abort(404, %22go to 404%22)%0D%0A return %22Hello world!%22%0D%0A%0D%0A@error(404)%0D%0Adef error404(error):%0D%0A return %22Not Found!%22%0D%0A%0D%0Arun(host=%220.0.0.0%22, port=8080, debug=True, reloader=True)
5d58200622e05728acce8ffba1ddf7e5063f556c
Create formatIO.py
formatIO.py
formatIO.py
Python
0.000003
@@ -0,0 +1,1576 @@ +%0A# %E5%B0%86%E8%BE%93%E5%85%A5%E8%BE%93%E5%87%BA%E6%A0%BC%E5%BC%8F%E5%8C%96%E4%BE%8B%E5%A6%82:(xxxx) -%3E (x)(x)(x)(x), (%5Bx%5D) -%3E x, %5Bxxxx%5D -%3E%5Bx%5D%5Bx%5D%5Bx%5D%5Bx%5D%0A%0Adef formatting(old_tune):%0A '''%0A %E6%A0%BC%E5%BC%8F%E5%8C%96%0A '''%0A new_tune = ''%0A sharped = False%0A low = high = 0%0A for i in old_tune:%0A if i == '(':%0A low = low + 1%0A elif i == '%5B':%0A high = high + 1%0A elif i == '%5D':%0A high = high - 1%0A elif i == ')':%0A low = low - 1%0A elif i == '#':%0A sharped = True%0A if low == high:%0A new_tune = new_tune + i%0A elif low %3E high:%0A new_tune = new_tune + '(' * (low - high) + i%0A elif low %3C high:%0A new_tune = new_tune + '%5B' * (high - low) + i%0A else:%0A return 'error'%0A else:%0A if sharped:%0A if low == high:%0A new_tune = new_tune + i%0A elif low %3E high:%0A new_tune = new_tune + i + ')' * (low - high)%0A elif low %3C high:%0A new_tune = new_tune + i + '%5D' * (low - high)%0A else:%0A return 'error'%0A sharped = False%0A else:%0A if low == high:%0A new_tune = new_tune + i%0A elif low %3E high:%0A new_tune = new_tune + '(' * (low - high) + i + ')' * (low - high)%0A elif low %3C high:%0A new_tune = new_tune + '%5B' * (high - low) + i + '%5D' * (low - high)%0A else:%0A return 'error'%0A print(new_tune)%0A return new_tune%0A
aff6ff82ec4fc0076f8356d782a2a103510ebbfd
use Queue for product and custom problem
product_custom/use_queue.py
product_custom/use_queue.py
Python
0
@@ -0,0 +1,626 @@ +%0A%0A# http://blog.jobbole.com/52412/ %0Afrom threading import Thread%0Aimport time%0Aimport random%0Afrom Queue import Queue%0A %0Aqueue = Queue(10)%0A %0Aclass ProducerThread(Thread):%0A def run(self):%0A nums = range(5)%0A while True:%0A num = random.choice(nums)%0A queue.put(num)%0A print %22Produced%22, num%0A time.sleep(random.random())%0A %0Aclass ConsumerThread(Thread):%0A def run(self):%0A while True:%0A num = queue.get()%0A queue.task_done()%0A print %22Consumed%22, num%0A time.sleep(random.random())%0A %0AProducerThread().start()%0AConsumerThread().start()%0A
b75a0293f214de4196d9df50ef5906885c2810fc
Create empClass.py
empClass.py
empClass.py
Python
0.000002
@@ -0,0 +1,449 @@ +from rgfunc import *%0A%0Aclass Employee(object):%0A%09year = 0%0A%09month = 0%0A%09day = 0%0A%09city = %22%22%0A%09country = %22%22%0A%09lastname = %22%22%0A%09%0A%0A%09%0A%09def __init__(self,name):%0A%09%09self.name = name%0A%09%09%0A%09%0A%09def dateofbirth(self):%0A%09%09return str(self.day)+%22/%22+str(self.month)+%22/%22+str(self.year)%0A%09#fullname = name,%22 %22,lastname%0A%09def fullname(self):%0A%09%09return str(self.name)+%22 %22+str(self.lastname)%0A%0A%0Aif __name__ == %22__main__%22:%0A%09print %22Error-Invalid File to Run- Please Run main.py.%22%0A%09exit()%0A
d9be3f189fc34117bdec6e0c7856f7a7dc5f902a
Add tool for generating the JSONP required by the documentation versions.
cdap-docs/tools/versionscallback-gen.py
cdap-docs/tools/versionscallback-gen.py
Python
0
@@ -0,0 +1,2140 @@ +#!/usr/bin/env python%0A# -*- coding: utf-8 -*-%0A%0A#%0A# Copyright %C2%A9 2014 Cask Data, Inc.%0A#%0A# Used to generate JSONP from a CDAP documentation directory on a webserver.%0A# %0A# sudo echo %22versionscallback(%7B%5C%22development%5C%22: %5C%222.6.0-SNAPSHOT%5C%22, %5C%22current%5C%22: %5C%222.5.2%5C%22, %5C%22versions%5C%22: %5B%5C%222.5.1%5C%22, %5C%222.5.0%5C%22%5D%7D);%22 %3E json-versions.js; ls -l%0A%0Aimport sys%0Afrom os import getcwd, listdir, readlink%0Afrom os.path import isdir, islink, join%0A%0Adef add_value(call, name, value):%0A if value:%0A if call:%0A call += ', '%0A call += '%5C%5C%5C%22%25s%5C%5C%5C%22: %5C%5C%5C%22%25s%5C%5C%5C%22' %25 (name, value)%0A return call%0A%0A%0Adef add_object(call, name, value):%0A if value:%0A if call:%0A call += ', '%0A call += ('%5C%5C%5C%22%25s%5C%5C%5C%22: %25s' %25 (name, value)).replace(%22%5C'%22, '%5C%5C%5C%22')%0A return call%0A%0A%0Adef walk_directory(path=''):%0A global current, development, versions%0A%0A if not path:%0A path = getcwd()%0A %0A onlydirs = %5B d for d in listdir(path) if isdir(join(path,d)) %5D%0A onlydirs.reverse()%0A %0A for d in onlydirs:%0A if d == 'current':%0A d_path = join(path,d)%0A if islink(d_path):%0A current = readlink(d_path)%0A elif d.endswith('SNAPSHOT'):%0A development = d%0A elif d and d != current:%0A versions.append(d)%0A%0Adef build(path=''):%0A global current, development, versions%0A call = ''%0A%0A walk_directory(path)%0A%0A call = add_value(call, 'development', development)%0A call = add_value(call, 'current', current)%0A call = add_object(call, 'versions', versions)%0A%0A target = join(path, 'json-versions.js')%0A %0A print 'sudo echo %22versionscallback(%7B%25s%7D);%22 %3E %25s; ls -l' %25 (call, target)%0A%0Adef usage():%0A print 'Generates a command that creates the %22versionscallback%22 JSONP from a CDAP documentation directory on a webserver.'%0A print 'Run this with the path to the directory containing the documentation directories.'%0A print 'python %25s %3Cpath%3E' %25 sys.argv%5B0%5D%0A%0A# Main%0Aif __name__ == '__main__':%0A current = ''%0A development = ''%0A versions = %5B%5D%0A path = ''%0A if len(sys.argv) %3E 1:%0A path = sys.argv%5B1%5D%0A build(path)%0A else:%0A usage()%0A
fe1d75065f7371502cf81ea57e2a1019c2db093c
add config.py
custom_config.py
custom_config.py
Python
0.000002
@@ -0,0 +1,333 @@ +# ===== GAE dev_appserver.py settings =====%0A# %5BRequired%5D%0Agae_sdk_path = %22%22%0Aproject_path = %22%22%0A%0A%0A# %5BOptional%5D%0Adatastore_path = %22%22%0Aport = %22%22%0Aadmin_port = %22%22%0A%0A%0A# ===== GAE Helper settings =====%0A# %5BLog%5D%0Alog_path = %22%22%0Aappend_date_to_log = False%0A%0A# %5BRequest Filter%5D%0Afile_type_filter = %5B%5D%0Acustom_regex_filter = %5B%5D%0Ause_time_delimiter = False%0A
90c36d54f8822ef28bef98be4ba735d15b405648
add get_dump.py utility
get_dump.py
get_dump.py
Python
0.000002
@@ -0,0 +1,1464 @@ +#!/usr/bin/python%0Aimport argparse%0A%0Aimport mosquitto%0Aimport time, random%0Aimport sys%0A%0A%0A%0Adef on_mqtt_message(arg0, arg1, arg2=None):%0A #%0A #~ print %22on_mqtt_message%22, arg0, arg1, arg2%0A if arg2 is None:%0A mosq, obj, msg = None, arg0, arg1%0A else:%0A mosq, obj, msg = arg0, arg1, arg2%0A%0A%0A if msg.topic != retain_hack_topic:%0A print %22%25s%5Ct%25s%22 %25 (msg.topic, msg.payload)%0A else:%0A%0A%0A #~ print %22done!%22%0A client.disconnect()%0A sys.exit(0)%0A%0A%0Aif __name__ =='__main__':%0A parser = argparse.ArgumentParser(description='MQTT retained message deleter', add_help=False)%0A%0A parser.add_argument('-h', '--host', dest='host', type=str,%0A help='MQTT host', default='localhost')%0A%0A parser.add_argument('-p', '--port', dest='port', type=int,%0A help='MQTT port', default='1883')%0A%0A parser.add_argument('topic' , type=str,%0A help='Topic mask to unpublish retained messages from. For example: %22/devices/my-device/#%22')%0A%0A args = parser.parse_args()%0A%0A%0A client = mosquitto.Mosquitto()%0A client.connect(args.host, args.port)%0A client.on_message = on_mqtt_message%0A%0A%0A client.subscribe(args.topic)%0A%0A # hack to get retained settings first:%0A retain_hack_topic = %22/tmp/%25s/retain_hack%22 %25 ( client._client_id)%0A client.subscribe(retain_hack_topic)%0A client.publish(retain_hack_topic, '1')%0A%0A while 1:%0A rc = client.loop()%0A if rc != 0:%0A break%0A
e04d3bfd20879d0e8e404a3fff4ab37b914cd303
Add ContactForm
contact/forms.py
contact/forms.py
Python
0
@@ -0,0 +1,629 @@ +from django import forms%0Afrom django.core.exceptions import ValidationError%0A%0A%0Afrom prosodyauth.forms import PlaceholderForm%0Afrom simplecaptcha import captcha%0A%0A%0Acontact_reasons = (%0A ('question', 'Question'),%0A ('problem', 'Problem'),%0A ('suggestion', 'Suggestion'),%0A ('other', 'Other'),%0A )%0A%0Aclass ContactForm(PlaceholderForm):%0A username = forms.CharField(widget=forms.HiddenInput)%0A ip_address = forms.GenericIPAddressField(widget=forms.HiddenInput)%0A subject = forms.ChoiceField(choices=contact_reasons)%0A email = forms.EmailField()%0A message = forms.CharField(widget=forms.Textarea)%0A%0A
0aed5df2f7c08cdb365b098a93800b0269c0c6b4
Create class Dataset
gammacat/dataset.py
gammacat/dataset.py
Python
0.000002
@@ -0,0 +1,1194 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst%0Aimport logging%0Afrom .utils import load_yaml, write_yaml%0Afrom gammapy.catalog.gammacat import GammaCatResource%0A%0A__all__ = %5B%0A 'DataSet',%0A%5D%0A%0Alog = logging.getLogger(__name__)%0A%0Aclass DataSet:%0A %22%22%22Process a dataset file.%22%22%22%0A resource_type = 'ds'%0A%0A def __init__(self, data, resource):%0A log.debug('DataSet.__init__()')%0A self.resource = resource%0A self.data = data%0A%0A @classmethod%0A def read(cls, filename):%0A data = load_yaml(filename)%0A resource = cls._read_resource_info(data, filename)%0A return cls(data = data, resource = resource)%0A%0A def write(self, filename):%0A write_yaml(self.data, filename)%0A%0A def folder(self):%0A return self.data%5B'reference_id'%5D.replace('&', '%2526')%0A%0A @classmethod%0A def _read_resource_info(cls, data, location):%0A try:%0A file_id = data%5B'file_id'%5D%0A except:%0A file_id = -1%0A return GammaCatResource(%0A source_id = data%5B'source_id'%5D,%0A reference_id = data%5B'reference_id'%5D,%0A file_id = file_id,%0A type=cls.resource_type,%0A location=location%0A )
52f1805bdce7d6aa60047911b600d068cbee05e7
Add another test case for checkout
saleor/checkout/test_checkout.py
saleor/checkout/test_checkout.py
from django.contrib.auth.models import AnonymousUser from django.test import TestCase from mock import MagicMock, patch from . import BillingAddressStep, ShippingStep from ..checkout import STORAGE_SESSION_KEY from ..checkout.steps import BaseAddressStep from ..userprofile.models import Address NEW_ADDRESS = { 'first_name': 'Test', 'last_name': 'Test', 'street_address_1': 'Test', 'street_address_2': 'Test', 'city': 'Test', 'phone': '12345678', 'postal_code': '987654', 'country': 'PL', 'country_area': '', 'company_name': 'Test'} def test_base_address_step_works(rf): request = rf.post('/checkout/', NEW_ADDRESS) request.user = AnonymousUser() address = Address(**NEW_ADDRESS) step = BaseAddressStep(request, storage={}, address=address) assert step.forms_are_valid() assert step.address.first_name == 'Test' def test_billing_address_save_without_address(rf): data = dict(NEW_ADDRESS, email='[email protected]') request = rf.post('/checkout/', data) request.user = AnonymousUser() storage = {} step = BillingAddressStep(request, storage) assert step.process() is None assert isinstance(storage['address'], dict) assert storage['address']['first_name'] == 'Test' def test_billing_address_save_with_address_in_checkout(rf): data = dict(NEW_ADDRESS, email='[email protected]') request = rf.post('/checkout/', data) request.user = AnonymousUser() storage = {'address': {}} step = BillingAddressStep(request, storage) assert step.forms_are_valid() def test_shipping_step_save_without_address(rf): data = dict(NEW_ADDRESS, method='dummy_shipping') request = rf.post('/checkout/', data) request.user = AnonymousUser() request.session = {STORAGE_SESSION_KEY: {}} group = MagicMock() group.address = None storage = {'address': NEW_ADDRESS.copy()} step = ShippingStep(request, storage, group) assert step.forms_are_valid() step.save() assert isinstance(storage['address'], dict) def test_shipping_step_save_with_address_in_group(rf): data = dict(NEW_ADDRESS, method='dummy_shipping') request = rf.post('/checkout/', data) request.user = AnonymousUser() request.session = {} group = MagicMock() group.address = NEW_ADDRESS.copy() storage = {'address': NEW_ADDRESS.copy()} step = ShippingStep(request, storage, group) assert step.forms_are_valid() step.save() assert storage['address'] == NEW_ADDRESS def test_shipping_step_save_with_address_in_checkout(rf): data = dict(NEW_ADDRESS, method='dummy_shipping') request = rf.post('/checkout/', data) request.user = AnonymousUser() request.session = {} group = MagicMock() group.address = None storage = { 'address': { 'first_name': 'Change', 'last_name': 'Me', 'id': 10}} step = ShippingStep(request, storage, group) assert step.forms_are_valid() step.save() assert storage['address'] == NEW_ADDRESS def test_shipping_step_save_with_address_other_than_billing(rf): address_data = {'first_name': 'Billing Company LTD', 'last_name': 'Test', 'street_address_1': 'Test', 'street_address_2': 'Test', 'city': 'Test', 'phone': '12345678', 'postal_code': '987654', 'country': 'PL', 'country_area': '', 'company_name': 'Test'} data = dict( address_data, method='dummy_shipping', shipping_same_as_billing=False) request = rf.post('/checkout/', data) request.user = AnonymousUser() request.session = {} group = MagicMock() group.address = None storage = { 'address': { 'first_name': 'Change', 'last_name': 'Me', 'id': 10}} billing_address = Address(**address_data) step = ShippingStep(request, storage, group, billing_address) assert step.forms_are_valid() step.save() assert storage['address'] == address_data
Python
0
@@ -3127,16 +3127,25 @@ data = %7B +%0A 'first_n @@ -3173,16 +3173,16 @@ y LTD',%0A - @@ -3765,38 +3765,47 @@ 'first_name': ' -Change +Billing Address ',%0A ' @@ -3809,34 +3809,36 @@ 'last_name': ' -Me +Test ',%0A ' @@ -4001,32 +4001,32 @@ step.save()%0A - assert stora @@ -4055,8 +4055,916 @@ ss_data%0A +%0A%0Adef test_shipping_step_save_same_as_billing(rf):%0A address_data = %7B%0A 'first_name': 'Billing Company LTD',%0A 'last_name': 'Test',%0A 'street_address_1': 'Test',%0A 'street_address_2': 'Test',%0A 'city': 'Test',%0A 'phone': '12345678',%0A 'postal_code': '987654',%0A 'country': 'PL',%0A 'country_area': '',%0A 'company_name': 'Test'%7D%0A data = dict(%0A address_data,%0A method='dummy_shipping',%0A shipping_same_as_billing=True)%0A%0A request = rf.post('/checkout/', data)%0A request.user = AnonymousUser()%0A request.session = %7B%7D%0A group = MagicMock()%0A group.address = None%0A storage = %7B%0A 'address': NEW_ADDRESS%7D%0A step = ShippingStep(request, storage, group,%0A billing_address=Address(**NEW_ADDRESS))%0A assert step.forms_are_valid()%0A step.save()%0A assert storage%5B'address'%5D == NEW_ADDRESS%0A
828b065e857b5f148a0d20b06fd9d45824a1befc
add manager.py flask api for genmodel
genmodel/manager.py
genmodel/manager.py
Python
0
@@ -0,0 +1,1646 @@ +from flask import Flask, request, render_template, jsonify%0Aimport psycopg2%0Aimport os%0A%0A# Connect to Database%0Atry:%0A DB_NAME=os.environ%5B'DB_NAME'%5D%0A DB_USER=os.environ%5B'DB_USER'%5D%0A DB_PASS=os.environ%5B'DB_PASS'%5D%0Aexcept KeyError as e:%0A raise Exception('environment variables for database connection must be set')%0A%0Aconn = psycopg2.connect(dbname=DB_NAME,%0A user=DB_USER,%0A password=DB_PASS,%0A host=localhost,%0A port=5432%0A )%0A%0A%0A%0Aapp = Flask(__name__)%0A%0A%[email protected]('/')%0Adef man():%0A return 'Not implemented'%0A%[email protected]('/jobs', methods=%5B%22GET%22, %22POST%22%5D)%0Adef jobs():%0A if request.method == %22GET%22:%0A cur = conn.cursor()%0A cur.execute('SELECT * FROM jobs')%0A resp = cur.fetchall()%0A return resp %0A elif request.method == %22POST%22:%0A # Take a JSON with attributes of job, start job, then redirect to that%0A # job's monitoring page (jobs/job_id)%0A return 'Not implemented' %0A else:%0A return 'Not implemented'%0A%0A%[email protected]('/jobs/%3Cjob_id%3E', methods=%5B%22GET%22, %22PATCH%22, %22DELETE%22%5D)%0Adef job_for_id(job_id):%0A if request.method == %22GET%22:%0A # Job monitoring for a specific job%0A return 'GET job #' + job_id%0A elif request.method == %22PATCH%22:%0A # TODO: Should this be an endpoint?%0A # Modify job, scale resolvers%0A return 'PATCH job #' + job_id%0A elif request.method == %22DELETE%22:%0A # Remove all dedicated Digital Ocean containers, stop all publishers,%0A # writers and workers. Purge the queue.%0A return 'DELETE job #' + job_id%0A return job_id%0A%0A%0Aif __name__ == '__main__':%0A app.run(port=5000, host= '0.0.0.0', debug=True)%0A
7e28a3fe54c24a38a90bf0e7cf2f634ca78ee2ed
Add script used to generate a Cantor set
cantorset.py
cantorset.py
Python
0
@@ -0,0 +1,2582 @@ +### BEGIN LICENSE%0A# The MIT License (MIT)%0A#%0A# Copyright (C) 2015 Christopher Wells %[email protected]%3E%0A#%0A# Permission is hereby granted, free of charge, to any person obtaining a copy%0A# of this software and associated documentation files (the %22Software%22), to deal%0A# in the Software without restriction, including without limitation the rights%0A# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell%0A# copies of the Software, and to permit persons to whom the Software is%0A# furnished to do so, subject to the following conditions:%0A#%0A# The above copyright notice and this permission notice shall be included in%0A# all copies or substantial portions of the Software.%0A#%0A# THE SOFTWARE IS PROVIDED %22AS IS%22, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR%0A# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,%0A# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE%0A# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER%0A# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,%0A# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN%0A# THE SOFTWARE.%0A### END LICENSE%0A%22%22%22A script which visually draws a Cantor set.%22%22%22%0Aimport turtle%0Aimport time%0A%0Adef rec_draw(l, r, x, xd, t, pen):%0A%09%22%22%22Recursively draw each section of the Cantor set, until the set%0A%09 number of rows has been met.%22%22%22%0A%09if x %3C t:%0A%09%09# Draw the first full line, is redundant after first recursion%0A%09%09pen.up()%0A%09%09pen.goto(l, (-(x - 1) * xd))%0A%09%09pen.down()%0A%09%09pen.goto(r, (-(x - 1) * xd))%0A%0A%09%09# Find the length of each of the lesser lines%0A%09%09diff = (r - l) / 3%0A%0A%09%09# Draw the first lesser line (1/3)%0A%09%09pen.up()%0A%09%09pen.goto(l, -x * xd)%0A%09%09pen.down()%0A%09%09pen.goto(l + diff, -x * xd)%0A%09%09rec_draw(l, l + diff, x + 1, xd, t, pen)%0A%0A%09%09# Draw the second lesser line (3/3)%0A%09%09pen.up()%0A%09%09pen.goto(l + diff * 2, -x * xd)%0A%09%09pen.down()%0A%09%09pen.goto(r, -x * xd)%0A%09%09rec_draw(l + diff * 2, r, x + 1, xd, t, pen)%0A%09else:%0A%09%09# End once the given number of lines has been met%0A%09%09return%0A%0Adef main():%0A%09%22%22%22Draw a visual representation of a Cantor set.%22%22%22%0A%0A%09# Create the pen and set its initial values%0A%09pen = turtle.Turtle()%0A%09pen.ht()%0A%09pen.speed(0)%0A%0A%09# Set the values of the Cantor set%0A%09left = -200%09# The right boundry%0A%09right = 200%09# The left boundry%0A%09starting_row = 0%09# The location of the first row%0A%09row_distance = 10%09# The distance between rows%0A%09rows = 5%09# The number of rows%0A%0A%09# Draw the Cantor set%0A%09rec_draw(left, right, starting_row, row_distance, rows, pen)%0A%09time.sleep(500)%0A%0A# Run the main method of the script%0Aif __name__ == '__main__':%0A%09main()%0A
282383ab66f85ff6eb58b98c34558c02c9cf44eb
add a tool to list recipes used by builders (and ones not on recipes)
scripts/tools/builder_recipes.py
scripts/tools/builder_recipes.py
Python
0.000004
@@ -0,0 +1,2147 @@ +#!/usr/bin/env python%0A# Copyright 2015 The Chromium Authors. All rights reserved.%0A# Use of this source code is governed by a BSD-style license that can be%0A# found in the LICENSE file.%0A%0Aimport argparse%0Aimport json%0Aimport operator%0Aimport os%0Aimport subprocess%0Aimport sys%0Aimport tempfile%0A%0A%0ABASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(%0A os.path.abspath(__file__))))%0A%0A%0ABLACKLISTED_MASTERS = %5B%0A 'master.chromium.reserved',%0A 'master.chromiumos.unused',%0A 'master.client.reserved',%0A 'master.reserved',%0A 'master.tryserver.reserved',%0A%5D%0A%0A%0Adef getMasterConfig(path):%0A with tempfile.NamedTemporaryFile() as f:%0A subprocess.check_call(%5B%0A os.path.join(BASE_DIR, 'scripts', 'tools', 'runit.py'),%0A os.path.join(BASE_DIR, 'scripts', 'tools', 'dump_master_cfg.py'),%0A os.path.join(path),%0A f.name%5D)%0A return json.load(f)%0A%0A%0Adef main(argv):%0A parser = argparse.ArgumentParser()%0A parser.add_argument('--only-nonrecipe', action='store_true')%0A args = parser.parse_args()%0A%0A data = %5B%5D%0A%0A for master in os.listdir(os.path.join(BASE_DIR, 'masters')):%0A if master in BLACKLISTED_MASTERS:%0A continue%0A%0A path = os.path.join(BASE_DIR, 'masters', master)%0A if not os.path.isdir(path):%0A continue%0A%0A config = getMasterConfig(path)%0A for builder in config%5B'builders'%5D:%0A try:%0A recipe = builder%5B'factory'%5D%5B'properties'%5D.get(%0A 'recipe', %5B'%3Cno recipe%3E'%5D)%5B0%5D%0A except Exception as e:%0A recipe = '%3Cerror: %25r%3E' %25 e%0A%0A if (args.only_nonrecipe and%0A recipe != '%3Cno recipe%3E' and%0A not recipe.startswith('%3Cerror:')):%0A continue%0A data.append(%7B%0A 'master': master,%0A 'builder': builder%5B'name'%5D,%0A 'recipe': recipe,%0A %7D)%0A%0A master_padding = max(len(row%5B'master'%5D) for row in data)%0A builder_padding = max(len(row%5B'builder'%5D) for row in data)%0A%0A pattern = '%25%25-%25ds %7C %25%25-%25ds %7C %25%25s' %25 (master_padding, builder_padding)%0A for row in sorted(data, key=operator.itemgetter('master', 'builder')):%0A print pattern %25 (row%5B'master'%5D, row%5B'builder'%5D, row%5B'recipe'%5D)%0A%0A return 0%0A%0A%0Aif __name__ == '__main__':%0A sys.exit(main(sys.argv%5B1:%5D))%0A
bf3f14692b6e2a348f5a0171ad57e494801ed4f4
Add python script to write lib svm expected data format from my collected data
scripts/writelibsvmdataformat.py
scripts/writelibsvmdataformat.py
Python
0
@@ -0,0 +1,2659 @@ +%22%22%22%0AA script to write out lib svm expected data format from my collecting data%0A%22%22%22%0Aimport os%0Aimport sys%0Aimport csv%0Aimport getopt%0A%0Acmd_usage = %22%22%22%0A usage: writelibsvmdataformat.py --inputs=%22/inputs/csv_files%22 --output=%22/output/lib_svm_data%22%0A%22%22%22%0Afeature_space = 10%0A%0A%0Adef write_libsvm_data(input_files, output_file):%0A %22%22%22%0A%0A :param input_files: input files, each of which contains a single label at first row, and a bunch of data following%0A :param output_file: output file, which meet lib svm expected data format%0A %22%22%22%0A with open(output_file, 'wb') as output_csv_file:%0A output_writer = csv.writer(output_csv_file, delimiter=' ')%0A for input_file in input_files:%0A with open(input_file, 'rb') as input_csv_file:%0A input_reader = csv.reader(input_csv_file, delimiter=' ')%0A # assume there is only one item in each row%0A label = input_reader.next()%0A i = 1 # start from index 1%0A line = %5Blabel%5B0%5D%5D%0A for row in input_reader:%0A if int(row%5B0%5D) != 0:%0A line.append(':'.join(%5Bstr(i), row%5B0%5D%5D))%0A i += 1%0A if i %3E feature_space:%0A output_writer.writerow(line)%0A i = 1%0A line = %5Blabel%5B0%5D%5D%0A%0A%0Adef main(argv):%0A %22%22%22%0A :param argv: command line arguments%0A :rtype : error status, success 0 and fail 1%0A %22%22%22%0A try:%0A optlist, args = getopt.getopt(argv%5B1:%5D, %22hi:o:%22, %5B%22help%22, %22inputs=%22, %22output=%22%5D)%0A except getopt.GetoptError:%0A print(%22Command line arguments error, please try --help for help%22)%0A return 1%0A%0A for opt, opt_arg in optlist:%0A if opt in (%22-h%22, %22--help%22):%0A print cmd_usage%0A return 0%0A if opt in (%22-i%22, %22--inputs%22):%0A inputs = opt_arg%0A if not os.path.exists(inputs):%0A print(%22Input files folder not exist%22)%0A return 1%0A elif opt in (%22-o%22, %22--output%22):%0A output_file = opt_arg%0A%0A # print the messages%0A print(%22Inputs folder: %22 + inputs)%0A print(%22Output file: %22 + output_file)%0A assert isinstance(output_file, basestring)%0A assert isinstance(inputs, basestring)%0A input_files = %5B%5D%0A for root, dirs, files in os.walk(inputs):%0A for name in files:%0A if name.endswith('.csv'):%0A input_files.append(os.path.abspath(os.path.join(root, name)))%0A%0A if len(input_files) == 0:%0A print(%22No input files.%22)%0A return 1%0A%0A write_libsvm_data(input_files, output_file)%0A%0A%0Aif __name__ == %22__main__%22:%0A sys.exit(main(sys.argv))
900c93e6917ef92da02cca6865284e0004b01695
add file
aiovk/mixins.py
aiovk/mixins.py
Python
0.000001
@@ -0,0 +1,64 @@ +class LimitRateDriverMixin(object):%0A requests_per_second = 3%0A
a30277835e65195fc68e6708fe5da394bc43e08c
Test Projection
tests/test_projection.py
tests/test_projection.py
Python
0.000001
@@ -0,0 +1,302 @@ +from demosys.test import DemosysTestCase%0Afrom demosys.opengl import Projection%0A%0A%0Aclass ProjectionTest(DemosysTestCase):%0A%0A def test_create(self):%0A proj = Projection(fov=60, near=0.1, far=10)%0A proj.update(fov=75, near=1, far=100)%0A proj.tobytes()%0A proj.projection_constants%0A
1c2c7d5134780e58bd69f24ee06050b2f405d946
Add unit test for run_nohw
src/program/lwaftr/tests/subcommands/run_nohw_test.py
src/program/lwaftr/tests/subcommands/run_nohw_test.py
Python
0
@@ -0,0 +1,1991 @@ +%22%22%22%0ATest the %22snabb lwaftr run_nohw%22 subcommand.%0A%22%22%22%0A%0Aimport unittest%0A%0Afrom random import randint%0Afrom subprocess import call, check_call%0Afrom test_env import DATA_DIR, SNABB_CMD, BaseTestCase%0A%0Aclass TestRun(BaseTestCase):%0A%0A program = %5B%0A str(SNABB_CMD), 'lwaftr', 'run_nohw',%0A %5D%0A cmd_args = %7B%0A '--duration': '1',%0A '--bench-file': '/dev/null',%0A '--conf': str(DATA_DIR / 'icmp_on_fail.conf'),%0A '--inet-if': '',%0A '--b4-if': '',%0A %7D%0A veths = %5B%5D%0A%0A @classmethod%0A def setUpClass(cls):%0A cls.create_veth_pair()%0A%0A @classmethod%0A def create_veth_pair(cls):%0A veth0 = cls.random_veth_name()%0A veth1 = cls.random_veth_name()%0A%0A # Create veth pair.%0A check_call(('ip', 'link', 'add', veth0, 'type', 'veth', 'peer', %5C%0A 'name', veth1))%0A%0A # Set interfaces up.%0A check_call(('ip', 'link', 'set', veth0, 'up'))%0A check_call(('ip', 'link', 'set', veth1, 'up'))%0A%0A # Add interface names to class.%0A cls.veths.append(veth0)%0A cls.veths.append(veth1)%0A%0A @classmethod%0A def random_veth_name(cls):%0A return 'veth%25s' %25 randint(10000, 999999)%0A%0A def test_run_nohw(self):%0A self.execute_run_test(self.cmd_args)%0A%0A def execute_run_test(self, cmd_args):%0A self.cmd_args%5B'--inet-if'%5D = self.veths%5B0%5D%0A self.cmd_args%5B'--b4-if'%5D = self.veths%5B1%5D%0A output = self.run_cmd(self.build_cmd())%0A self.assertIn(b'link report', output,%0A b'%5Cn'.join((b'OUTPUT', output)))%0A%0A def build_cmd(self):%0A result = self.program%0A for item in self.cmd_args.items():%0A for each in item:%0A result.append(each)%0A return result%0A%0A @classmethod%0A def tearDownClass(cls):%0A cls.remove_veths()%0A%0A @classmethod%0A def remove_veths(cls):%0A for i in range(0, len(cls.veths), 2):%0A check_call(('ip', 'link', 'delete', cls.veths%5Bi%5D))%0A%0Aif __name__ == '__main__':%0A unittest.main()%0A
ff3e3e6be3a5a46db73a772f99071e83b9026d98
add wikipedia plugin
plugins/wiki.py
plugins/wiki.py
Python
0.000001
@@ -0,0 +1,606 @@ +import wikipedia%0A%0AMAX_LEN = 350%0A%[email protected]('wiki', 'wk', 'w')%0Adef wiki(argv):%0A %22%22%22wiki %5B-lang%5D %3Carticle%3E%22%22%22%0A lang = 'en'%0A if len(argv) %3C 2:%0A return%0A%0A # check if a language is given%0A argv = argv%5B1:%5D%0A if len(argv) %3E 1 and argv%5B0%5D.startswith('-'):%0A lang = argv%5B0%5D%5B1:%5D%0A argv = argv%5B1:%5D%0A%0A article = ' '.join(argv)%0A try:%0A wikipedia.set_lang(lang)%0A sum = wikipedia.summary(article)%0A except Exception as ex:%0A return %22Couldn't find an article for '%25s'%22 %25 article%0A if len(sum) %3E MAX_LEN:%0A sum = sum%5B:MAX_LEN-3%5D + '...'%0A return sum%0A
8fc91c780cf7f0b43deac69b0e60f2b9472af172
Add script to automatically setup ln -s for the utilities I use
set-links.py
set-links.py
Python
0
@@ -0,0 +1,1072 @@ +%22%22%22%0AHelper script to set up ln -s %3Cdesired utilities%3E on a given bin/ PATH.%0A%22%22%22%0Aimport os%0A%0A%0Autilities = (%0A 'mineutils/mc',%0A 'misc/gitmail',%0A 'misc/pipu',%0A 'misc/reclick',%0A)%0A%0A%0Adef run(program, *args):%0A %22%22%22Spawns a the given program as a subprocess and waits for its exit%22%22%22%0A # I for Invariant argument count, P for using PATH environmental variable%0A os.spawnlp(os.P_WAIT, program, program, *args)%0A%0A%0Aif __name__ == '__main__':%0A where = None%0A try:%0A import pyperclip%0A where = pyperclip.paste()%0A if where.startswith('file://'):%0A where = where%5Blen('file://'):%5D%0A%0A if not os.path.isdir(where):%0A where = None%0A except ImportError:%0A pass%0A%0A if not where:%0A where = input('Where should the links be created?%5Cn: ')%0A%0A if not os.path.isdir(where):%0A os.makedirs(where)%0A%0A utilities = tuple(os.path.abspath(x) for x in utilities)%0A os.chdir(where)%0A for utility in utilities:%0A print(f'Creating link for %7Butility%7D...')%0A run('ln', '-s', utility)%0A%0A print('Done!')%0A
7556fd9f55fe84a82a4843fb0ba43e7ad144e874
Update tendrl_definitions.py
tendrl/node_agent/persistence/tendrl_definitions.py
tendrl/node_agent/persistence/tendrl_definitions.py
from tendrl.bridge_common.etcdobj.etcdobj import EtcdObj from tendrl.bridge_common.etcdobj import fields class TendrlDefinitions(EtcdObj): """A table of the Os, lazily updated """ __name__ = '/tendrl_definitions_node_agent' data = fields.StrField("data") def render(self): self.__name__ = self.__name__ % self.node_uuid return super(TendrlDefinitions, self).render()
Python
0
@@ -272,137 +272,4 @@ a%22)%0A -%0A def render(self):%0A self.__name__ = self.__name__ %25 self.node_uuid%0A return super(TendrlDefinitions, self).render()%0A
907fa0a42dd90ca67d86e61ce7984d5764455fb9
add missing __init__.py
src/distribution/__init__.py
src/distribution/__init__.py
Python
0.001057
@@ -0,0 +1,1348 @@ +# -*- coding: iso-8859-1 -*-%0A# -----------------------------------------------------------------------------%0A# core.py - distutils functions for kaa packages%0A# -----------------------------------------------------------------------------%0A# $Id: distribution.py 2110 2006-11-29 00:41:31Z tack $%0A#%0A# -----------------------------------------------------------------------------%0A# Copyright (C) 2006 Dirk Meyer, Jason Tackaberry%0A#%0A# First Edition: Dirk Meyer %[email protected]%3E%0A# Maintainer: Dirk Meyer %[email protected]%3E%0A#%0A# Please see the file AUTHORS for a complete list of authors.%0A#%0A# This library is free software; you can redistribute it and/or modify%0A# it under the terms of the GNU Lesser General Public License version%0A# 2.1 as published by the Free Software Foundation.%0A#%0A# This library is distributed in the hope that it will be useful, but%0A# WITHOUT ANY WARRANTY; without even the implied warranty of%0A# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU%0A# Lesser General Public License for more details.%0A#%0A# You should have received a copy of the GNU Lesser General Public%0A# License along with this library; if not, write to the Free Software%0A# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA%0A# 02110-1301 USA%0A#%0A# -----------------------------------------------------------------------------%0A%0Afrom core import *%0A
92f63d6ad055aa213b67ad2778187faee1fde821
Add in printParents.py
printParents.py
printParents.py
Python
0.001401
@@ -0,0 +1,1585 @@ +%EF%BB%BFfrom types import *%0A%0A# https://stackoverflow.com/questions/2611892/get-python-class-parents%0Adef printParents(thing, ident = 2):%0A%09'''%0A%09Print out all the parents (till the ancestors) of a given class / object.%0A%09@param indent: Print indentation%0A%09'''%0A%09typ = type(thing)%0A%09if typ is ClassType:%0A%09%09printClassParents(thing, 0)%0A%09elif typ is InstanceType:%0A%09%09print(%22Object: %7B%7D%22.format(thing))%0A%09%09printClassParents(thing.__class__, 0)%0A%09else:%0A%09%09print(%22'%7B%7D' - '%7B%7D'%22.format(thing, type))%0A%09%09print(%22I don't know your parents.%22)%0A%0Adef printClassParents(cls, level = 0, indent = 2):%0A%09thisLevel = ' ' * indent * level + %22%7B%7D --%3E %7B%7B %7B%7D %7D%7D%22.format(%0A%09%09cls, ', '.join(str(c) for c in cls.__bases__))%0A%09print(thisLevel)%0A%09for base in cls.__bases__:%0A%09%09printClassParents(base, level + 1)%0A%0Aif __name__ == '__main__':%0A%09import sys%0A%0A%09def help(names):%0A%09%09print(%22Invalid arg: %7B%7D%5CnSyntax: modeul1.class1 module2.class2%22.format(names))%0A%0A%09if len(sys.argv) %3E 1:%0A%09%09# input args: module1.class1 module2.class2 ...%0A%09%09# eg. printParents.py Tkinter.Frame Tkinker.Button%0A%09%09# https://stackoverflow.com/questions/4821104/python-dynamic-instantiation-from-string-name-of-a-class-in-dynamically-imported%0A%09%09for names in sys.argv%5B1:%5D:%0A%09%09%09mc = names.split('.')%0A%09%09%09if len(mc) == 2:%0A%09%09%09%09# price you pay when you go dynamic%0A%09%09%09%09try:%0A%09%09%09%09%09ctor = getattr(__import__(mc%5B0%5D), mc%5B1%5D)%0A%09%09%09%09%09inst = ctor()%0A%09%09%09%09%09printParents(inst)%0A%09%09%09%09%09print('=' * 32)%0A%09%09%09%09except:%0A%09%09%09%09%09help(names)%0A%09%09%09else:%0A%09%09%09%09help(names)%0A%09else:%09%0A%09%09from ttk import *%0A%09%09button = Button()%0A%09%09printParents(button)%0A%09%09print('=' * 32)%0A%09%09printParents(Label)%0A%09%09print('=' * 32)%0A%09%09printParents(8)%0A
87565c1e6032bff2cc3e20f5c4f46b7a17977f7c
Add organisation for TFL Dial a Ride
migrations/versions/0098_tfl_dar.py
migrations/versions/0098_tfl_dar.py
Python
0
@@ -0,0 +1,666 @@ +%22%22%22empty message%0A%0ARevision ID: 0098_tfl_dar%0ARevises: 0097_notnull_inbound_provider%0ACreate Date: 2017-06-05 16:15:17.744908%0A%0A%22%22%22%0A%0A# revision identifiers, used by Alembic.%0Arevision = '0098_tfl_dar'%0Adown_revision = '0097_notnull_inbound_provider'%0A%0Afrom alembic import op%0Aimport sqlalchemy as sa%0Afrom sqlalchemy.dialects import postgresql%0A%0ATFL_DAR_ID = '1d70f564-919b-4c68-8bdf-b8520d92516e'%0A%0A%0Adef upgrade():%0A op.execute(%22%22%22INSERT INTO organisation VALUES (%0A '%7B%7D',%0A '',%0A 'tfl_dar_x2.png',%0A ''%0A )%22%22%22.format(TFL_DAR_ID))%0A%0A%0Adef downgrade():%0A op.execute(%22%22%22%0A DELETE FROM organisation WHERE %22id%22 = '%7B%7D'%0A %22%22%22.format(TFL_DAR_ID))%0A
22585d29220709dc3a3de16b03c626ca27c715ca
Add migration version? Not sure if this is right
migrations/versions/3025c44bdb2_.py
migrations/versions/3025c44bdb2_.py
Python
0.000006
@@ -0,0 +1,292 @@ +%22%22%22empty message%0A%0ARevision ID: 3025c44bdb2%0ARevises: None%0ACreate Date: 2014-12-16 12:13:55.759378%0A%0A%22%22%22%0A%0A# revision identifiers, used by Alembic.%0Arevision = '3025c44bdb2'%0Adown_revision = None%0A%0Afrom alembic import op%0Aimport sqlalchemy as sa%0A%0A%0Adef upgrade():%0A pass%0A%0A%0Adef downgrade():%0A pass%0A
e21657f377cab4319c1a3ce6fedc76d15c8e6c0a
UVA 11777 Automate the grades
cp-book/ch2/lineards/collections/_11777_AutomateTheGrades.py
cp-book/ch2/lineards/collections/_11777_AutomateTheGrades.py
Python
0.999999
@@ -0,0 +1,1157 @@ +# Problem name: 11777 Automate the Grades%0A# Problem url: https://uva.onlinejudge.org/external/117/11777.pdf%0A# Author: Andrey Yemelyanov%0A%0Aimport sys%0Aimport math%0A%0Adef readline():%0A%09return sys.stdin.readline().strip()%0A%0Adef main():%0A%09n_tests = int(readline())%0A%09for test in range(n_tests):%0A%09%09print(%22Case %7B%7D: %7B%7D%22.format((test + 1), get_final_grade(*%5Bint(x) for x in readline().split()%5D)))%0A%0Adef get_final_grade(term1, term2, final, attendance, class_test1, class_test2, class_test3):%0A%09class_test_grade = get_class_test_grade(class_test1, class_test2, class_test3)%0A%09total_grade = term1 + term2 + final + attendance + class_test_grade%0A%09return get_letter_grade(total_grade)%0A%0Adef get_class_test_grade(class_test1, class_test2, class_test3):%0A%09sorted_grades = %5Bclass_test1, class_test2, class_test3%5D%0A%09sorted_grades.sort()%0A%09return (sorted_grades%5B-1%5D + sorted_grades%5B-2%5D) / 2%0A%0Adef get_letter_grade(total_grade):%0A%09if total_grade %3E= 90:%0A%09%09return %22A%22%0A%09elif total_grade %3E= 80 and total_grade %3C 90:%0A%09%09return %22B%22%0A%09elif total_grade %3E= 70 and total_grade %3C 80:%0A%09%09return %22C%22%0A%09elif total_grade %3E= 60 and total_grade %3C 70:%0A%09%09return %22D%22%0A%09return %22F%22%0A%0Aif __name__==%22__main__%22:%0A main()%0A
8eaf6eca9c0bf1be8ef5b76e166098b1cd35cef7
remove bootloaderiness for easy compile
boards/NRF52832DK.py
boards/NRF52832DK.py
#!/bin/false # This file is part of Espruino, a JavaScript interpreter for Microcontrollers # # Copyright (C) 2013 Gordon Williams <[email protected]> # # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. # # ---------------------------------------------------------------------------------------- # This file contains information for a specific board - the available pins, and where LEDs, # Buttons, and other in-built peripherals are. It is used to build documentation as well # as various source and header files for Espruino. # ---------------------------------------------------------------------------------------- import pinutils; info = { 'name' : "nRF52 Preview Development Kit", 'link' : [ "https://www.nordicsemi.com/Products/Bluetooth-Smart-Bluetooth-low-energy/nRF52832" ], # This is the PCA10036 'default_console' : "EV_SERIAL1", 'default_console_tx' : "D6", 'default_console_rx' : "D8", 'default_console_baudrate' : "9600", # Number of variables can be WAY higher on this board 'variables' : 2040, # How many variables are allocated for Espruino to use. RAM will be overflowed if this number is too high and code won't compile. 'bootloader' : 1, 'binary_name' : 'espruino_%v_nrf52832.bin', 'build' : { 'defines' : [ 'USE_BLUETOOTH' ] } }; chip = { 'part' : "NRF52832", 'family' : "NRF52", 'package' : "QFN48", 'ram' : 64, # Currently there is a bug with NRF52 preview DK's RAM but this will be fixed next revision. 'flash' : 512, 'speed' : 64, 'usart' : 1, 'spi' : 3, 'i2c' : 2, 'adc' : 1, 'dac' : 0, 'saved_code' : { 'address' : ((128 - 3) * 4096), 'page_size' : 4096, 'pages' : 0, 'flash_available' : (512 - 108 - 24) # Total flash - softdevice - bootloader (this one is code signed unlike nrF51). }, }; devices = { 'LED1' : { 'pin' : 'D17', 'inverted' : True }, 'LED2' : { 'pin' : 'D18', 'inverted' : True }, 'LED3' : { 'pin' : 'D19', 'inverted' : True }, 'LED4' : { 'pin' : 'D20', 'inverted' : True }, 'BTN1' : { 'pin' : 'D13', 'inverted' : True }, 'BTN2' : { 'pin' : 'D14', 'inverted' : True }, 'BTN3' : { 'pin' : 'D15', 'inverted' : True }, 'BTN4' : { 'pin' : 'D16', 'inverted' : True }, 'RX_PIN_NUMBER' : { 'pin' : 'D8'}, 'TX_PIN_NUMBER' : { 'pin' : 'D6'}, 'CTS_PIN_NUMBER' : { 'pin' : 'D7'}, 'RTS_PIN_NUMBER' : { 'pin' : 'D5'}, }; # left-right, or top-bottom order board = { 'left' : [ 'VDD', 'VDD', 'RESET', 'VDD','5V','GND','GND','PD3','PD4','PD28','PD29','PD30','PD31'], 'right' : [ 'PD27', 'PD26', 'PD2', 'GND', 'PD25','PD24','PD23', 'PD22','PD20','PD19','PD18','PD17','PD16','PD15','PD14','PD13','PD12','PD11','PD10','PD9','PD8','PD7','PD6','PD5','PD21','PD1','PD0'], }; board["_css"] = """ """; def get_pins(): pins = pinutils.generate_pins(0,31) # 32 General Purpose I/O Pins. pinutils.findpin(pins, "PD0", True)["functions"]["XL1"]=0; pinutils.findpin(pins, "PD1", True)["functions"]["XL2"]=0; pinutils.findpin(pins, "PD5", True)["functions"]["RTS"]=0; pinutils.findpin(pins, "PD6", True)["functions"]["TXD"]=0; pinutils.findpin(pins, "PD7", True)["functions"]["CTS"]=0; pinutils.findpin(pins, "PD8", True)["functions"]["RXD"]=0; pinutils.findpin(pins, "PD9", True)["functions"]["NFC1"]=0; pinutils.findpin(pins, "PD10", True)["functions"]["NFC2"]=0; pinutils.findpin(pins, "PD13", True)["functions"]["Button_1"]=0; pinutils.findpin(pins, "PD14", True)["functions"]["Button_2"]=0; pinutils.findpin(pins, "PD15", True)["functions"]["Button_3"]=0; pinutils.findpin(pins, "PD16", True)["functions"]["Button_4"]=0; pinutils.findpin(pins, "PD17", True)["functions"]["LED_1"]=0; pinutils.findpin(pins, "PD18", True)["functions"]["LED_2"]=0; pinutils.findpin(pins, "PD19", True)["functions"]["LED_3"]=0; pinutils.findpin(pins, "PD20", True)["functions"]["LED_4"]=0; pinutils.findpin(pins, "PD2", True)["functions"]["ADC1_IN0"]=0; pinutils.findpin(pins, "PD3", True)["functions"]["ADC1_IN1"]=0; pinutils.findpin(pins, "PD4", True)["functions"]["ADC1_IN2"]=0; pinutils.findpin(pins, "PD5", True)["functions"]["ADC1_IN3"]=0; pinutils.findpin(pins, "PD28", True)["functions"]["ADC1_IN4"]=0; pinutils.findpin(pins, "PD29", True)["functions"]["ADC1_IN5"]=0; pinutils.findpin(pins, "PD30", True)["functions"]["ADC1_IN6"]=0; pinutils.findpin(pins, "PD31", True)["functions"]["ADC1_IN7"]=0; #The boot/reset button will function as a reset button in normal operation. Pin reset on PD21 needs to be enabled on the nRF52832 device for this to work. return pins
Python
0
@@ -1292,16 +1292,17 @@ ompile.%0A +# 'bootlo
64c70f3f73d14d5bdd18cf5c4ad8b15ec745f517
Add helpful script for ascii checking - fyi @bruskiza
config/check_ascii.py
config/check_ascii.py
Python
0
@@ -0,0 +1,586 @@ +import json%0Afiles = %5B%22go-ussd_public.ibo_NG.json%22%5D%0A%0A%0Adef is_ascii(s):%0A return all(ord(c) %3C 128 for c in s)%0A%0Acurrent_message_id = 0%0Afor file_name in files:%0A json_file = open(file_name, %22rU%22).read()%0A json_data = json.loads(json_file)%0A print %22Proccessing %25s%5Cn-------%22 %25 file_name%0A%0A for key, value in json_data.items():%0A # Ignore non-content keys and empty keys%0A if len(value) == 2:%0A if not is_ascii(value%5B1%5D):%0A print (%22Non-ascii translation found of %3C%25s%3E: %25s%22 %25 (key, value%5B1%5D))%0A%0A print %22Done Proccessing %25s%5Cn-------%22 %25 file_name%0A
289ce4a720c5863f6a80e1b86083fd2919b52f14
Add file for tests of start symbol as not nonterminal
tests/startsymbol_tests/NotNonterminalTest.py
tests/startsymbol_tests/NotNonterminalTest.py
Python
0.000001
@@ -0,0 +1,258 @@ +#!/usr/bin/env python%0A%22%22%22%0A:Author Patrik Valkovic%0A:Created 10.08.2017 23:12%0A:Licence GNUv3%0APart of grammpy%0A%0A%22%22%22%0A%0Afrom unittest import TestCase, main%0Afrom grammpy import *%0A%0A%0Aclass NotNonterminalTest(TestCase):%0A pass%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
764f02e5e8c53b47cb2e28375a049accba442f0c
Create __init__.py
app/__init__.py
app/__init__.py
Python
0.000429
@@ -0,0 +1,45 @@ +# -*- encoding: utf-8 -*-%0A%0A# app/__init__.py%0A
d9291843d575e587efdd7aa0c4605fee766dc232
clean up test queries
examples/test_query.py
examples/test_query.py
Python
0.000007
@@ -0,0 +1,1573 @@ +from raco import RACompiler%0Afrom raco.language import CCAlgebra, MyriaAlgebra%0Afrom raco.algebra import LogicalAlgebra%0A%0Aimport logging%0Alogging.basicConfig(level=logging.DEBUG)%0ALOG = logging.getLogger(__name__)%0A%0A%0Adef testEmit(query, name):%0A LOG.info(%22compiling %25s%22, query)%0A%0A # Create a compiler object%0A dlog = RACompiler()%0A%0A # parse the query%0A dlog.fromDatalog(query)%0A #print dlog.parsed%0A LOG.info(%22logical: %25s%22,dlog.logicalplan)%0A%0A dlog.optimize(target=CCAlgebra, eliminate_common_subexpressions=False)%0A%0A LOG.info(%22physical: %25s%22,dlog.physicalplan%5B0%5D%5B1%5D)%0A%0A # generate code in the target language%0A code = dlog.compile()%0A %0A with open(name+'.c', 'w') as f:%0A f.write(code)%0A%0A%0Aqueries = %5B%0A(%22A(s1) :- T(s1)%22, %22scan%22),%0A(%22A(s1) :- T(s1), s%3E10%22, %22select%22),%0A(%22A(s1) :- T(s1), s%3E0, s%3C10%22, %22select_conjunction%22),%0A(%22A(s1,s2) :- T(s1,s2), s%3E10, s2%3E10%22, %22two_var_select%22),%0A(%22A(s1,o2) :- T(s1,p1,o1), R(o2,p1,o2)%22, %22join%22),%0A(%22A(a,b,c) :- R(a,b), S(b,c)%22, %22two_path%22),%0A(%22A(a,c) :- R(a,b), S(b,c)%22, %22two_hop%22),%0A(%22A(a,b,c) :- R(a,b), S(b,c), T(c,d)%22, %22three_path%22),%0A(%22A(a,b,c) :- R(a,b), S(b,c), T(c,a)%22, %22directed_triangles%22),%0A(%22A(s1,s2,s3) :- T(s1,s2,s3), R(s3,s4), s1%3Cs2, s4%3C100%22, %22select_then_join%22),%0A#(%22A(a,b,c) :- R(a,b), S(b,c), T(c,a), a%3Cb, b%3Cc%22, %22increasing_triangles%22),%0A#(%22A(s1,s2,s3) :- T(s1,s2,s3), R(s3,s4), s1%3Cs4%22, %22equi_and_range%22),%0A#(%22A(s1,s2,s3) :- T(s1,s2),R(s3,s4), s1%3Cs3%22, %22range_join%22),%0A#(%22A(a,b,c,d,e):-X(a,b),Y(a,c),Z(a,d,e),T(a,b),K(b,a)%22, %22complex_joins%22),%0A%5D%0A%0Afor q in queries:%0A query, name = q%0A testEmit(query, name)%0A%0A
acfa4877ac50a3895cc9f9cb2e349f948d4b8001
add a script to fetch official hero data from battle.net
bin/get_official_heroes.py
bin/get_official_heroes.py
Python
0
@@ -0,0 +1,869 @@ +import sys%0A%0Afrom selenium import webdriver%0Afrom selenium.common.exceptions import WebDriverException%0A%0A%22%22%22%0AThe official heroes listing on battle.net is populated by a list of%0AObjects defined in JS (window.heroes). This script fetches the full%0Alist and outputs a list of tuples relating official hero names to the%0Abattle.net slugs.%0A%0ATo run this script, you should install phantomjs in addition to the%0Aimport dependencies.%0A%22%22%22%0A%0Adef get_heroes_data():%0A # We prefer the PhantomJS driver to avoid opening any GUI windows.%0A browser = webdriver.PhantomJS()%0A browser.get(%22http://us.battle.net/heroes/en/heroes/#/%22)%0A heroes = browser.execute_script(%22return window.heroes;%22)%0A browser.quit()%0A%0A return heroes%0A%0Adef main():%0A heroes = get_heroes_data()%0A heroes = %5B(h%5B'name'%5D, h%5B'slug'%5D) for h in heroes%5D%0A print(heroes)%0A%0Aif __name__ == %22__main__%22:%0A main()%0A
381c2537eff5003758d552281edfd885ee40ab80
Add migrations
sideloader/migrations/0003_auto_20141203_1708.py
sideloader/migrations/0003_auto_20141203_1708.py
Python
0.000001
@@ -0,0 +1,921 @@ +# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import models, migrations%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('sideloader', '0002_auto_20141203_1611'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='project',%0A name='build_script',%0A field=models.CharField(default=b'', max_length=255, blank=True),%0A preserve_default=True,%0A ),%0A migrations.AlterField(%0A model_name='project',%0A name='package_name',%0A field=models.CharField(default=b'', max_length=255, blank=True),%0A preserve_default=True,%0A ),%0A migrations.AlterField(%0A model_name='project',%0A name='postinstall_script',%0A field=models.CharField(default=b'', max_length=255, blank=True),%0A preserve_default=True,%0A ),%0A %5D%0A
f405829f9f4bed9c833f7e25dc97610e34b5dd71
Add JSONField tests
cornflake/tests/fields/test_json_field.py
cornflake/tests/fields/test_json_field.py
Python
0.000001
@@ -0,0 +1,717 @@ +import pytest%0A%0Afrom cornflake.fields import JSONField%0A%0A%[email protected](('value', 'expected'), %5B%0A (True, True),%0A (False, False),%0A (123, 123),%0A (123.456, 123.456),%0A ('foo', 'foo'),%0A (%5B'foo', 'bar'%5D, %5B'foo', 'bar'%5D),%0A (%7B'foo': 'bar'%7D, %7B'foo': 'bar'%7D)%0A%5D)%0Adef test_to_representation(value, expected):%0A assert JSONField().to_representation(value) == expected%0A%0A%[email protected](('data', 'expected'), %5B%0A (True, True),%0A (False, False),%0A (123, 123),%0A (123.456, 123.456),%0A ('foo', 'foo'),%0A (%5B'foo', 'bar'%5D, %5B'foo', 'bar'%5D),%0A (%7B'foo': 'bar'%7D, %7B'foo': 'bar'%7D)%0A%5D)%0Adef test_to_internal_value(data, expected):%0A assert JSONField().to_internal_value(data) == expected%0A
8df06647abc7e5125e88af68000f04ac9eca3290
add missing file
quadpy/_exception.py
quadpy/_exception.py
Python
0.000003
@@ -0,0 +1,39 @@ +class QuadpyError(Exception):%0A pass%0A
f641c8aa8e2eb5d98a90a10813fae6af4b136133
Add command that reindexes all tenants in parallel
bluebottle/clients/management/commands/reindex.py
bluebottle/clients/management/commands/reindex.py
Python
0.000001
@@ -0,0 +1,1124 @@ +from optparse import make_option%0A%0Aimport subprocess%0Afrom multiprocessing import Pool%0Afrom bluebottle.common.management.commands.base import Command as BaseCommand%0A%0Afrom bluebottle.clients.models import Client%0A%0A%0Adef reindex(schema_name):%0A print(f'reindexing tenant %7Bschema_name%7D')%0A return (%0A schema_name,%0A subprocess.call(%0A f'./manage.py tenant_command -s %7Bschema_name%7D search_index --rebuild -f',%0A shell=True%0A )%0A )%0A%0A%0Aclass Command(BaseCommand):%0A help = 'Reindex all tenants'%0A%0A option_list = BaseCommand.options + (%0A make_option(%0A '--processes',%0A default=8,%0A help='How many processes run in parallel'%0A ),%0A )%0A%0A def handle(self, *args, **options):%0A pool = Pool(processes=options%5B'processes'%5D)%0A%0A tasks = %5Bpool.apply_async(reindex, args=%5Bstr(tenant.schema_name)%5D) for tenant in Client.objects.all()%5D%0A%0A results = %5Bresult.get() for result in tasks%5D%0A%0A for tenant, result in results:%0A if result != 0:%0A print(f'Tenant failed to index: %7Btenant%7D')%0A%0A pool.close()%0A
1bfc53f5645d6dc7dbbdd020f23e86bebfdc2fc9
Add quick.py (quicksort)
python/quick.py
python/quick.py
Python
0.000001
@@ -0,0 +1,634 @@ +#!/usr/bin/env python3%0A%0Adef main():%0A arr = %5B%5D%0A fname = sys.argv%5B1%5D%0A with open(fname, 'r') as f:%0A for line in f:%0A arr.append(int(line.rstrip('%5Cr%5Cn')))%0A quicksort(arr, start=0, end=len(arr)-1)%0A print('Sorted list is: ', arr)%0A return%0A%0Adef quicksort(arr, start, end):%0A if end - start %3C 1:%0A return 0%0A b = start + 1%0A for i in range(start+1, end):%0A if arr%5Bi%5D %3C= arr%5Bstart%5D:%0A arr%5Bb%5D, arr%5Bi%5D = arr%5Bi%5D, arr%5Bb%5D%0A b += 1%0A arr%5Bstart%5D, arr%5Bb-1%5D = arr%5Bb-1%5D, arr%5Bstart%5D%0A quicksort(arr, start, b-1)%0A quicksort(arr, b, end)%0A%0Aif __name__ == '__main__':%0A main()%0A
25a6ad2a6b37bac4dd553c4e534092f2261d6037
Add response classes
client/responses.py
client/responses.py
Python
0.000001
@@ -0,0 +1,618 @@ +%0A%0Aclass SuccessResponse:%0A def __new__(cls, data=None, text=None, *args, **kwargs):%0A return %7B%0A 'status_code': 200,%0A 'ok': True,%0A 'data': data,%0A 'text': text%0A %7D%0A%0A%0Aclass NonSuccessResponse:%0A def __new__(cls, status=400, text=None, *args, **kwargs):%0A return %7B%0A 'status_code': status,%0A 'ok': True,%0A 'text': text%0A %7D%0A%0A%0Aclass ErrorResponse:%0A def __new__(cls, status=400, text=None, *args, **kwargs):%0A return %7B%0A 'status_code': status,%0A 'ok': False,%0A 'text': text%0A %7D%0A
d89bb401926698dc829be937d8f9c1959ecfd580
make ok,eq actual functions
cement/utils/test.py
cement/utils/test.py
"""Cement testing utilities.""" import unittest from ..core import backend, foundation # shortcuts from nose.tools import ok_ as ok from nose.tools import eq_ as eq from nose.tools import raises from nose import SkipTest class TestApp(foundation.CementApp): """ Basic CementApp for generic testing. """ class Meta: label = 'test' config_files = [] argv = [] class CementTestCase(unittest.TestCase): """ A sub-class of unittest.TestCase. """ ok = ok eq = eq def __init__(self, *args, **kw): super(CementTestCase, self).__init__(*args, **kw) def setUp(self): """ Sets up self.app with a generic TestApp(). Also resets the backend hooks and handlers so that everytime an app is created it is setup clean each time. """ self.app = self.make_app() def make_app(self, *args, **kw): """ Create a generic app using TestApp. Arguments and Keyword Arguments are passed to the app. """ self.reset_backend() return TestApp(*args, **kw) def reset_backend(self): """ Remove all registered hooks and handlers from the backend. """ for _handler in backend.handlers.copy(): del backend.handlers[_handler] for _hook in backend.hooks.copy(): del backend.hooks[_hook]
Python
0.000012
@@ -510,32 +510,8 @@ %22%22%22%0A - ok = ok%0A eq = eq%0A @@ -507,32 +507,32 @@ %22%22%22%0A %0A + def __init__ @@ -610,25 +610,29 @@ , **kw)%0A + %0A - def setU @@ -884,29 +884,25 @@ e_app()%0A - %0A + def make @@ -1442,24 +1442,24 @@ %5B_hook%5D %0A - @@ -1446,20 +1446,254 @@ ok%5D %0A +%0A def ok(self, expr, msg=None):%0A %22%22%22Shorthand for assert.%22%22%22%0A return ok(expr, msg)%0A %0A def eq(self, a, b, msg=None):%0A %22%22%22Shorthand for 'assert a == b, %22%25r != %25r%22 %25 (a, b)'. %22%22%22%0A return eq(a, b, msg)
8e1e6585c4bfa76ebbd945d765c6a4a3dc98025d
Add new package: dnstracer (#18933)
var/spack/repos/builtin/packages/dnstracer/package.py
var/spack/repos/builtin/packages/dnstracer/package.py
Python
0
@@ -0,0 +1,867 @@ +# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other%0A# Spack Project Developers. See the top-level COPYRIGHT file for details.%0A#%0A# SPDX-License-Identifier: (Apache-2.0 OR MIT)%0A%0Afrom spack import *%0A%0A%0Aclass Dnstracer(Package):%0A %22%22%22Dnstracer determines where a given Domain Name Server gets%0A its information from, and follows the chain of DNS servers back to%0A the servers which know the data.%22%22%22%0A%0A homepage = %22https://github.com/Orc/dnstracer%22%0A git = %22https://github.com/Orc/dnstracer.git%22%0A%0A version('master', branch='master')%0A%0A phases = %5B'configure', 'build', 'install'%5D%0A%0A def configure(self, spec, prefix):%0A configure = Executable('./configure.sh')%0A configure('--prefix=%7B0%7D'.format(prefix))%0A%0A def build(self, spec, prefix):%0A make()%0A%0A def install(self, spec, prefix):%0A make('install')%0A
2f889b045c1a03b3b046127380f15909ea117265
add new package (#25844)
var/spack/repos/builtin/packages/py-kornia/package.py
var/spack/repos/builtin/packages/py-kornia/package.py
Python
0
@@ -0,0 +1,716 @@ +# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other%0A# Spack Project Developers. See the top-level COPYRIGHT file for details.%0A#%0A# SPDX-License-Identifier: (Apache-2.0 OR MIT)%0A%0Afrom spack import *%0A%0A%0Aclass PyKornia(PythonPackage):%0A %22%22%22Open Source Differentiable Computer Vision Library for PyTorch.%22%22%22%0A%0A homepage = %22https://www.kornia.org/%22%0A pypi = %22kornia/kornia-0.5.10.tar.gz%22%0A%0A version('0.5.10', sha256='428b4b934a2ba7360cc6cba051ed8fd96c2d0f66611fdca0834e82845f14f65d')%0A%0A depends_on('[email protected]:', type=('build', 'run'))%0A depends_on('py-setuptools', type='build')%0A depends_on('py-pytest-runner', type='build')%0A depends_on('[email protected]:', type=('build', 'run'))%0A
e73aac38882b90e7219035800b400c2ed1e181ef
add http data wrapper that can be used to specify options for a specific request
robj/lib/httputil.py
robj/lib/httputil.py
Python
0
@@ -0,0 +1,3214 @@ +#%0A# Copyright (c) 2010 rPath, Inc.%0A#%0A# This program is distributed under the terms of the MIT License as found %0A# in a file called LICENSE. If it is not present, the license%0A# is always available at http://www.opensource.org/licenses/mit-license.php.%0A#%0A# This program is distributed in the hope that it will be useful, but%0A# without any waranty; without even the implied warranty of merchantability%0A# or fitness for a particular purpose. See the MIT License for full details.%0A#%0A%0A%22%22%22%0AModule for httplib customizations.%0A%22%22%22%0A%0Afrom robj.lib import util%0A%0Aclass HTTPData(object):%0A __slots__ = ('data', 'method', 'size', 'headers', 'contentType', 'callback',%0A 'chunked', 'bufferSize', 'rateLimit', )%0A%0A CHUNK_SIZE = 262144%0A BUFFER_SIZE = 8192%0A%0A def __init__(self, data=None, method=None, size=None, headers=None,%0A contentType=None, callback=None, chunked=None, bufferSize=None,%0A rateLimit=None):%0A%0A if headers is None:%0A headers = %7B%7D%0A%0A if data is not None:%0A if hasattr(data, 'read'):%0A if chunked:%0A headers%5B'Transfer-Encoding'%5D = 'Chunked'%0A else:%0A data = data.encode('utf-8')%0A size = len(data)%0A%0A self.method = method%0A self.data = data%0A self.headers = headers%0A self.size = size%0A self.contentType = contentType%0A self.callback = callback%0A self.chunked = chunked%0A self.bufferSize = bufferSize or self.BUFFER_SIZE%0A self.rateLimit = rateLimit%0A%0A def iterheaders(self):%0A for k, v in sorted(self.headers.iteritems()):%0A yield k, str(v)%0A # Don't send a Content-Length header if chunking%0A if not self.chunked and self.size is not None:%0A yield 'Content-Length', str(self.size)%0A if self.contentType is not None:%0A yield 'Content-Type', self.contentType%0A%0A def writeTo(self, connection):%0A if self.data is None:%0A return%0A%0A if not hasattr(self.data, 'read'):%0A connection.send(self.data)%0A return%0A%0A if not self.chunked:%0A util.copyfileobj(self.data, connection, bufSize=self.bufferSize,%0A callback=self.callback, rateLimit=self.rateLimit,%0A sizeLimit=self.size)%0A return%0A%0A assert self.size is not None%0A%0A # keep track of the total amount of data sent so that the%0A # callback passed in to copyfileobj can report progress correctly%0A sent = 0%0A chunk = self.CHUNK_SIZE%0A while self.size - sent:%0A if chunk %3E self.size - sent:%0A chunk = self.size - sent%0A%0A # first send the hex-encoded size%0A connection.send('%25x%5Cr%5Cn' %25 chunk)%0A%0A # then the chunk of data%0A util.copyfileobj(self.data, connection, bufSize=chunk,%0A callback=self.callback, rateLimit=self.rateLimit,%0A sizeLimit=chunk, total=sent)%0A%0A # send %5Cr%5Cn after the chunked data%0A connection.send(%22%5Cr%5Cn%22)%0A%0A sent += chunk%0A # terminate the chunked encoding%0A connection.send('0%5Cr%5Cn%5Cr%5Cn')%0A%0A%0Adef isHTTPData(obj):%0A return isinstance(obj, HTTPData)%0A
bc2abe4c295a371358064952e6c3afa395a4bd13
Rename Longest-Common-Prefix.py to LongestCommonPrefixtwo.py
leetcode/14.-Longest-Common-Prefix/LongestCommonPrefixtwo.py
leetcode/14.-Longest-Common-Prefix/LongestCommonPrefixtwo.py
Python
0.999999
@@ -0,0 +1,712 @@ +#!/usr/bin/python%0A#_*_ coding:utf-8 _*_%0A%0Aclass Solution(object): %0A def longestCommonPrefix(self, strs): %0A %22%22%22 %0A :type strs: List%5Bstr%5D %0A :rtype: str %0A %22%22%22%0A strNum=len(strs) #%E5%AD%97%E7%AC%A6%E4%B8%B2%E7%9A%84%E9%95%BF%E5%BA%A6%0A if strNum == 0 or strs == None:%0A return ''%0A else:%0A prefix = strs%5B0%5D #%E5%AF%B9%E5%89%8D%E7%BC%80%E8%BF%9B%E8%A1%8C%E8%B5%8B%E5%80%BC%0A for i in range(strNum):%0A if prefix == '' or strs%5Bi%5D == '':%0A return ''%0A comlen = min(len(prefix),len(strs%5Bi%5D)) #%E5%87%8F%E5%B0%91%E5%AF%BB%E6%89%BE%E5%85%AC%E5%85%B1%E6%9C%80%E5%B0%8F%E5%89%8D%E7%BC%80%E7%9A%84%E9%95%BF%E5%BA%A6%0A j = 0%0A while j %3C comlen and prefix%5Bj%5D == strs%5Bi%5D%5Bj%5D: #%E5%AF%BB%E6%89%BE%E5%AF%BB%E6%89%BE%E5%85%AC%E5%85%B1%E6%9C%80%E5%B0%8F%E5%89%8D%E7%BC%80%0A j += 1%0A prefix = prefix%5B0:j%5D%0A return prefix%0A
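The Chinese comments in the hunk annotate the algorithm, roughly: "number of strings", "seed the prefix with the first string", "cap the comparison at the shorter length", and "scan for the shared prefix". A quick usage sketch, assuming the decoded class above:

    s = Solution()
    assert s.longestCommonPrefix(["flower", "flow", "flight"]) == "fl"
    assert s.longestCommonPrefix([]) == ''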
5f9c9500296627a94221ecd9614209a2c791e8b9
remove pointless condition
plugins/messages.py
plugins/messages.py
import random class Message: def __init__(self, sent, msg): self.sent = sent self.msg = msg def replyFormat(self): return 'From {user}: {msg}'.format(user = self.sent, msg = self.msg) class MessageDatabase: def __init__(self): self.messages = {} def pendingMessages(self, user): cnt = len(self.messages[user]) return 'You have {nr} message{s} waiting for you.\nUse ~read [number] to get [number] of messages shown to you'.format(nr = cnt, s = 's' if cnt > 1 else '') def addMessage(self, to, sent, msg): if to not in self.messages: self.messages[to] = {} if sent in self.messages[to]: return False self.messages[to][sent] = Message(sent, msg) return True def getMessage(self, user): return self.removeRandomMessage(user).replyFormat() def getMessages(self, user, amnt): ''' This removes amnt number of messages from the message service ''' # This can be super-spammy for users with a lot of pending messages # as they can opt to look at all at once reply = '' if amnt > len(self.messages[user]): amnt = len(self.messages[user]) while amnt > 0 and len(self.messages[user]) > 0: reply += self.getMessage(user) + ('\n' if amnt > 1 else '') amnt -= 1 # Remove the user from the list if there's no messages left if not self.messages[user]: self.messages.pop(user) return reply def getAllMessages(self, user): ''' This gets and delete every message to this user from storage ''' # No need to test for existance, this assumes a message exists # and usage should first test for existance. messages = self.removeAllMessages(user) combine = [] for msg in messages: combine.append(messages[msg].replyFormat()) return '\n'.join(combine) def hasMessage(self, user): return user in self.messages def alreadySentMessage(self, user, frm): return user in self.messages and frm in self.messages[user] def removeRandomMessage(self, to): return self.messages[to].pop(random.choice(list(self.messages[to].keys())), None) # Unused but still supported def removeAllMessages(self, to): return self.messages.pop(to, None)
Python
0.000581
@@ -1206,41 +1206,8 @@ nt %3E - 0 and len(self.messages%5Buser%5D) %3E 0:%0A @@ -1797,17 +1797,16 @@ essages: - %0A @@ -1853,22 +1853,16 @@ ormat()) - %0A
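Decoded, the first character-level hunk simplifies the loop guard in getMessages; the length check is redundant because amnt was already clamped to len(self.messages[user]) just above, and each iteration removes exactly one message while decrementing amnt. The two later hunks appear to strip trailing whitespace only.

    # before: while amnt > 0 and len(self.messages[user]) > 0:
    # after:  while amnt > 0: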
badbf8c89216b97ac29ea3582d99d28535f82a7e
Update __init__.py
slither/__init__.py
slither/__init__.py
from slither import slither __all__ = ['slither','Mouse','Stage','Sprite','Sound']
Python
0.000072
@@ -1,20 +1,14 @@ from -slither +. import
79df8ab80e6b14f16af125895f5b7338e5c41a60
add IOTools
base/IOTools.py
base/IOTools.py
Python
0.000001
@@ -0,0 +1,2117 @@ +import ROOT%0Aimport os%0A%0A%0Aclass Writer:%0A def __init__(self, directory=None):%0A %22%22%22%0A%0A :param directory:%0A %22%22%22%0A if directory is None:%0A directory = os.path.abspath(os.curdir)%0A self.dir = directory%0A self.__check_and_create_directory(self.dir)%0A%0A def __check_and_create_directory(self, directory):%0A _logger.debug(%22Check if directory: %25s exists%22 %25 (directory))%0A if not os.path.exists(directory):%0A _logger.debug(%22Create directory: %25s exists%22 %25 (directory))%0A os.makedirs(directory)%0A%0A def dump_canvas(self, canvas, message=None, image=None):%0A if image:%0A self.write_canvas_to_file(canvas, image)%0A else:%0A if message is None:%0A image = raw_input(%22save canvas as (%3CRET%3E for skipping): %22)%0A else:%0A image = raw_input(message)%0A%0A if image:%0A self.write_canvas_to_file(canvas, image)%0A%0A def write_canvas_to_file(self, canvas, name, extension='pdf'):%0A ext = self.parse_extension_from_file_name(name)%0A if ext is not None:%0A extension = ext%0A name = ''.join(name.split('.')%5B0:-1%5D)%0A if not extension.startswith('.'):%0A extension = '.' + extension%0A if extension == '.root':%0A self.write_object_to_root_tile(canvas, name + extension)%0A else:%0A canvas.SaveAs(os.path.join(os.path.join(self.dir,%0A name + extension)))%0A%0A def write_object_to_root_tile(self, obj, filename, dir=''):%0A f = ROOT.gROOT.GetListOfFiles().FindObject(filename)%0A if not f:%0A f = ROOT.TFile.Open(filename, 'UPDATE')%0A d = f.GetDirectory(dir)%0A if not d:%0A d = make_root_dir(f, dir)%0A d.cd()%0A obj.Write()%0A%0A def parse_extension_from_file_name(self, name):%0A ext = name.split('.')%5B-1%5D%0A if ext is name:%0A return None%0A return ext%0A%0A def set_directory(self, directory):%0A self.__check_and_create_directory(directory)%0A self.dir = directory%0A
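As committed, this new-file hunk references _logger and make_root_dir without defining or importing them, so those code paths would raise NameError at runtime. A hedged usage sketch that patches in a module-level logger (an assumption) and presumes a working ROOT installation:

    import logging, ROOT
    _logger = logging.getLogger(__name__)  # the hunk uses _logger but never defines it
    w = Writer('/tmp/plots')
    w.write_canvas_to_file(ROOT.TCanvas('c1'), 'plot.pdf')   # extension inferred from the name
    w.write_canvas_to_file(ROOT.TCanvas('c2'), 'plot.root')  # routed to the TFile branch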
2786dd91b0bb7dc8849e3549ff40de28d72d40d5
add a django multi-database router
rdrf/rdrf/db.py
rdrf/rdrf/db.py
Python
0.000001
@@ -0,0 +1,2781 @@ +from io import StringIO%0Aimport os%0A%0Afrom django.core.management import call_command%0Afrom django.db import connections%0A%0Aclass RegistryRouter:%0A # Whether clinical db is configured at all.%0A one_db = %22clinical%22 not in connections%0A # Whether clinical db is configured to be the same as main db.%0A same_db = (one_db or%0A connections%5B%22default%22%5D.get_connection_params() ==%0A connections%5B%22clinical%22%5D.get_connection_params())%0A%0A clinical_models = (%0A (%22rdrf%22, %22clinical%22),%0A (%22rdrf%22, %22questionnaireresponsedata%22),%0A # fixme: move CDEFile to clinical database. This is just%0A # tricky with migrations.%0A # (%22rdrf%22, %22cdefile%22),%0A (%22rdrf%22, %22patientdata%22),%0A (%22rdrf%22, %22formprogress%22),%0A (%22rdrf%22, %22modjgo%22),%0A )%0A%0A @classmethod%0A def is_clinical(cls, app_label, model_name):%0A return (app_label, model_name) in cls.clinical_models%0A%0A def choose_db_model(self, model):%0A return self.choose_db(model._meta.app_label, model._meta.model_name)%0A%0A def choose_db(self, app_label, model_name):%0A clinical = self.is_clinical(app_label, model_name)%0A return %22clinical%22 if clinical and not self.one_db else %22default%22%0A%0A def db_for_read(self, model, **hints):%0A return self.choose_db_model(model)%0A%0A def db_for_write(self, model, **hints):%0A return self.choose_db_model(model)%0A%0A def allow_migrate(self, db, app_label, model_name=None, **hints):%0A return (db == %22default%22 and self.same_db or%0A db == self.choose_db(app_label, model_name))%0A%0A%0Adef reset_sql_sequences(apps):%0A %22%22%22%0A Executes the necessary SQL to reset the primary key counters for%0A all tables in %60apps%60.%0A %22%22%22%0A os.environ%5B'DJANGO_COLORS'%5D = 'nocolor'%0A commands = StringIO()%0A%0A for app in apps:%0A call_command('sqlsequencereset', app, stdout=commands)%0A%0A _execute_reset_sql_sequences(commands.getvalue().splitlines())%0A%0A%0Adef _execute_reset_sql_sequences(commands):%0A # this gets nasty because the --database option of%0A # sqlsequencereset command doesn't work.%0A clinical_tables = %5B%22_%22.join(m) for m in RegistryRouter.clinical_models%5D%0A def for_db(database):%0A def _for_db(command):%0A is_clinical = any(t in command for t in clinical_tables)%0A return (not command.startswith(%22SELECT%22) or%0A (database == %22default%22 and not is_clinical) or%0A (database == %22clinical%22 and is_clinical) or%0A (database == %22default%22 and %22clinical%22 not in connections))%0A return _for_db%0A%0A for database in %5B%22default%22, %22clinical%22%5D:%0A if database in connections:%0A cursor = connections%5Bdatabase%5D.cursor()%0A cursor.execute(%22%5Cn%22.join(filter(for_db(database), commands)))%0A
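For RegistryRouter to take effect, Django has to be told about it in settings. A minimal sketch; the dotted path, engine, and database names are assumptions about the project layout:

    # settings.py sketch; RegistryRouter falls back to 'default' when no
    # 'clinical' connection is configured.
    DATABASES = {
        'default': {'ENGINE': 'django.db.backends.postgresql', 'NAME': 'rdrf'},
        'clinical': {'ENGINE': 'django.db.backends.postgresql', 'NAME': 'rdrf_clinical'},
    }
    DATABASE_ROUTERS = ['rdrf.db.RegistryRouter']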
0b6f6a9fd3916d8a028d5c3ccf4ca4a0277b9781
Add arena class prototype
src/arena.py
src/arena.py
Python
0
@@ -0,0 +1,270 @@ +import jsonpickle%0A%0Aclass ArenaType():%0A%09Circle = 0%0A%09Square = 1%0A%0Aclass ArenaCoverType():%0A%09Soil = 0%0A%09Sand = 1%0A%09Grass = 2%0A%09Stone = 3%0A%0Aclass Arena():%0A%09def __init__(self, name, size, stype, cover):%0A%09%09self.name = name%0A%09%09self.size = size%0A%09%09self.type = stype%0A%09%09self.cover = cover
4b0a21dd813d58370805053e60376f64b5927cd9
Add tutorial for MakeNumpyDataFrame
tutorials/dataframe/df032_MakeNumpyDataFrame.py
tutorials/dataframe/df032_MakeNumpyDataFrame.py
Python
0
@@ -0,0 +1,828 @@ +## %5Cfile%0A## %5Cingroup tutorial_dataframe%0A## %5Cnotebook%0A## Read data from Numpy arrays into RDataFrame.%0A##%0A## %5Cmacro_code%0A## %5Cmacro_output%0A##%0A## %5Cdate March 2021%0A## %5Cauthor Stefan Wunsch (KIT, CERN)%0A%0Aimport ROOT%0Aimport numpy as np%0A%0A# Let's create some data in numpy arrays%0Ax = np.array(%5B1, 2, 3%5D, dtype=np.int32)%0Ay = np.array(%5B4, 5, 6%5D, dtype=np.float64)%0A%0A# Read the data with RDataFrame%0A# The column names in the RDataFrame are defined by the keys of the dictionary.%0A# Please note that only fundamental types (int, float, ...) are supported.%0Adf = ROOT.RDF.MakeNumpyDataFrame(%7B'x': x, 'y': y%7D)%0A%0A# You can now use the RDataFrame as usualy, e.g. add a column ...%0Adf = df.Define('z', 'x + y')%0A%0A# ... or print the content%0Adf.Display().Print()%0A%0A# ... or save the data as a ROOT file%0Adf.Snapshot('tree', 'df032_MakeNumpyDataFrame.root')%0A
93f852782a7148887b3dea0d52c5dde1e0fefb58
Fix --var=val arg handling.
src/python/m5/__init__.py
src/python/m5/__init__.py
# Copyright (c) 2005 The Regents of The University of Michigan # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer; # redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution; # neither the name of the copyright holders nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # Authors: Nathan Binkert # Steve Reinhardt import sys, os, time import __main__ briefCopyright = ''' Copyright (c) 2001-2006 The Regents of The University of Michigan All Rights Reserved ''' fullCopyright = ''' Copyright (c) 2001-2006 The Regents of The University of Michigan All Rights Reserved Permission is granted to use, copy, create derivative works and redistribute this software and such derivative works for any purpose, so long as the copyright notice above, this grant of permission, and the disclaimer below appear in all copies made; and so long as the name of The University of Michigan is not used in any advertising or publicity pertaining to the use or distribution of this software without specific, written prior authorization. THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION FROM THE UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY PURPOSE, AND WITHOUT WARRANTY BY THE UNIVERSITY OF MICHIGAN OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE LIABLE FOR ANY DAMAGES, INCLUDING DIRECT, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM ARISING OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. ''' def sayHello(f): print >> f, "M5 Simulator System" print >> f, briefCopyright print >> f, "M5 compiled on", __main__.compileDate hostname = os.environ.get('HOSTNAME') if not hostname: hostname = os.environ.get('HOST') if hostname: print >> f, "M5 executing on", hostname print >> f, "M5 simulation started", time.ctime() sayHello(sys.stderr) # define this here so we can use it right away if necessary def panic(string): print >>sys.stderr, 'panic:', string sys.exit(1) def m5execfile(f, global_dict): # copy current sys.path oldpath = sys.path[:] # push file's directory onto front of path sys.path.insert(0, os.path.abspath(os.path.dirname(f))) execfile(f, global_dict) # restore original path sys.path = oldpath # Prepend given directory to system module search path. def AddToPath(path): # if it's a relative path and we know what directory the current # python script is in, make the path relative to that directory. if not os.path.isabs(path) and sys.path[0]: path = os.path.join(sys.path[0], path) path = os.path.realpath(path) # sys.path[0] should always refer to the current script's directory, # so place the new dir right after that. sys.path.insert(1, path) # find the m5 compile options: must be specified as a dict in # __main__.m5_build_env. import __main__ if not hasattr(__main__, 'm5_build_env'): panic("__main__ must define m5_build_env") # make a SmartDict out of the build options for our local use import smartdict build_env = smartdict.SmartDict() build_env.update(__main__.m5_build_env) # make a SmartDict out of the OS environment too env = smartdict.SmartDict() env.update(os.environ) # import the main m5 config code from config import * # import the built-in object definitions from objects import * args_left = sys.argv[1:] configfile_found = False while args_left: arg = args_left.pop(0) if arg.startswith('--'): # if arg starts with '--', parse as a special python option # of the format --<python var>=<string value> try: (var, val) = arg.split('=', 1) except ValueError: panic("Could not parse configuration argument '%s'\n" "Expecting --<variable>=<value>\n" % arg); eval("%s = %s" % (var, repr(val))) elif arg.startswith('-'): # if the arg starts with '-', it should be a simulator option # with a format similar to getopt. optchar = arg[1] if len(arg) > 2: args_left.insert(0, arg[2:]) if optchar == 'd': outdir = args_left.pop(0) elif optchar == 'h': showBriefHelp(sys.stderr) sys.exit(1) elif optchar == 'E': env_str = args_left.pop(0) split_result = env_str.split('=', 1) var = split_result[0] if len(split_result == 2): val = split_result[1] else: val = True env[var] = val elif optchar == 'I': AddToPath(args_left.pop(0)) elif optchar == 'P': eval(args_left.pop(0)) else: showBriefHelp(sys.stderr) panic("invalid argument '%s'\n" % arg_str) else: # In any other case, treat the option as a configuration file # name and load it. if not arg.endswith('.py'): panic("Config file '%s' must end in '.py'\n" % arg) configfile_found = True m5execfile(arg, globals()) if not configfile_found: panic("no configuration file specified!") if globals().has_key('root') and isinstance(root, Root): sys.stdout = file('config.ini', 'w') instantiate(root) else: print 'Instantiation skipped: no root object found.'
Python
0
@@ -5109,32 +5109,58 @@ g.split('=', 1)%0A + var = var%5B2:%5D%0A except V @@ -5306,20 +5306,20 @@ e -val( +xec %22%25s = %25s @@ -5338,17 +5338,16 @@ pr(val)) -) %0A eli
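Decoded, the fix swaps the eval call for an exec statement and strips the leading dashes before binding. eval only accepts expressions, and assignment is a statement, so the old code raised SyntaxError; without var[2:] it would also have tried to assign to a name starting with '--'. An illustrative reconstruction in modern exec() syntax (run at module scope):

    arg = '--outdir=/tmp/m5'
    var, val = arg.split('=', 1)
    var = var[2:]                       # strip '--' (the added line in the diff)
    exec("%s = %s" % (var, repr(val)))  # exec, not eval: assignment is a statement
    print(outdir)                       # -> /tmp/m5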
f36a3e4e6cfbc5d3aa14017dcfea6e0fc67514f0
add delete_environment command
ebs_deploy/commands/delete_environment_command.py
ebs_deploy/commands/delete_environment_command.py
Python
0.000003
@@ -0,0 +1,1161 @@ +from ebs_deploy import out, parse_env_config%0A%0A%0Adef add_arguments(parser):%0A %22%22%22%0A Args for the delete environment command%0A %22%22%22%0A parser.add_argument('-e', '--environment',%0A help='Environment name', required=True)%0A%0A%0Adef execute(helper, config, args):%0A %22%22%22%0A Deletes an environment%0A %22%22%22%0A%0A env_config = parse_env_config(config, args.environment)%0A cname_prefix = env_config.get('cname_prefix', None)%0A # env_name = args.environment%0A real_env_name = helper.environment_name_for_cname(cname_prefix)%0A%0A environments = helper.get_environments()%0A%0A for env in environments:%0A if env%5B'EnvironmentName'%5D == real_env_name:%0A if env%5B'Status'%5D != 'Ready':%0A out(%22Unable to delete %22 + env%5B'EnvironmentName'%5D%0A + %22 because it's not in status Ready (%22%0A + env%5B'Status'%5D + %22)%22)%0A else:%0A out(%22Deleting environment: %22+env%5B'EnvironmentName'%5D)%0A # helper.delete_environment(env%5B'EnvironmentName'%5D)%0A # environments_to_wait_for_term.append(env%5B'EnvironmentName'%5D)%0A%0A out(%22Environment deleted%22)%0A%0A return 0%0A
cf4e468ed28a7e750adfbcd41235ac5b90cb562b
Add new package: diffmark (#18930)
var/spack/repos/builtin/packages/diffmark/package.py
var/spack/repos/builtin/packages/diffmark/package.py
Python
0
@@ -0,0 +1,658 @@ +# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other%0A# Spack Project Developers. See the top-level COPYRIGHT file for details.%0A#%0A# SPDX-License-Identifier: (Apache-2.0 OR MIT)%0A%0Afrom spack import *%0A%0A%0Aclass Diffmark(AutotoolsPackage):%0A %22%22%22Diffmark is a DSL for transforming one string to another.%22%22%22%0A%0A homepage = %22https://github.com/vbar/diffmark%22%0A git = %22https://github.com/vbar/diffmark.git%22%0A%0A version('master', branch='master')%0A%0A depends_on('autoconf', type='build')%0A depends_on('automake', type='build')%0A depends_on('libtool', type='build')%0A depends_on('m4', type='build')%0A depends_on('libxml2')%0A
ddff4237ae0bb8dd2575265707a843f4497ccbf2
Create headache.py
headache.py
headache.py
Python
0.001409
@@ -0,0 +1,831 @@ +%22%22%22%0A%0Apython plaintext obfuscator%0A%0Aby n.bush%0A%0A%22%22%22%0Aimport string%0Aimport random%0A%0A%0Adef mess_maker(size=6, chars=string.ascii_uppercase + string.ascii_lowercase + string.digits):%0A return ''.join(random.choice(chars) for x in range(size))%0A%0Adef headache(text):%0A charlist = list(text)%0A obfuscated = %5B%5D%0A class_a = mess_maker(10)%0A class_b = mess_maker(10)%0A css = %22%22%22%0A %3Cstyle%3E%0A span.%25s %7B%7D%0A span.%25s %7Bcolor: transparent; letter-spacing:-1em;%7D%0A %3C/style%3E%0A %22%22%22 %25 (class_a, class_b)%0A obfuscated.append(css)%0A for i in charlist:%0A mess = mess_maker(10)%0A span = '%3Cspan class=%22%25s%22%3E%25s%3C/span%3E%3Cspan class=%22%25s%22%3E%25s%3C/span%3E' %25 (class_a, i, class_b, mess)%0A obfuscated.append(span)%0A return ''.join(obfuscated)%0A%0A%0Aprint headache(%22Hi. This is copyable. Not.%22)%0A
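The trick decoded: each real character is wrapped in a span of the first class and followed by ten junk characters in the second class, which the inline CSS makes transparent and zero-width, so the page renders normally while copy/paste and naive scrapers pick up the junk too. A quick check, given the two functions above:

    html = headache("Hi")
    assert 'letter-spacing:-1em' in html        # the junk-hiding rule
    assert html.count('<span') == 2 * len("Hi")  # one real + one junk span per character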
a7d6344428ef43374fb82f5b357968ec38402984
Create test_step_motor_Model_28BYJ_48p.py
test/test_step_motor_Model_28BYJ_48p.py
test/test_step_motor_Model_28BYJ_48p.py
Python
0.00001
@@ -0,0 +1,183 @@ +from gadgets.motors.step_motor import Model_28BYJ_48%0Ast_mot = Model_28BYJ_48(%5B11,15,16,18%5D)%0A%0Afor i in range(2):%0A st_mot.angular_step(60,direction=2,waiting_time=2,bi_direction=True) %0A
4585d6426a6c2945a359bbe02c58702a07e68746
Create new package. (#6209)
var/spack/repos/builtin/packages/r-gsubfn/package.py
var/spack/repos/builtin/packages/r-gsubfn/package.py
Python
0
@@ -0,0 +1,2399 @@ +##############################################################################%0A# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.%0A# Produced at the Lawrence Livermore National Laboratory.%0A#%0A# This file is part of Spack.%0A# Created by Todd Gamblin, [email protected], All rights reserved.%0A# LLNL-CODE-647188%0A#%0A# For details, see https://github.com/spack/spack%0A# Please also see the NOTICE and LICENSE files for our notice and the LGPL.%0A#%0A# This program is free software; you can redistribute it and/or modify%0A# it under the terms of the GNU Lesser General Public License (as%0A# published by the Free Software Foundation) version 2.1, February 1999.%0A#%0A# This program is distributed in the hope that it will be useful, but%0A# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF%0A# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and%0A# conditions of the GNU Lesser General Public License for more details.%0A#%0A# You should have received a copy of the GNU Lesser General Public%0A# License along with this program; if not, write to the Free Software%0A# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA%0A##############################################################################%0Afrom spack import *%0A%0A%0Aclass RGsubfn(RPackage):%0A %22%22%22gsubfn is like gsub but can take a replacement function or%0A certain other objects instead of the replacement string. Matches%0A and back references are input to the replacement function and%0A replaced by the function output. gsubfn can be used to split%0A strings based on content rather than delimiters and for%0A quasi-perl-style string interpolation. The package also has%0A facilities for translating formulas to functions and allowing%0A such formulas in function calls instead of functions. This can%0A be used with R functions such as apply, sapply, lapply, optim,%0A integrate, xyplot, Filter and any other function that expects%0A another function as an input argument or functions like cat or%0A sql calls that may involve strings where substitution is%0A desirable.%22%22%22%0A%0A homepage = %22https://cran.r-project.org/package=gsubfn%22%0A url = %22https://cran.r-project.org/src/contrib/gsubfn_0.6-6.tar.gz%22%0A list_url = %22https://cran.r-project.org/src/contrib/Archive/gsubfn%22%0A%0A version('0.6-6', '94195ff3502706c736d9c593c07252bc')%0A%0A depends_on('r-proto', type=('build', 'run'))%0A
e76fa7d23894bb88d47b761f683b4bbd797ef889
Add Helpers object for cleaner helper syntax
knights/utils.py
knights/utils.py
Python
0
@@ -0,0 +1,190 @@ +%0Aclass Helpers:%0A '''%0A Provide a cheaper way to access helpers%0A '''%0A def __init__(self, members):%0A for key, value in members.items():%0A setattr(self, key, value)%0A
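A one-line illustration of the convenience this buys, given the class above; the sample members are made up:

    h = Helpers({'shout': str.upper, 'greeting': 'hello'})
    print(h.shout(h.greeting))  # -> HELLO (attribute access instead of dict lookups)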
6c0e5b7823be4d2defc9f0ff7b4abe76bc6f9af7
sequence learner abc
marmot/learning/sequence_learner.py
marmot/learning/sequence_learner.py
Python
0.999952
@@ -0,0 +1,1016 @@ +# this is an abstract class representing a sequence learner, or 'structured' learner%0A# implementations wrap various sequence learning tools, in order to provide a consistent interface within Marmot%0A%0Afrom abc import ABCMeta, abstractmethod%0A%0Aclass SequenceLearner(object):%0A%0A __metaclass__ = ABCMeta%0A%0A # subclasses must provide the implementation%0A @abstractmethod%0A def fit(self, X, y):%0A '''%0A fit a sequence model to data in the format %5B%5Bseq1_w1, seq1_w2, ...%5D%5D,%0A :param X: a list of np.arrays, where each row in each array contains the features for an item in the sequence - X can be viewed as a 3d tensor%0A :param y: the true labels for each sequence%0A :return:%0A '''%0A pass%0A%0A @abstractmethod%0A def predict(self, X):%0A '''%0A predict the tag for each item in each sequence%0A :param X: list of sequences list of np.array%0A :return: list of lists, where each list contains the predictions for the test sequence%0A '''%0A pass
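Any concrete learner has to implement both abstract methods before instantiation succeeds. A minimal illustrative subclass, a majority-label baseline that is not part of Marmot:

    from collections import Counter

    class MajorityBaseline(SequenceLearner):
        """Illustrative only: tags every item with the overall most frequent label."""
        def fit(self, X, y):
            self.majority = Counter(t for seq in y for t in seq).most_common(1)[0][0]
        def predict(self, X):
            return [[self.majority] * len(seq) for seq in X]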
4fe6f81e1ce58474761b7bae673e92e1d08c75b3
required drilldown*s* not singular
cubes/backends/mixpanel/store.py
cubes/backends/mixpanel/store.py
# -*- coding=utf -*- from ...model import * from ...browser import * from ...stores import Store from ...errors import * from .mixpanel import * from string import capwords DIMENSION_COUNT_LIMIT = 100 time_dimension_md = { "name": "time", "levels": ["year", "month", "day", "hour"], "hierarchies": [ {"name":"mdh", "levels": ["year", "month", "day", "hour"]} ], "info": { "is_date": True } } _time_dimension = create_dimension(time_dimension_md) class MixpanelModelProvider(ModelProvider): def cube(self, name): """Creates a mixpanel cube with following variables: * `name` – cube name * `measures` – cube measures: `total` and `uniques` * `required_dimensions` – list of required dimension names * `mappings` – mapping of corrected dimension names Dimensions are Mixpanel's properties where ``$`` character is replaced by the underscore ``_`` character. """ result = self.store.request(["events", "properties", "top"], {"event":name, "limit":DIMENSION_COUNT_LIMIT}) if not result: raise NoSuchCubeError(name) names = result.keys() # Replace $ with underscore _ dims = ["time"] mappings = {} for dim_name in result.keys(): fixed_name = dim_name.replace("$", "_") if fixed_name != dim_name: mappings[fixed_name] = dim_name dims.append(fixed_name) measures = attribute_list(["total", "unique"]) for m in measures: m.aggregations = ['identity'] cube = Cube(name=name, measures=measures, required_dimensions=dims, store=self.store_name, mappings=mappings) # TODO: this is new (remove this comment) cube.category = self.store.category # TODO: required_drilldown might be a cube's attribute (fixed_dd?) cube.info = { "required_drilldown": "time", "category": cube.category } return cube def dimension(self, name): if name == "time": return _time_dimension level = Level(name, attribute_list([name])) dim = Dimension(name, levels=[level]) return dim def list_cubes(self): result = self.store.request(["events", "names"], {"type":"general", }) cubes = [] for name in result: label = capwords(name.replace("_", " ")) cube = { "name": name, "label": label, "category": self.store.category, "info": { "category": self.store.category } } cubes.append(cube) return cubes class MixpanelStore(Store): def __init__(self, api_key, api_secret, category=None): self.mixpanel = Mixpanel(api_key, api_secret) self.category = category or "Mixpanel" def model_provider_name(self): return "mixpanel" def request(self, *args, **kwargs): """Performs a mixpanel HTTP request. Raises a BackendError when mixpanel returns `error` in the response.""" response = self.mixpanel.request(*args, **kwargs) if "error" in response: raise BackendError("Mixpanel request error: %s" % response["error"]) return response
Python
0.99897
@@ -1946,16 +1946,17 @@ rilldown +s might b @@ -2049,25 +2049,28 @@ rilldown +s %22: +%5B %22time%22 +%5D ,%0A
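Decoded, both character-level hunks add the plural 's' (once in the comment, once in the dict key), and the second also wraps the value in a list:

    # before: cube.info = {"required_drilldown": "time", ...}
    # after:  cube.info = {"required_drilldowns": ["time"], ...}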
2838711c7fa12525c2ae6670bb130999654fe7ea
add shortest-palindrome
vol5/shortest-palindrome/shortest-palindrome.py
vol5/shortest-palindrome/shortest-palindrome.py
Python
0.999999
@@ -0,0 +1,580 @@ +#!/usr/bin/env python%0A# -*- coding: utf-8 -*-%0A# @Author: Zeyuan Shang%0A# @Date: 2015-11-19 20:43:07%0A# @Last Modified by: Zeyuan Shang%0A# @Last Modified time: 2015-11-19 20:43:21%0Aclass Solution(object):%0A def shortestPalindrome(self, s):%0A %22%22%22%0A :type s: str%0A :rtype: str%0A %22%22%22%0A ss = s + '#' + s%5B::-1%5D%0A n = len(ss)%0A p = %5B0%5D * n%0A for i in xrange(1, n):%0A j = p%5Bi - 1%5D%0A while j %3E 0 and ss%5Bi%5D != ss%5Bj%5D:%0A j = p%5Bj - 1%5D%0A p%5Bi%5D = j + (ss%5Bi%5D == s%5Bj%5D)%0A return s%5Bp%5B-1%5D:%5D%5B::-1%5D + s
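One detail worth flagging in the hunk: the failure-function update reads p[i] = j + (ss[i] == s[j]), while the textbook KMP prefix function compares against ss[j]. The two happen to coincide for every j this construction can produce (j stays below len(s) at comparison time), so the committed code still works, but the standard form is safer. A corrected standalone sketch:

    def shortest_palindrome(s):
        # KMP prefix function over s + '#' + reversed(s); p[-1] is the length
        # of the longest palindromic prefix of s.
        ss = s + '#' + s[::-1]
        p = [0] * len(ss)
        for i in range(1, len(ss)):
            j = p[i - 1]
            while j > 0 and ss[i] != ss[j]:
                j = p[j - 1]
            p[i] = j + (ss[i] == ss[j])
        return s[p[-1]:][::-1] + s

    assert shortest_palindrome('aacecaaa') == 'aaacecaaa'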
10f72d72e988bf4aa570e21b0e0d6979edb843a7
add example "fit text path into a box"
examples/addons/fit_text_path_into_box.py
examples/addons/fit_text_path_into_box.py
Python
0
@@ -0,0 +1,1096 @@ +# Copyright (c) 2021, Manfred Moitzi%0A# License: MIT License%0A%0Afrom pathlib import Path%0Aimport ezdxf%0Afrom ezdxf import path, zoom%0Afrom ezdxf.math import Matrix44%0Afrom ezdxf.tools import fonts%0Afrom ezdxf.addons import text2path%0A%0ADIR = Path('~/Desktop/Outbox').expanduser()%0Afonts.load()%0A%0Adoc = ezdxf.new()%0Adoc.layers.new('OUTLINE')%0Adoc.layers.new('FILLING')%0Amsp = doc.modelspace()%0A%0Aattr = %7B'layer': 'OUTLINE', 'color': 1%7D%0Aff = fonts.FontFace(family=%22Arial%22)%0Asx, sy = 4, 2%0A# create target box%0Amsp.add_lwpolyline(%5B(0, 0), (sx, 0), (sx, sy), (0, sy)%5D, close=True)%0Atext_as_paths = text2path.make_paths_from_str(%22Squeeze Me%22, ff)%0Afinal_paths = path.fit_paths_into_box(text_as_paths, size=(sx, sy, 0), uniform=False)%0Afinal_paths = path.transform_paths(final_paths, Matrix44.scale(-1, 1, 1))%0A%0A# move bottom/left corner to (0, 0) if required:%0Abbox = path.bbox(final_paths)%0Adx, dy, dz = -bbox.extmin%0Afinal_paths = path.transform_paths(final_paths, Matrix44.translate(dx,dy, dz))%0A%0Apath.render_lwpolylines(msp, final_paths, distance=0.01, dxfattribs=attr)%0A%0Azoom.extents(msp)%0Adoc.saveas(DIR / 'text2path.dxf')%0A
e075b0b1c8d581107209e869eda7f6ff07a7321c
Add script to create a historic->modern dictionary
reverse_dict.py
reverse_dict.py
Python
0
@@ -0,0 +1,1517 @@ +%22%22%22Reverse modern-%3Ehistoric spelling variants dictonary to historic-%3Emodern%0Amappings%0A%22%22%22%0Aimport argparse%0Aimport codecs%0Aimport json%0Afrom collections import Counter%0A%0Aif __name__ == '__main__':%0A parser = argparse.ArgumentParser()%0A parser.add_argument('input_dict', help='the name of the json file '%0A 'containing the modern-%3Espelling variants dictionary')%0A args = parser.parse_args()%0A%0A dict_file = args.input_dict%0A%0A modern_dict = %7B%7D%0A historic_dict = %7B%7D%0A%0A with codecs.open(dict_file, 'rb', 'utf8') as f:%0A modern_dict = json.load(f, encoding='utf-8')%0A%0A for modern_word, variants in modern_dict.iteritems():%0A for var in variants:%0A if var not in historic_dict.keys():%0A historic_dict%5Bvar%5D = Counter()%0A historic_dict%5Bvar%5D%5Bmodern_word%5D += 1%0A%0A print '#words in modern dict: %7B%7D'.format(len(modern_dict))%0A print '#words in historic dict: %7B%7D'.format(len(historic_dict))%0A%0A # find historic words that map to mulitple terms%0A mappings_counter = Counter()%0A%0A print '%5Cnhistoric word%5Ctmodern variant%5Ctfrequency'%0A%0A for w, mappings in historic_dict.iteritems():%0A mappings_counter%5Bstr(len(mappings)).zfill(3)%5D += 1%0A if len(mappings) %3E 1:%0A for variant, freq in mappings.iteritems():%0A print '%7B%7D%5Ct%7B%7D%5Ct%7B%7D'.format(w, variant, freq)%0A%0A mp = mappings_counter.keys()%0A mp.sort()%0A print '%5Cn#mappings%5Ct#historic words'%0A for m in mp:%0A print '%7B%7D%5Ct%7B%7D'.format(m, mappings_counter%5Bm%5D)%0A
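The core of the script is the dictionary inversion; reduced to its essentials with toy data (the sample words are made up):

    from collections import Counter

    modern = {'light': ['lyght', 'licht'], 'night': ['nyght', 'licht']}
    historic = {}
    for modern_word, variants in modern.items():
        for var in variants:
            historic.setdefault(var, Counter())[modern_word] += 1
    print(historic['licht'])  # Counter({'light': 1, 'night': 1}): one historic form, two modern readings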