Dataset schema (column, dtype, value range):

column        | dtype         | min | max
------------- | ------------- | --- | ------
commit        | stringlengths | 40  | 40
subject       | stringlengths | 1   | 3.25k
old_file      | stringlengths | 4   | 311
new_file      | stringlengths | 4   | 311
old_contents  | stringlengths | 0   | 26.3k
lang          | stringclasses | 3 values
proba         | float64       | 0   | 1
diff          | stringlengths | 0   | 7.82k
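The proba column looks like a per-row language-identification confidence, and lang is a three-class language label. A minimal sketch of loading rows with this schema and recovering readable patch text from the raw export, where the diff field is percent-encoded (%0A for newlines, %22 for double quotes, %25 for a literal percent sign); the dataset path below is a placeholder, not the real repository id:

from urllib.parse import unquote
from datasets import load_dataset

# Placeholder repository id -- substitute the dataset's actual path.
ds = load_dataset("org/commit-diffs", split="train")

python_rows = ds.filter(lambda r: r["lang"] == "Python")  # lang has 3 classes
row = python_rows[0]

# In the raw export the patch is percent-encoded; unquote() restores it.
patch = unquote(row["diff"])
print(row["commit"], row["new_file"], row["proba"])
print(patch.splitlines()[0])  # starts with the "@@ -M,N +M,N @@" hunk header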
31d018181c5183acadbe309a250aed17cbae5a28
Create Add_Binary.py
Array/Add_Binary.py
Array/Add_Binary.py
Python
0.000002
@@ -0,0 +1,699 @@
+Given two binary strings, return their sum (also a binary string).

For example,
a = "11"
b = "1"
Return "100".

class Solution:
    # @param a, a string
    # @param b, a string
    # @return a string
    def addBinary(self, a, b):
        A = len(a)
        B = len(b)
        i = 1
        result = []
        carry = 0

        while i <= max(A,B):
            sum = carry
            if i <= A:
                sum += int(a[-i])
            if i <= B:
                sum += int(b[-i])
            bit = sum % 2
            carry = sum / 2
            i += 1
            result.insert(0,str(bit))
        if carry > 0 :
            result.insert(0,'1')
        return ''.join(result)
12192eca146dc1974417bd4fd2cf3722e0049910
add arduino example
example/ard2rrd.py
example/ard2rrd.py
Python
0.000046
@@ -0,0 +1,756 @@
+#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# Arduino UNO A0 value to RRD db
# - read an integer from a serial port and store it on RRD redis database

import serial
from pyRRD_Redis import RRD_redis, StepAddFunc

# some const
TAG_NAME = 'arduino_a0'

# init serial port and RRD db
ser = serial.Serial(port='/dev/ttyACM0', baudrate=9600, timeout=1)
rrd = RRD_redis('rrd:' + TAG_NAME, size=2048, step=1.0, add_func=StepAddFunc.avg)
# fill database
while True:
    # read A0 on serial
    try:
        a0 = int(ser.readline())
        if not 0 <= a0 <= 1023:
            raise ValueError
    except ValueError:
        a0 = None
    # store value
    if a0 is not None:
        # store with scale to 0/100 %
        rrd.add_step(float(a0) * 100 / 1023)
013ee19808dc86d29cb3aa86b38dc35fe98a5580
add to and remove from /etc/hosts some agent node info so condor can recognise its workers
conpaas-services/src/conpaas/services/htcondor/manager/node_info.py
conpaas-services/src/conpaas/services/htcondor/manager/node_info.py
Python
0.000004
@@ -0,0 +1,2421 @@
+"""
Copyright (c) 2010-2013, Contrail consortium.
All rights reserved.

Redistribution and use in source and binary forms,
with or without modification, are permitted provided
that the following conditions are met:

 1. Redistributions of source code must retain the
    above copyright notice, this list of conditions
    and the following disclaimer.
 2. Redistributions in binary form must reproduce
    the above copyright notice, this list of
    conditions and the following disclaimer in the
    documentation and/or other materials provided
    with the distribution.
 3. Neither the name of the Contrail consortium nor the
    names of its contributors may be used to endorse
    or promote products derived from this software
    without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""

import os
import re
from functools import wraps

def test_rw_permissions(f):
    """
    Checks the read/write permissions of the specified file
    """
    @wraps(f)
    def rw_check(thefile, *args, **kwargs):
        if not os.access(thefile, os.R_OK | os.W_OK):
            raise Exception("Cannot read/write file %s " % thefile)
        else:
            return f(thefile, *args, **kwargs)
    return rw_check

@test_rw_permissions
def add_node_info(hostsfile, ip, vmid):
    """
    Add the newly created agent-IP and VM-id to the hostsfile
    """
    targetfile = open(hostsfile,'a')
    targetfile.write("%s	worker-%s.htc\n" % (ip, vmid))
    targetfile.close()

def remove_node_info(hostsfile, ip):
    """
    Remove the agent-IP and VM-id from the hostsfile
    """
    contentlines = open(hostsfile).readlines()
    targetfile = open(hostsfile, 'w')
    for line in contentlines:
        if not re.search('^' + ip, line):
            targetfile.write(line)
2f47284b44ceef3c12990a4f9621062040fe6fcb
Add day 4 solution
day4.py
day4.py
Python
0.001388
@@ -0,0 +1,259 @@
+#!/usr/bin/env python

from hashlib import md5

tests = ['abcdef', 'pqrstuv']

string = 'iwrupvqb'
for idx in range(10000000):
    hash = md5((string + str(idx)).encode('ascii'))
    if hash.hexdigest().startswith('000000'):
        print(idx)
        break
e5008fdf481a80db3b5583d35e6fd369a28cd7ce
drop session_details for sessions
example/__init__.py
example/__init__.py
from pupa.scrape import Jurisdiction

from .people import PersonScraper


class Example(Jurisdiction):
    jurisdiction_id = 'ocd-jurisdiction/country:us/state:ex/place:example'

    name = 'Example Legislature'
    url = 'http://example.com'
    provides = ['people']
    parties = [
        {'name': 'Independent' },
        {'name': 'Green' },
        {'name': 'Bull-Moose'}
    ]
    session_details = {
        '2013': {'_scraped_name': '2013'}
    }

    def get_scraper(self, session, scraper_type):
        if scraper_type == 'people':
            return PersonScraper

    def scrape_session_list(self):
        return ['2013']
Python
0
@@ -391,28 +391,21 @@
 sion
-_detail
 s = 
-{
+[
     '20
@@ -404,18 +404,26 @@
+ {'name': 
 '2013'
-: {
+,
 '_sc
@@ -447,17 +447,17 @@
 3'}
-}
+]

 de
cd9c9080a00cc7e05b5ae4574dd39ddfc86fef3b
Create enc.py
enc.py
enc.py
Python
0.000001
@@ -0,0 +1,2165 @@
+#!/usr/bin/python
"""
Generate encrypted messages wrapped in a self-decrypting python script
usage: python enc.py password > out.py
where password is the encryption password and out.py is the message/script file
to decrypt use: python out.py password
this will print the message to stdout.
"""

import sys, random

def encrypt(key, msg):
    encrypted = []
    for i, c in enumerate(msg):
        key_c = ord(key[i % len(key)])-32
        msg_c = ord(c)-32
        encrypted.append(chr(((msg_c + key_c) % 95)+32))
    return ''.join(encrypted)

def decrypt(key, enc):
    msg=[]
    for i, c in enumerate(enc):
        key_c = ord(key[i % len(key)])-32
        enc_c = ord(c)-32
        msg.append(chr(((enc_c - key_c) % 95)+32))
    return ''.join(msg)

def check(enc):
    is_good=True
    for i, c in enumerate(enc):
        is_good = is_good and (32 <= ord(c) <= 126)
    return is_good

def make_randstr(msg_len):
    sl = []
    r = random.SystemRandom()
    for i in range(msg_len):
        sl.append(chr(r.randint(32,126)))
    return ''.join(sl)

if __name__ == '__main__':
    msg = sys.stdin.read().replace("\n","\\n").replace("\t","\\t")
    randstr = make_randstr(len(msg))
    key = encrypt(sys.argv[1], randstr)
    encrypted = encrypt(key, msg)
    decrypted = decrypt(key, encrypted)
    if not msg == decrypted:
        print msg
        print decrypted
        raise Exception("Encryption Fail")

    print """
#!/usr/bin/python

import sys

def encrypt(key, msg):
    encrypted = []
    for i, c in enumerate(msg):
        key_c = ord(key[i % len(key)])-32
        msg_c = ord(c)-32
        encrypted.append(chr(((msg_c + key_c) % 95)+32))
    return ''.join(encrypted)

def decrypt(key, enc):
    msg=[]
    for i, c in enumerate(enc):
        key_c = ord(key[i % len(key)])-32
        enc_c = ord(c)-32
        msg.append(chr(((enc_c - key_c) % 95)+32))
    return ''.join(msg)

if __name__ == '__main__':"""
    print "\trandstr = ", repr(randstr)
    print "\tenc = ", repr(encrypted)
    print "\tkey = encrypt(sys.argv[1], randstr)"
    print "\tdecrypted = decrypt(key, enc).replace(\"\\\\n\",\"\\n\").replace(\"\\\\t\",\"\\t\")"
    print "\tprint decrypted"
8adfedd0c30fab796fccac6ec58c09e644a91b2f
Add script to shuffle paired fastq sequences.
shuffle_fastq.py
shuffle_fastq.py
Python
0
@@ -0,0 +1,941 @@
+# shuffles the sequences in a fastq file
import os
import random
from Bio import SeqIO
import fileinput
from argparse import ArgumentParser

if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument("--fq1", required="True")
    parser.add_argument("--fq2", required="True")
    args = parser.parse_args()
    with open(args.fq1) as in_handle:
        fq1 = [x for x in SeqIO.parse(in_handle, "fastq-sanger")]
    with open(args.fq2) as in_handle:
        fq2 = [x for x in SeqIO.parse(in_handle, "fastq-sanger")]
    order = range(len(fq1))
    random.shuffle(order)

    fq1_name = os.path.splitext(args.fq1)[0]
    fq2_name = os.path.splitext(args.fq2)[0]
    with open(fq1_name + ".shuffled.fq", "wa") as fq1_handle, open(fq2_name + ".shuffled.fq", "wa") as fq2_handle:
        for i in order:
            fq1_handle.write(fq1[i].format("fastq-sanger"))
            fq2_handle.write(fq2[i].format("fastq-sanger"))
b7f9e5555481ba4e34bcc12beecf540d3204a15f
Fix pep8 issue
raven/contrib/celery/__init__.py
raven/contrib/celery/__init__.py
""" raven.contrib.celery ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ try: from celery.task import task except ImportError: from celery.decorators import task from celery.signals import task_failure from raven.base import Client class CeleryMixin(object): def send_encoded(self, message): "Errors through celery" self.send_raw.delay(message) @task(routing_key='sentry') def send_raw(self, message): return super(CeleryMixin, self).send_encoded(message) class CeleryClient(CeleryMixin, Client): pass def register_signal(client): @task_failure.connect(weak=False) def process_failure_signal(sender, task_id, exception, args, kwargs, traceback, einfo, **kw): client.captureException( exc_info=einfo.exc_info, extra={ 'task_id': task_id, 'task': sender, 'args': args, 'kwargs': kwargs, })
Python
0
@@ -1086,9 +1086,8 @@
 })
-
dba14e6dfbaacf79d88f1be0b831488f45fc1bfc
Create coroutine.py
gateway/src/test/coroutine.py
gateway/src/test/coroutine.py
Python
0.000006
@@ -0,0 +1,563 @@
+#!/usr/bin/python3.5
import asyncio
import time

now = lambda: time.time()
async def func(x):
    print('Waiting for %d s' % x)
    await asyncio.sleep(x)
    return 'Done after {}s'.format(x)

start = now()

coro1 = func(1)
coro2 = func(2)
coro3 = func(4)

tasks = [
    asyncio.ensure_future(coro1),
    asyncio.ensure_future(coro2),
    asyncio.ensure_future(coro3)
]

loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.wait(tasks))

for task in tasks:
    print('Task return: ', task.result())

print('Program consumes: %f s' % (now() - start))
b9d47f54b76345f0c8f7d486282fc416ba540aee
Add specs for ArgumentParser
tests/test_argument_parser.py
tests/test_argument_parser.py
Python
0
@@ -0,0 +1,732 @@
+import pytest

from codeclimate_test_reporter.components.argument_parser import ArgumentParser


def test_parse_args_default():
    parsed_args = ArgumentParser().parse_args([])

    assert(parsed_args.file == "./.coverage")
    assert(parsed_args.token is None)
    assert(parsed_args.stdout is False)
    assert(parsed_args.debug is False)
    assert(parsed_args.version is False)

def test_parse_args_with_options():
    args = ["--version", "--debug", "--stdout", "--file", "file", "--token", "token"]
    parsed_args = ArgumentParser().parse_args(args)

    assert(parsed_args.debug)
    assert(parsed_args.file == "file")
    assert(parsed_args.token == "token")
    assert(parsed_args.stdout)
    assert(parsed_args.version)
614579c38bea10798d285ec2608650d36369020a
add test demonstrating duplicate stream handling
tests/test_invalid_streams.py
tests/test_invalid_streams.py
Python
0
@@ -0,0 +1,265 @@
+import fixtures

import dnfile


def test_duplicate_stream():
    path = fixtures.DATA / "invalid-streams" / "duplicate-stream.exe"

    dn = dnfile.dnPE(path)

    assert "#US" in dn.net.metadata.streams
    assert dn.net.user_strings.get_us(1).value == "BBBBBBBB"
ffb5caf83055e734baf711366b6779ecb24a013c
Add script to generate other adobe themes
addons/adobe/clone.py
addons/adobe/clone.py
Python
0
@@ -0,0 +1,1935 @@
+#!/usr/bin/env python
from PIL import Image, ImageEnhance
import PIL.ImageOps
import fnmatch
import shutil
import os

def globPath(path, pattern):
    result = []
    for root, subdirs, files in os.walk(path):
        for filename in files:
            if fnmatch.fnmatch(filename, pattern):
                result.append(os.path.join(root, filename))
    return result


def inverse(inpng, outpng):
    image = Image.open(inpng)
    if image.mode == 'RGBA':
        r, g, b, a = image.split()
        rgb_image = Image.merge('RGB', (r, g, b))
        inverted_image = PIL.ImageOps.invert(rgb_image)
        r2, g2, b2 = inverted_image.split()
        final_transparent_image = Image.merge('RGBA', (r2, g2, b2, a))
        final_transparent_image.save(outpng)
    else:
        inverted_image = PIL.ImageOps.invert(image)
        inverted_image.save(outpng)


def darken(inpng, outpng, darkness):
    im1 = Image.open(inpng)
    im2 = im1.point(lambda p: p * darkness)
    im2.save(outpng)


def bright(inpng, outpng, brightness):
    peak = Image.open(inpng)
    enhancer = ImageEnhance.Brightness(peak)
    bright = enhancer.enhance(brightness)
    bright.save(outpng)


def makeClone(name, brightness):
    outdir = os.path.join("..", name)
    if not os.path.isdir(outdir):
        os.makedirs(outdir)

    for p in globPath('.', "**"):
        outfile = os.path.join(outdir, p)
        curdir = os.path.dirname(outfile)
        if not os.path.isdir(curdir):
            os.makedirs(curdir)
        if p.endswith(".png"):
            bright(p, outfile, brightness)
        elif p.endswith(".tres"):
            content = open(p).read()
            content = content.replace("res://addons/adobe/", "res://addons/{}/".format(name))
            of = open(outfile, 'w')
            of.write(content)
            of.close()
        else:
            shutil.copy(p, outfile)

makeClone("adobe_dark", 0.65)
makeClone("adobe_light", 1.35)
c5ecaef62d788b69446181c6ba495cb273bf98ef
Add rolling mean scatter plot example
altair/examples/scatter_with_rolling_mean.py
altair/examples/scatter_with_rolling_mean.py
Python
0.000002
@@ -0,0 +1,680 @@
+"""
Scatter Plot with Rolling Mean
------------------------------
A scatter plot with a rolling mean overlay. In this example a 30 day window
is used to calculate the mean of the maximum temperature around each date.
"""
# category: scatter plots

import altair as alt
from vega_datasets import data

source = data.seattle_weather()

line = alt.Chart(source).mark_line(
    color='red',
    size=3
).transform_window(
    rolling_mean='mean(temp_max)',
    frame=[-15, 15]
).encode(
    x='date:T',
    y='rolling_mean:Q'
)

points = alt.Chart(source).mark_point().encode(
    x='date:T',
    y=alt.Y('temp_max:Q',
            axis=alt.Axis(title='Max Temp'))
)

points + line
5ec793ffb8c260a02ab7da655b5f56ff3c3f5da7
add find_anagrams.py
algo/find_anagrams.py
algo/find_anagrams.py
Python
0.000321
@@ -0,0 +1,310 @@
+words = "oolf folo oolf lfoo fool oofl fool loof oofl folo abr bra bar rab rba abr arb bar abr abr"
words = [word.strip() for word in words.split(" ")]

anagrams = {}

for word in words:
    sorted_word = ''.join(sorted(word))
    anagrams[sorted_word] = anagrams.get(sorted_word, []) + [word]

print anagrams
f830c778fd06e1548da0b87aafa778834005c64e
Add fls simprocedures
angr/procedures/win32/fiber_local_storage.py
angr/procedures/win32/fiber_local_storage.py
Python
0
@@ -0,0 +1,1644 @@
+import angr

KEY = 'win32_fls'

def mutate_dict(state):
    d = dict(state.globals.get(KEY, {}))
    state.globals[KEY] = d
    return d

def has_index(state, idx):
    if KEY not in state.globals:
        return False
    return idx in state.globals[KEY]

class FlsAlloc(angr.SimProcedure):
    def run(self, callback):
        if not self.state.solver.is_true(callback == 0):
            raise angr.errors.SimValueError("Can't handle callback function in FlsAlloc")

        d = mutate_dict(self.state)
        new_key = len(d) + 1
        d[new_key] = self.state.se.BVV(0, self.state.arch.bits)
        return new_key

class FlsFree(angr.SimProcedure):
    def run(self, index):
        set_val = self.inline_call(FlsSetValue, (index, self.state.se.BVV(0, self.state.arch.bits)))
        return set_val.ret_expr

class FlsSetValue(angr.SimProcedure):
    def run(self, index, value):
        conc_indexs = self.state.se.any_n_int(index, 2)
        if len(conc_indexs) != 1:
            raise angr.errors.SimValueError("Can't handle symbolic index in FlsSetValue")
        conc_index = conc_indexs[0]

        if not has_index(self.state, conc_index):
            return 0

        mutate_dict(self.state)[conc_index] = value
        return 1

class FlsGetValue(angr.SimProcedure):
    def run(self, index):
        conc_indexs = self.state.se.any_n_int(index, 2)
        if len(conc_indexs) != 1:
            raise angr.errors.SimValueError("Can't handle symbolic index in FlsGetValue")
        conc_index = conc_indexs[0]

        if not has_index(self.state, conc_index):
            return 0

        return self.globals[KEY][conc_index]
1bda23c9e6fee7815617a8ad7f64c80a32e223c5
Add script for jira story point report.
scripts/jira.py
scripts/jira.py
Python
0
@@ -0,0 +1,2346 @@
+#!/usr/bin/python

import sys
import os
import requests
import urllib


g_user = None
g_pass = None
g_sprint = None


def usage():
    print("")
    print("usage: " + g_script_name + " --user username --pass password --sprint sprintname")
    print("")
    sys.exit(1)


def unknown_arg(s):
    print("")
    print("ERROR: Unknown argument: " + s)
    print("")
    usage()


def parse_args(argv):
    global g_user
    global g_pass
    global g_sprint

    i = 1
    while (i < len(argv)):
        s = argv[i]

        if (s == "--user"):
            i += 1
            if (i > len(argv)):
                usage()
            g_user = argv[i]
        elif (s == "--pass"):
            i += 1
            if (i > len(argv)):
                usage()
            g_pass = argv[i]
        elif (s == "--sprint"):
            i += 1
            if (i > len(argv)):
                usage()
            g_sprint = argv[i]
        elif (s == "-h" or s == "--h" or s == "-help" or s == "--help"):
            usage()
        else:
            unknown_arg(s)

        i += 1

    if (g_user is None):
        usage()

    if (g_pass is None):
        usage()

    if (g_sprint is None):
        usage()


def main(argv):
    """
    Main program.

    @return: none
    """
    global g_script_name

    g_script_name = os.path.basename(argv[0])
    parse_args(argv)

    url = 'https://0xdata.atlassian.net/rest/api/2/search?jql=sprint="' + urllib.quote(g_sprint) + '"&maxResults=1000'
    r = requests.get(url, auth=(g_user, g_pass))
    if (r.status_code != 200):
        print("ERROR: status code is " + str(r.status_code))
        sys.exit(1)
    j = r.json()
    issues = j[u'issues']
    story_points_map = {}
    for issue in issues:
        name = issue[u'fields'][u'assignee'][u'name']
        story_points = issue[u'fields'][u'customfield_10004']
        if story_points is None:
            story_points = 0
        else:
            story_points = float(story_points)
        if name in story_points_map:
            n = story_points_map[name]
            story_points_map[name] = n + story_points
        else:
            story_points_map[name] = story_points

    for key in sorted(story_points_map.keys()):
        value = story_points_map[key]
        print("{}: {}").format(key, value)


if __name__ == "__main__":
    main(sys.argv)
a24095964e32da33ea946b3c28bdc829a505585d
Add lidar example
lidar.py
lidar.py
Python
0.000001
@@ -0,0 +1,1563 @@
+""" Copyright 2021 CyberTech Labs Ltd.

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License. """

import math

w = 240
h = 280
scale = 0.5

waitTimer = 500
moveControl = 0
tickPerSecond = 1000 // waitTimer
while not brick.keys().wasPressed(KeysEnum.Up):
    moveControl = (moveControl + 1) % (10 * tickPerSecond)
    power = 100
    if math.sin(moveControl / tickPerSecond) < 0:
        power = -100

    brick.motor('M3').setPower(power)
    brick.motor('M4').setPower(power)

    pic = [0x008800] * (h * w)

    for j in range(w // 2, w):
        pic[h // 2 * w + j] = 0x888888

    data = brick.lidar().read()
    for i in range(360):
        distance = data[i]
        if distance == 0:
            continue
        theta = i * math.pi / 180
        x = distance * math.cos(theta)
        y = distance * math.sin(theta)
        x_px = min(w - 1, max(0, math.floor(x * scale + w / 2)))
        y_px = min(h - 1, max(0, math.floor(y * scale + h / 2)))
        pic[y_px * w + x_px] = 0

    brick.display().show(pic, w, h, 'rgb32')
    script.wait(waitTimer)

brick.stop()
da7ac24484b75c8ea29c300aa46c42f0fdd9519d
fix for sentry-113947841
src/sentry/receivers/onboarding.py
src/sentry/receivers/onboarding.py
from __future__ import print_function, absolute_import

from django.db import IntegrityError, transaction
from django.utils import timezone

from sentry.models import (
    OnboardingTask, OnboardingTaskStatus, OrganizationOnboardingTask
)
from sentry.plugins import IssueTrackingPlugin, NotificationPlugin
from sentry.signals import (
    event_processed,
    project_created,
    first_event_pending,
    first_event_received,
    member_invited,
    member_joined,
    plugin_enabled,
    issue_tracker_used,
)
from sentry.utils.javascript import has_sourcemap


@project_created.connect(weak=False)
def record_new_project(project, user, **kwargs):
    try:
        with transaction.atomic():
            OrganizationOnboardingTask.objects.create(
                organization=project.organization,
                task=OnboardingTask.FIRST_PROJECT,
                user=user,
                status=OnboardingTaskStatus.COMPLETE,
                project_id=project.id,
                date_completed=timezone.now(),
            )
    except IntegrityError:
        try:
            with transaction.atomic():
                OrganizationOnboardingTask.objects.create(
                    organization=project.organization,
                    task=OnboardingTask.SECOND_PLATFORM,
                    user=user,
                    status=OnboardingTaskStatus.PENDING,
                    project_id=project.id,
                    date_completed=timezone.now(),
                )
        except IntegrityError:
            pass


@first_event_pending.connect(weak=False)
def record_raven_installed(project, user, **kwargs):
    try:
        with transaction.atomic():
            OrganizationOnboardingTask.objects.create(
                organization=project.organization,
                task=OnboardingTask.FIRST_EVENT,
                status=OnboardingTaskStatus.PENDING,
                user=user,
                project_id=project.id,
                date_completed=timezone.now()
            )
    except IntegrityError:
        pass


@first_event_received.connect(weak=False)
def record_first_event(project, group, **kwargs):
    """
    Requires up to 2 database calls, but should only run with the first event
    in any project, so performance should not be a huge bottleneck.
    """
    # If complete, pass (creation fails due to organization, task unique constraint)
    # If pending, update.
    # If does not exist, create.
    rows_affected, created = OrganizationOnboardingTask.objects.create_or_update(
        organization=project.organization,
        task=OnboardingTask.FIRST_EVENT,
        status=OnboardingTaskStatus.PENDING,
        values={
            'status': OnboardingTaskStatus.COMPLETE,
            'project_id': project.id,
            'date_completed': project.first_event,
            'data': {'platform': group.platform},
        }
    )

    # If first_event task is complete
    if not rows_affected and not created:
        try:
            oot = OrganizationOnboardingTask.objects.filter(
                organization=project.organization,
                task=OnboardingTask.FIRST_EVENT
            )[0]
        except IndexError:
            return

        # Only counts if it's a new project and platform
        if oot.project_id != project.id and oot.data['platform'] != group.platform:
            OrganizationOnboardingTask.objects.create_or_update(
                organization=project.organization,
                task=OnboardingTask.SECOND_PLATFORM,
                status=OnboardingTaskStatus.PENDING,
                values={
                    'status': OnboardingTaskStatus.COMPLETE,
                    'project_id': project.id,
                    'date_completed': project.first_event,
                    'data': {'platform': group.platform},
                }
            )


@member_invited.connect(weak=False)
def record_member_invited(member, user, **kwargs):
    try:
        with transaction.atomic():
            OrganizationOnboardingTask.objects.create(
                organization=member.organization,
                task=OnboardingTask.INVITE_MEMBER,
                user=user,
                status=OnboardingTaskStatus.PENDING,
                date_completed=timezone.now(),
                data={'invited_member_id': member.id}
            )
    except IntegrityError:
        pass


@member_joined.connect(weak=False)
def record_member_joined(member, **kwargs):
    OrganizationOnboardingTask.objects.create_or_update(
        organization=member.organization,
        task=OnboardingTask.INVITE_MEMBER,
        status=OnboardingTaskStatus.PENDING,
        values={
            'status': OnboardingTaskStatus.COMPLETE,
            'date_completed': timezone.now(),
            'data': {'invited_member_id': member.id}
        }
    )


@event_processed.connect(weak=False)
def record_release_received(project, group, event, **kwargs):
    if event.get_tag('sentry:release'):
        try:
            with transaction.atomic():
                OrganizationOnboardingTask.objects.create(
                    organization=project.organization,
                    task=OnboardingTask.RELEASE_TRACKING,
                    status=OnboardingTaskStatus.COMPLETE,
                    project_id=project.id,
                    date_completed=timezone.now()
                )
        except IntegrityError:
            pass


@event_processed.connect(weak=False)
def record_user_context_received(project, group, event, **kwargs):
    if event.data.get('sentry.interfaces.User'):
        try:
            with transaction.atomic():
                OrganizationOnboardingTask.objects.create(
                    organization=project.organization,
                    task=OnboardingTask.USER_CONTEXT,
                    status=OnboardingTaskStatus.COMPLETE,
                    project_id=project.id,
                    date_completed=timezone.now()
                )
        except IntegrityError:
            pass


@event_processed.connect(weak=False)
def record_sourcemaps_received(project, group, event, **kwargs):
    if has_sourcemap(event):
        try:
            with transaction.atomic():
                OrganizationOnboardingTask.objects.create(
                    organization=project.organization,
                    task=OnboardingTask.SOURCEMAPS,
                    status=OnboardingTaskStatus.COMPLETE,
                    project_id=project.id,
                    date_completed=timezone.now()
                )
        except IntegrityError:
            pass


@plugin_enabled.connect(weak=False)
def record_plugin_enabled(plugin, project, user, **kwargs):
    if isinstance(plugin, IssueTrackingPlugin):
        task = OnboardingTask.ISSUE_TRACKER
        status = OnboardingTaskStatus.PENDING
    elif isinstance(plugin, NotificationPlugin):
        task = OnboardingTask.NOTIFICATION_SERVICE
        status = OnboardingTaskStatus.COMPLETE

    try:
        with transaction.atomic():
            OrganizationOnboardingTask.objects.create(
                organization=project.organization,
                task=task,
                status=status,
                user=user,
                project_id=project.id,
                date_completed=timezone.now(),
                data={'plugin': plugin.slug}
            )
    except IntegrityError:
        pass


@issue_tracker_used.connect(weak=False)
def record_issue_tracker_used(plugin, project, user, **kwargs):
    OrganizationOnboardingTask.objects.create_or_update(
        organization=project.organization,
        task=OnboardingTask.ISSUE_TRACKER,
        status=OnboardingTaskStatus.PENDING,
        values={
            'status': OnboardingTaskStatus.COMPLETE,
            'user': user,
            'project_id': project.id,
            'date_completed': timezone.now(),
            'data': {'plugin': plugin.slug}
        }
    )
Python
0.000002
@@ -3303,17 +3303,21 @@
 oot.data
-[
+.get(
 'platfor
@@ -3318,17 +3318,33 @@
 latform'
-]
+, group.platform)
 != grou
ccf1fb5d5ef1e2b12bc49afd260b1d2d0a166a43
Prepare v2.20.7.dev
flexget/_version.py
flexget/_version.py
""" Current FlexGet version. This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by release scripts in continuous integration. Should (almost) never be set manually. The version should always be set to the <next release version>.dev The jenkins release job will automatically strip the .dev for release, and update the version again for continued development. """ __version__ = '2.20.6'
Python
0.000003
@@ -442,7 +442,11 @@
 .20.
-6
+7.dev
 '
950e6b975323293ed8b73a5ffe8448072e0dac27
Fix downloader
support/download.py
support/download.py
# A file downloader.

import contextlib, os, tempfile, timer, urllib2, urlparse

class Downloader:
    def __init__(self, dir=None):
        self.dir = dir

    # Downloads a file and removes it when exiting a block.
    # Usage:
    #   d = Downloader()
    #   with d.download(url) as f:
    #     use_file(f)
    def download(self, url, cookie=None):
        suffix = os.path.splitext(urlparse.urlsplit(url)[2])[1]
        fd, filename = tempfile.mkstemp(suffix=suffix, dir=self.dir)
        os.close(fd)
        with timer.print_time('Downloading', url, 'to', filename):
            opener = urllib2.build_opener()
            if cookie:
                opener.addheaders.append(('Cookie', cookie))
            num_tries = 2
            for i in range(num_tries):
                try:
                    f = opener.open(url)
                except urllib2.URLError, e:
                    print('Failed to open url', url)
                    continue
                length = f.headers.get('content-length')
                if not length:
                    print('Failed to get content-length')
                    continue
                length = int(length)
                with open(filename, 'wb') as out:
                    count = 0
                    while count < length:
                        data = f.read(1024 * 1024)
                        count += len(data)
                        out.write(data)
        @contextlib.contextmanager
        def remove(filename):
            try:
                yield filename
            finally:
                pass #os.remove(filename)
        return remove(filename)
Python
0.000001
@@ -1312,14 +1312,8 @@
-pass #
 os.r
570aaad3da93f9252efb787a58bbe5151eff93d4
Create run_ToolKit.py
0.0.5/run_ToolKit.py
0.0.5/run_ToolKit.py
Python
0.000002
@@ -0,0 +1,86 @@
+# run_ToolKit.py
from modulos import main

if __name__ == "__main__":
    main.main()
857ccf7f6cfed4e8663d635c119f8683c9ee09e0
Add random choice plugin (with_random_choice)
lib/ansible/runner/lookup_plugins/random_choice.py
lib/ansible/runner/lookup_plugins/random_choice.py
Python
0
@@ -0,0 +1,1277 @@
+# (c) 2012, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

import subprocess
from ansible import utils, errors
import random

# useful for introducing chaos ... or just somewhat reasonably fair selection
# amongst available mirrors
#
#    tasks:
#        - debug: msg=$item
#          with_random_choice:
#             - one
#             - two
#             - three

class LookupModule(object):

    def __init__(self, basedir=None, **kwargs):
        self.basedir = basedir

    def run(self, terms, **kwargs):
        if isinstance(terms, basestring):
            terms = [ terms ]
        return [ random.choice(terms) ]
b3f91806b525ddef50d541f937bed539f9bae20a
Use cache backend for sessions in deployed settings.
mezzanine/project_template/deploy/live_settings.py
mezzanine/project_template/deploy/live_settings.py
DATABASES = {
    "default": {
        # Ends with "postgresql_psycopg2", "mysql", "sqlite3" or "oracle".
        "ENGINE": "django.db.backends.postgresql_psycopg2",
        # DB name or path to database file if using sqlite3.
        "NAME": "%(proj_name)s",
        # Not used with sqlite3.
        "USER": "%(proj_name)s",
        # Not used with sqlite3.
        "PASSWORD": "%(db_pass)s",
        # Set to empty string for localhost. Not used with sqlite3.
        "HOST": "127.0.0.1",
        # Set to empty string for default. Not used with sqlite3.
        "PORT": "",
    }
}

SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTOCOL", "https")

CACHE_MIDDLEWARE_SECONDS = 60

CACHES = {
    "default": {
        "BACKEND": "django.core.cache.backends.memcached.MemcachedCache",
        "LOCATION": "127.0.0.1:11211",
    }
}
Python
0
@@ -818,16 +818,75 @@
 11211",
     }
 }
+
+SESSION_ENGINE = "django.contrib.sessions.backends.cache"
62545500553443863d61d9e5ecc80307c745a227
Add migration to remove non-{entity,classifier} dimensions from the database, and to recompute cubes if necessary
migrate/20110917T143029-remove-value-dimensions.py
migrate/20110917T143029-remove-value-dimensions.py
Python
0.000001
@@ -0,0 +1,1031 @@
+import logging

from openspending.lib import cubes
from openspending import migration, model, mongo

log = logging.getLogger(__name__)

def up():
    group_args = ({'dataset':1}, {}, {'num': 0},
                  'function (x, acc) { acc.num += 1 }')

    before = mongo.db.dimension.group(*group_args)
    dims = model.dimension.find({'type': {'$nin': ['entity', 'classifier']}})
    for d in dims:
        log.info("Removing dimension: %s", d)
        model.dimension.remove({'_id': d['_id']})
    after = mongo.db.dimension.group(*group_args)

    for bf, af in zip(before, after):
        if int(bf['num']) != int(af['num']):
            log.warn("Number of dimensions for dimension '%s' "
                     "changed. Recomputing cubes.", bf['dataset'])
            ds = model.dataset.find_one({'name': bf['dataset']})
            cubes.Cube.update_all_cubes(ds)

def down():
    raise migration.IrreversibleMigrationError("Can't add back dimension "
                                               "fields that we dropped!")
c599b5d470cf80b964af1b261a11540516e120df
Add Dehnen smoothing as a wrapper
galpy/potential_src/DehnenSmoothWrapperPotential.py
galpy/potential_src/DehnenSmoothWrapperPotential.py
Python
0
@@ -0,0 +1,1866 @@
+###############################################################################
#   DehnenSmoothWrapperPotential.py: Wrapper to smoothly grow a potential
###############################################################################
from galpy.potential_src.WrapperPotential import SimpleWrapperPotential
class DehnenSmoothWrapperPotential(SimpleWrapperPotential):
    def __init__(self,amp=1.,pot=None,tform=-4.,tsteady=None,ro=None,vo=None):
        """
        NAME:

           __init__

        PURPOSE:

           initialize a DehnenSmoothWrapper Potential

        INPUT:

           amp - amplitude to be applied to the potential (default: 1.)

           pot - Potential instance or list thereof; the amplitude of this will be grown by this wrapper

           tform - start of growth

           tsteady - time from tform at which the potential is fully grown (default: -tform/2, st the perturbation is fully grown at tform/2)

        OUTPUT:

           (none)

        HISTORY:

           2017-06-26 - Started - Bovy (UofT)

        """
        SimpleWrapperPotential.__init__(self,amp=amp,pot=pot,ro=ro,vo=vo)
        self._tform= tform
        if tsteady is None:
            self._tsteady= self._tform/2.
        else:
            self._tsteady= self._tform+tsteady
        self.hasC= False
        self.hasC_dxdv= False

    def _smooth(self,t):
        #Calculate relevant time
        if t < self._tform:
            smooth= 0.
        elif t < self._tsteady:
            deltat= t-self._tform
            xi= 2.*deltat/(self._tsteady-self._tform)-1.
            smooth= (3./16.*xi**5.-5./8*xi**3.+15./16.*xi+.5)
        else: #bar is fully on
            smooth= 1.
        return smooth

    def _wrap(self,attribute,R,Z,phi=0.,t=0.):
        return self._smooth(t)\
            *self._wrap_pot_func(attribute)(self._pot,R,Z,phi=phi,t=t)
ddc61e8158fb1dfb33b30a19f7e9cd3be8eaf3a2
add app.py
app.py
app.py
Python
0.000003
@@ -0,0 +1,141 @@
+from flask import Flask

app = Flask(__name__)
if __name__ == "__main__":
    app.run(host='0.0.0.0', port=5000, debug=True)
cab4b903b986a7f8bfe4955bf80190bb7f33b012
Create bot.py
bot.py
bot.py
Python
0.000001
@@ -0,0 +1,360 @@
+# -*- coding: utf-8 -*-
import twitter_key
import tweepy
import markovtweet

def auth():
    auth = tweepy.OAuthHandler(twitter_key.CONSUMER_KEY, twitter_key.CONSUMER_SECRET)
    auth.set_access_token(twitter_key.ACCESS_TOKEN, twitter_key.ACCESS_SECRET)
    return tweepy.API(auth)

if __name__ == "__main__":
    api = auth()
    markovtweet.markovtweet(api)
f1b11d2b111ef0b70f0babe6e025056ff1a68acc
Create InMoov.LeapMotionHandTracking.py
home/Alessandruino/InMoov.LeapMotionHandTracking.py
home/Alessandruino/InMoov.LeapMotionHandTracking.py
Python
0
@@ -0,0 +1,539 @@
+i01 = Runtime.createAndStart("i01","InMoov")

#Set here the port of your InMoov Left Hand Arduino , in this case COM5
leftHand = i01.startLeftHand("COM5")

#==============================
#Set the min/max values for fingers

i01.leftHand.thumb.setMinMax( 0, 61)
i01.leftHand.index.map(0 , 89)
i01.leftHand.majeure.map(0 , 89)
i01.leftHand.ringFinger.map(0 , 104)
i01.leftHand.pinky.map(0 , 91)
#===============================

#Start the Leap Tracking
i01.leftHand.starLeapTracking()

#stop leap tracking
#i01.leftHand.stopLeapTracking()
ddf940dc932c04ebd287085ec7d035a93ac5598f
add findmyiphone flask api
ios.py
ios.py
Python
0.000001
@@ -0,0 +1,1636 @@
+from pyicloud import PyiCloudService

from flask import Flask, jsonify, request, abort


api = PyiCloudService('[email protected]')

app = Flask(__name__)

@app.route('/devices', methods=['GET'])
def device_list():
    devices = []
    for id, device in api.devices.items():
        location_info = device.location()
        device_json = {
            'id': id,
            'name': device.data['name'],
            'model': device.data['deviceDisplayName'],
            'is_desktop': device.data['isMac'],
            'location': {
                'lat': location_info['latitude'],
                'lng': location_info['longitude'],
                'source': location_info['positionType'],
                'accuracy': location_info['horizontalAccuracy'],
                'is_old': location_info['isOld'],
                'is_accurate': not location_info['isInaccurate'],
                'timestamp': location_info['timeStamp'],
            } if location_info else None,
        }
        devices.append(device_json)

    return jsonify({'devices': devices})


@app.route('/alert', methods=['POST'])
def alert():
    device_id = request.form['id']

    subject = request.form.get('subject', '').strip()
    message = request.form.get('message', '').strip()
    sounds = request.form.get('sounds')

    device = api.devices.get(device_id)
    if not device:
        abort(404)

    if not message:
        device.play_sound(subject=subject)
    else:
        device.display_message(subject=subject, message=message, sounds=bool(sounds))
    return jsonify({'success': True, 'errors': []})


if __name__ == '__main__':
    app.run()
bb7bb2e12d3ccbb55f0b0e6db5d0cb79c3ea8079
Add missing migration for profile items.
km_api/know_me/migrations/0013_remove_profileitem_media_resource.py
km_api/know_me/migrations/0013_remove_profileitem_media_resource.py
Python
0
@@ -0,0 +1,401 @@
+# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-01 14:16
from __future__ import unicode_literals

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('know_me', '0012_emergencyitem'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='profileitem',
            name='media_resource',
        ),
    ]
a0b9d1977b2aa2366a334231b4dd5dbe047d7122
Add testcase for Category.can_create_events
indico/modules/categories/models/categories_test.py
indico/modules/categories/models/categories_test.py
Python
0.000021
@@ -0,0 +1,1928 @@
+# This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.

import pytest

from indico.core.db.sqlalchemy.protection import ProtectionMode


@pytest.mark.parametrize(('protection_mode', 'creation_restricted', 'acl', 'allowed'), (
    # not restricted
    (ProtectionMode.public, False, None, True),
    (ProtectionMode.protected, False, None, False),
    (ProtectionMode.protected, False, {'read_access': True}, True),
    # restricted - authorized
    (ProtectionMode.protected, True, {'full_access': True}, True),
    (ProtectionMode.protected, True, {'roles': {'create'}}, True),
    # restricted - not authorized
    (ProtectionMode.public, True, None, False),
    (ProtectionMode.protected, True, None, False),
    (ProtectionMode.protected, True, {'read_access': True}, False)
))
def test_can_create_events(dummy_category, dummy_user, protection_mode, creation_restricted, acl, allowed):
    dummy_category.protection_mode = protection_mode
    dummy_category.event_creation_restricted = creation_restricted
    if acl:
        dummy_category.update_principal(dummy_user, **acl)
    assert dummy_category.can_create_events(dummy_user) == allowed


def test_can_create_events_no_user(dummy_category):
    assert not dummy_category.can_create_events(None)
eb54c75c0f5b7e909177777ce935358b7ac25def
Add zip and unzip to zip_file
py_sys/file/zip_file.py
py_sys/file/zip_file.py
Python
0.000001
@@ -0,0 +1,1567 @@
+# coding=utf-8

import os
import zipfile

class ZipFile(object):

    def __init__(self):
        pass

    def zip(self, dir_path, zip_file):
        file_list = []

        def walk_dir(sub_dir):
            for root, dirs, files in os.walk(sub_dir):
                for _file in files:
                    file_list.append(os.path.join(root, _file))
                for _dir in dirs:
                    walk_dir(_dir)

        if os.path.isfile(dir_path):
            file_list.append(dir_path)
        else :
            walk_dir(dir_path)

        zf = zipfile.ZipFile(zip_file, "w", zipfile.zlib.DEFLATED)

        for tar in file_list:
            arcname = tar[len(dir_path):]
            zf.write(tar, arcname)
        zf.close()

    def unzip(self, zip_file, dir_path):
        if not os.path.exists(dir_path): os.mkdir(dir_path, 0777)

        zf_obj = zipfile.ZipFile(zip_file)

        for zf_name in zf_obj.namelist():
            zf_name = zf_name.replace('\\','/')

            if zf_name.endswith('/'):
                os.mkdir(os.path.join(dir_path, zf_name))
            else:
                ext_file = os.path.join(dir_path, zf_name)
                ext_dir= os.path.dirname(ext_file)

                if not os.path.exists(ext_dir):
                    os.mkdir(ext_dir,0777)

                out_file = open(ext_file, 'wb')
                out_file.write(zf_obj.read(zf_name))
                out_file.close()
e27b005e5dc797e2326ab175ef947021c5a85cb7
Add ptt.py
ptt.py
ptt.py
Python
0.000122
@@ -0,0 +1,1623 @@
+import telnetlib
import re

RN = '\r\n'
C_L = '\x0C'
C_Z = '\x1A'
ESC = '\x1B'

class PTT():
    def __init__(self):
        self.ptt = telnetlib.Telnet('ptt.cc')
        self.where = 'login'

    def login(self, username, password, dup=False):
        self.__wait_til('註冊: ', encoding='big5')
        self.__send(username, ',', RN)
        self.__wait_til('密碼: ', encoding='big5')
        self.__send(password, RN)

        index = self.__expect('歡迎您再度拜訪', '重複登入', '請勿頻繁登入')[0]
        if index == 2:
            self.__send(RN)
            index = self.__expect('歡迎您再度拜訪', '重複登入')[0]
        if index == 1:
            self.__send('n' if dup else 'y', RN)
            index = self.__expect('歡迎您再度拜訪')[0]
        if index == -1:
            print("Login failed")
            self.close()
        self.__send(RN)

        index = self.__expect('【主功能表】', '錯誤嘗試')[0]
        if index == 1:
            self.__send('y', RN)
        # in menu now
        self.where = 'menu'

    def close(self):
        self.ptt.close()
        print('Connection closed')

    def __wait_til(self, exp, encoding='utf-8', timeout=None):
        return self.ptt.read_until(exp.encode(encoding), timeout)

    def __send(self, *args):
        s = ''.join(args)
        self.ptt.write(s.encode())

    def __expect(self, *args, encoding='utf-8', timeout=5):
        exp_list = [exp.encode(encoding) for exp in args]
        expect = self.ptt.expect(exp_list, timeout)
        if expect[0] == -1:
            raise TimeoutError(expect[2])
        return expect

    class TimeoutError(Exception):
        pass

if __name__ == '__main__':
    pass
eef2dff2855ef310dbdb6b864a92306cae724ed7
add missing the missing file exceptions.py
pyecharts/exceptions.py
pyecharts/exceptions.py
Python
0.000003
@@ -0,0 +1,41 @@
+class NoJsExtension(Exception):
    pass
0d3255f8a69fe5192cb36ee42a731293cfd09715
Add VmCorTaxonPhenology Class
backend/geonature/core/gn_profiles/models.py
backend/geonature/core/gn_profiles/models.py
Python
0
@@ -0,0 +1,449 @@
+from geonature.utils.env import DB
from utils_flask_sqla.serializers import serializable

@serializable
class VmCorTaxonPhenology(DB.Model):
    __tablename__ = "vm_cor_taxon_phenology"
    __table_args__ = {"schema": "gn_profiles"}
    cd_ref = DB.Column(DB.Integer)
    period = DB.Column(DB.Integer)
    id_nomenclature_life_stage = DB.Column(DB.Integer)
    id_altitude_range = DB.Column(DB.Integer)
    count_valid_data = DB.Column(DB.Integer)
c81342ec9fb12da819fb966d3363fb57e6262916
fix unit tests in windows xp
utest/controller/test_resource_import.py
utest/controller/test_resource_import.py
import os
import unittest
import datafilereader
from robotide.controller.commands import AddKeyword, ChangeCellValue,\
    CreateNewResource, SaveFile
from robot.utils.asserts import assert_equals
from robotide.controller.cellinfo import ContentType, CellType


class TestResourceImport(unittest.TestCase):

    def setUp(self):
        self.res_path = datafilereader.OCCURRENCES_PATH
        self.res_name = 'new_resource_for_test_creating_and_importing_resource.txt'
        self.res_full_name = os.path.join(self.res_path, self.res_name)
        self.new_keyword_name = 'My Keywordian'
        self.ctrl = datafilereader.construct_chief_controller(datafilereader.OCCURRENCES_PATH)
        self.suite = datafilereader.get_ctrl_by_name('TestSuite1', self.ctrl.datafiles)
        self.test = self.suite.tests[0]
        self.test.execute(ChangeCellValue(0,0,self.new_keyword_name))
        self.test.execute(ChangeCellValue(0,1,'value'))

    def tearDown(self):
        os.remove(self.res_full_name)

    def _create_resource(self):
        self.new_resource = self.ctrl.execute(CreateNewResource(self.res_full_name))
        self.new_resource.execute(AddKeyword(self.new_keyword_name, '${moi}'))
        self.new_resource.execute(SaveFile())

    def test_number_of_resources_is_correct(self):
        original_number_of_resources = len(self.ctrl.resources)
        self._create_resource()
        assert_equals(original_number_of_resources+1, len(self.ctrl.resources))
        self._add_resource_import_to_suite()
        assert_equals(original_number_of_resources+1, len(self.ctrl.resources))

    def test_creating_and_importing_resource_file(self):
        self._create_resource()
        self._verify_unidentified_keyword()
        self._add_resource_import_to_suite()
        self._verify_identified_keyword()

    def test_importing_and_creating_resource_file(self):
        self._add_resource_import_to_suite()
        self._verify_unidentified_keyword()
        self._create_resource()
        self._verify_identified_keyword()

    def test_changes_in_resource_file(self):
        self._create_resource()
        self._add_resource_import_to_suite()
        self._keyword_controller.arguments.set_value('')
        self._check_cells(ContentType.USER_KEYWORD, CellType.MUST_BE_EMPTY)

    def test_resource_import_knows_resource_after_import_has_been_removed(self):
        item_without_settings = datafilereader.get_ctrl_by_name('Inner Resource', self.ctrl.datafiles)
        self.assertEqual(list(item_without_settings.imports), [])
        self._create_resource()
        import_ = item_without_settings.imports.add_resource(os.path.join('..', self.res_name))
        self.assertTrue(import_ is not None)
        item_without_settings.imports.delete(0)
        self.assertEqual(self.new_resource, import_.get_imported_controller())

    def test_previously_imported_resource_controller_is_none_by_default(self):
        self._create_resource()
        import_controller = self._add_resource_import_to_suite()
        self.assertEqual(import_controller.get_previous_imported_controller(), None)

    @property
    def _keyword_controller(self):
        return self.ctrl.resources[-1].keywords[-1]

    def _add_resource_import_to_suite(self):
        return self.suite.imports.add_resource(self.res_name)

    def _verify_unidentified_keyword(self):
        self._check_cells(ContentType.STRING, CellType.UNKNOWN)

    def _verify_identified_keyword(self):
        self._check_cells(ContentType.USER_KEYWORD, CellType.MANDATORY)

    def _check_cells(self, keyword_content_type, value_cell_type):
        assert_equals(self.test.get_cell_info(0,0).content_type, keyword_content_type)
        assert_equals(self.test.get_cell_info(0,1).cell_type, value_cell_type)
Python
0.000001
@@ -2633,29 +2633,26 @@
 esource(
-os.path
+'/'
 .join(
+[
 '..', se
@@ -2658,24 +2658,25 @@
 elf.res_name
+]
 ))
 s
de57220c8da35af9931ed24f7ae302cd06a00962
Fix hyperv copy file error logged incorrect
nova/virt/hyperv/pathutils.py
nova/virt/hyperv/pathutils.py
# vim: tabstop=4 shiftwidth=4 softtabstop=4

#
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import os
import shutil

from eventlet.green import subprocess
from nova.openstack.common import log as logging
from oslo.config import cfg

LOG = logging.getLogger(__name__)

hyperv_opts = [
    cfg.StrOpt('instances_path_share',
               default="",
               help='The name of a Windows share name mapped to the '
                    '"instances_path" dir and used by the resize feature '
                    'to copy files to the target host. If left blank, an '
                    'administrative share will be used, looking for the same '
                    '"instances_path" used locally'),
]

CONF = cfg.CONF
CONF.register_opts(hyperv_opts, 'hyperv')
CONF.import_opt('instances_path', 'nova.compute.manager')


class PathUtils(object):
    def open(self, path, mode):
        """Wrapper on __builin__.open used to simplify unit testing."""
        import __builtin__
        return __builtin__.open(path, mode)

    def exists(self, path):
        return os.path.exists(path)

    def makedirs(self, path):
        os.makedirs(path)

    def remove(self, path):
        os.remove(path)

    def rename(self, src, dest):
        os.rename(src, dest)

    def copyfile(self, src, dest):
        self.copy(src, dest)

    def copy(self, src, dest):
        # With large files this is 2x-3x faster than shutil.copy(src, dest),
        # especially when copying to a UNC target.
        # shutil.copyfileobj(...) with a proper buffer is better than
        # shutil.copy(...) but still 20% slower than a shell copy.
        # It can be replaced with Win32 API calls to avoid the process
        # spawning overhead.
        if subprocess.call(['cmd.exe', '/C', 'copy', '/Y', src, dest]):
            raise IOError(_('The file copy from %(src)s to %(dest)s failed'))

    def rmtree(self, path):
        shutil.rmtree(path)

    def get_instances_dir(self, remote_server=None):
        local_instance_path = os.path.normpath(CONF.instances_path)

        if remote_server:
            if CONF.hyperv.instances_path_share:
                path = CONF.hyperv.instances_path_share
            else:
                # Use an administrative share
                path = local_instance_path.replace(':', '$')
            return '\\\\%(remote_server)s\\%(path)s' % locals()
        else:
            return local_instance_path

    def _check_create_dir(self, path):
        if not self.exists(path):
            LOG.debug(_('Creating directory: %s') % path)
            self.makedirs(path)

    def _check_remove_dir(self, path):
        if self.exists(path):
            LOG.debug(_('Removing directory: %s') % path)
            self.rmtree(path)

    def _get_instances_sub_dir(self, dir_name, remote_server=None,
                               create_dir=True, remove_dir=False):
        instances_path = self.get_instances_dir(remote_server)
        path = os.path.join(instances_path, dir_name)
        if remove_dir:
            self._check_remove_dir(path)
        if create_dir:
            self._check_create_dir(path)
        return path

    def get_instance_migr_revert_dir(self, instance_name, create_dir=False,
                                     remove_dir=False):
        dir_name = '%s_revert' % instance_name
        return self._get_instances_sub_dir(dir_name, None, create_dir,
                                           remove_dir)

    def get_instance_dir(self, instance_name, remote_server=None,
                         create_dir=True, remove_dir=False):
        return self._get_instances_sub_dir(instance_name, remote_server,
                                           create_dir, remove_dir)

    def get_vhd_path(self, instance_name):
        instance_path = self.get_instance_dir(instance_name)
        return os.path.join(instance_path, 'root.vhd')

    def get_base_vhd_dir(self):
        return self._get_instances_sub_dir('_base')

    def get_export_dir(self, instance_name):
        dir_name = os.path.join('export', instance_name)
        return self._get_instances_sub_dir(dir_name, create_dir=True,
                                           remove_dir=True)
Python
0
@@ -2458,16 +2458,54 @@
 failed')
+
+            % locals()
 )

 d
18d40200224d68b0ce93c2710516ed63566b1ad3
Add merge migration
osf/migrations/0127_merge_20180822_1927.py
osf/migrations/0127_merge_20180822_1927.py
Python
0.000001
@@ -0,0 +1,339 @@
+# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-08-22 19:27
from __future__ import unicode_literals

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('osf', '0124_merge_20180816_1229'),
        ('osf', '0126_update_review_group_names'),
    ]

    operations = [
    ]
a55fee4515c9e6187198a8fc27ec15e7786d5782
Create utils.py
utils.py
utils.py
Python
0.000001
@@ -0,0 +1,1267 @@
+#!/usr/bin/env python

'''Python script that must be kept with all of these plugins'''

def color(color, message):
    '''color forground/background encoding IRC messages'''

    colors = {'white': '00', 'black': '01', 'blue': '02', 'navy': '02',
              'green': '03', 'red': '04', 'brown': '05', 'maroon': '05',
              'purple': '06', 'orange': '07', 'olive': '07', 'gold': '07',
              'yellow': '08', 'lightgreen': '09', 'lime': '09', 'teal': '10',
              'cyan': '11', 'lightblue': '12', 'royal': '12', 'lightpurple': '13',
              'pink': '13', 'fuchsia': '13', 'grey': '14', 'lightgrey': '0', 'silver': '0'}
    color = str(color).lower()
    message = str(message)
    if '/' in color:
        color = color.split('/')
        message = '\x03' + colors[color[0]] + ',' + colors[color[1]] + message + '\x03'
    else:
        message = '\x03' + colors[color] + message + '\x03'
    return message

def bold(message):
    '''bold encoding IRC messages'''
    return ('\x02' + str(message) + '\x02')

def italic(message):
    '''italicize encoding IRC messages'''
    return ('\x16' + str(message) + '\x16')

def underline(message):
    '''underlined encoding IRC messages'''
    return ('\x1f' + str(message) + '\x1f')
36e6ff93b270672e0918e5ac0d7f9698834ad6ae
add Pathfinder skeleton
game/pathfinding.py
game/pathfinding.py
Python
0
@@ -0,0 +1,413 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

"""pathfinding.py: """


class Pathfinder(object):
    def __init__(self, size_x, size_y):
        self._size_x = size_x
        self._size_y = size_y

    def find_path(self, from_coords, to_coords):
        pass
ca956d335ad6bf6e190869d98c7abb3b554dfa3d
Create TS3IdleBot.py
TS3IdleBot.py
TS3IdleBot.py
Python
0
@@ -0,0 +1,2257 @@
+import telnetlib
import time
from config import config


def getClients():
    print "Getting a list of clients."
    telnet.write("clientlist -times\n")
    clients = telnet.read_until("msg=ok")
    clients = clients.replace(" ", "\n")
    clients = clients.replace("\r", "")
    clients = clients.split("|")
    cLen = len(clients)
    print clients
    for i in range(0, cLen):
        try:
            if config["botname"] in clients[i]:
                clients.remove(clients[i])
            else:
                clients[i] = clients[i].split("\n")
                clients[i] = filter(None,clients[i])
                cLen -= 1
        except IndexError:
            print "Somehow we've escaped the bounds of the loop. :O Skip it and we should be fine."
    return clients

def moveIdlers(clients):
    print "Checking for idlers."
    for i in range(0, len(clients)):
        if float(clients[i][5].strip("client_idle_time=")) > float(config["idle"])*60000:
            print "Moving user " + clients[i][3].replace("client_nickname=", "") + " to idle channel."
            telnet.write("clientmove clid="+clients[i][0].strip("clid=")+ " cid=13\n")
            telnet.read_until("msg=ok")
    print "Done checking for idlers."

print "TS3IdleBot"
print "http://www.github.com/rmgr\n"
print "Exit TS3IdleBot with CTRL + C."
print "Connecting to server " + config["host"]+ ":" + config["port"]
telnet = telnetlib.Telnet(config["host"],config["port"])
telnet.open(telnet.host, telnet.port)
telnet.write("login "+config["user"]+" "+config["pass"]+"\n")
telnet.read_until("msg=ok")
print "Connected successfully."

print "Using virtual server "+config["serverid"]
telnet.write("use sid="+config["serverid"] + "\n")
telnet.read_until("msg=ok")
print "Server selection successful."

print "Setting bot nickname as " + config["botname"] + "."
telnet.write("clientupdate client_nickname="+config["botname"]+"\n")
telnet.read_until("msg=ok")
print "Set successfully."

while True:
    try:
        clients = getClients()
        moveIdlers(clients)
        print "Sleeping for 5 minutes."
        time.sleep(300)
    except KeyboardInterrupt:
        print "Exiting TS3IdleBot"
        exit()
telnet.write("logout\n")
telnet.read_until("msg=ok")
telnet.close()
ae0ebdccfffffbad259842365712bd4b6e52fc8e
add test files for HDF5 class and read_feats function
sprocket/util/tests/test_hdf5.py
sprocket/util/tests/test_hdf5.py
Python
0
@@ -0,0 +1,1187 @@ +from __future__ import division, print_function, absolute_import%0A%0Aimport os%0Aimport unittest%0A%0Aimport numpy as np%0Afrom sprocket.util.hdf5 import HDF5, read_feats%0A%0Adirpath = os.path.dirname(os.path.realpath(__file__))%0Alistf = os.path.join(dirpath, '/data/test.h5')%0A%0Aclass hdf5FunctionsTest(unittest.TestCase):%0A%0A def test_HDF5(self):%0A data1d = np.random.rand(100)%0A data2d = np.random.rand(100).reshape(50, 2)%0A%0A # write test%0A path = os.path.join(dirpath, 'data/test.h5')%0A h5 = HDF5(path, 'w')%0A h5.save(data1d, '1d')%0A h5.save(data2d, '2d')%0A h5.close()%0A%0A # open test%0A tmph5 = HDF5(path, 'r')%0A tmp1d = tmph5.read(ext='1d')%0A tmp2d = tmph5.read(ext='2d')%0A tmph5.close()%0A%0A assert np.allclose(tmp1d, data1d)%0A assert np.allclose(tmp2d, data2d)%0A%0A # tset read_feats function%0A listpath = os.path.join(dirpath, 'data/test.list')%0A with open(listpath, 'w') as fp:%0A fp.write('data/test')%0A list1d = read_feats(listpath, dirpath, ext='1d')%0A assert np.allclose(list1d%5B0%5D, data1d)%0A%0A # remove files%0A os.remove(path)%0A os.remove(listpath)%0A
26fcbefee171f8d56504a7eba121027f0c5be8b5
Add migration for new overrides table
lms/djangoapps/grades/migrations/0013_persistentsubsectiongradeoverride.py
lms/djangoapps/grades/migrations/0013_persistentsubsectiongradeoverride.py
Python
0
@@ -0,0 +1,1120 @@ +# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations, models%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('grades', '0012_computegradessetting'),%0A %5D%0A%0A operations = %5B%0A migrations.CreateModel(%0A name='PersistentSubsectionGradeOverride',%0A fields=%5B%0A ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),%0A ('created', models.DateTimeField(auto_now_add=True, db_index=True)),%0A ('modified', models.DateTimeField(auto_now=True, db_index=True)),%0A ('earned_all_override', models.FloatField(null=True, blank=True)),%0A ('possible_all_override', models.FloatField(null=True, blank=True)),%0A ('earned_graded_override', models.FloatField(null=True, blank=True)),%0A ('possible_graded_override', models.FloatField(null=True, blank=True)),%0A ('grade', models.OneToOneField(related_name='override', to='grades.PersistentSubsectionGrade')),%0A %5D,%0A ),%0A %5D%0A
e5d3fea99d58a1b02ebe84148d63330ea8d5c3a0
Create WordLadder.py
WordLadder.py
WordLadder.py
Python
0
@@ -0,0 +1,299 @@ +'''%0AGiven a source word, target word and an English dictionary, transform the source word to target by %0Achanging/adding/removing 1 character at a time, while all intermediate words being valid English words. %0AReturn the transformation chain which has the smallest number of intermediate words.%0A%0A'''%0A
4ba2f92a9712530d084823dae52f54167f2f3afb
fix test source to work with empty msgs
new_pmlib/TestSimpleSource.py
new_pmlib/TestSimpleSource.py
#========================================================================= # TestSimpleSource #========================================================================= # This class will output messages on a val/rdy interface from a # predefined list. # from new_pymtl import * from ValRdyBundle import OutValRdyBundle class TestSimpleSource( Model ): #----------------------------------------------------------------------- # Constructor #----------------------------------------------------------------------- def __init__( s, nbits, msgs ): s.out = OutValRdyBundle( nbits ) s.done = OutPort ( 1 ) s.msgs = msgs s.idx = 0 #----------------------------------------------------------------------- # Tick #----------------------------------------------------------------------- def elaborate_logic( s ): @s.tick def tick(): # Handle reset if s.reset: s.out.msg.next = s.msgs[0] s.out.val.next = False s.done.next = False return # Check if we have more messages to send. if ( s.idx == len(s.msgs) ): s.out.msg.next = s.msgs[0] s.out.val.next = False s.done.next = True return # At the end of the cycle, we AND together the val/rdy bits to # determine if the output message transaction occured out_go = s.out.val and s.out.rdy # If the output transaction occured, then increment the index. if out_go: s.idx = s.idx + 1 # The output message is always the indexed message in the list, or if # we are done then it is the first message again. if ( s.idx < len(s.msgs) ): s.out.msg.next = s.msgs[s.idx] s.out.val.next = True s.done.next = False else: s.out.msg.next = s.msgs[0] s.out.val.next = False s.done.next = True #----------------------------------------------------------------------- # Line tracing #----------------------------------------------------------------------- def line_trace( s ): return "({:2}) {}".format( s.idx, s.out )
Python
0
@@ -922,16 +922,37 @@ .reset:%0A + if s.msgs:%0A @@ -1132,32 +1132,53 @@ len(s.msgs) ):%0A + if s.msgs:%0A s.out.ms
75aabd425bd32a9467d7a06b250a0a5b1f5ba852
Add more comments
application/serializer.py
application/serializer.py
Python
0
@@ -0,0 +1,578 @@ +'''%0AThis module maps the data that will be used by the marshall when returning the%0Adata to the user%0A'''%0A%0Afrom flask_restful import fields%0A%0Abucket_list_item_serializer = %7B%0A 'item_id': fields.Integer,%0A 'name': fields.String,%0A 'date_created': fields.DateTime,%0A 'date_modified': fields.DateTime,%0A 'done': fields.Boolean%0A%7D%0A%0Abucket_list_serializer = %7B%0A 'id': fields.Integer,%0A 'name': fields.String,%0A 'items':fields.Nested(bucket_list_item_serializer),%0A 'created_by': fields.String,%0A 'date_created': fields.DateTime,%0A 'date_modified': fields.DateTime%0A%7D
da0f31d6ca5aa8f425c86b9c0caf965f062e1dba
test buying max clicks and gen clicks in the same test
functional-tests/suite6.py
functional-tests/suite6.py
Python
0
@@ -0,0 +1,990 @@ +from clickerft.cft import Cft%0Afrom time import sleep%0A%0A%0Aclass Suite4(Cft):%0A%0A def test_buy_target_max_and_gen(self):%0A %22%22%22%0A buy clicks until we have 50 max clicks of 50%0A and 10 clicks/sec%0A %22%22%22%0A targetGen = 4%0A while int(self.clicksPerGeneration.text) %3C targetGen:%0A clicksOwned = int(self.clicksOwned.text)%0A priceGen = int(self.pincreaseClicksPerGeneration.text)%0A for ii in xrange(min(clicksOwned, priceGen)):%0A self.increaseClicksPerGeneration.click()%0A assert int(self.clicksPerGeneration.text) == targetGen%0A%0A targetMax = 12%0A while int(self.maxClicks.text) %3C targetMax:%0A clicksOwned = int(self.clicksOwned.text)%0A priceMax = int(self.pincreaseMaxClicks.text)%0A for ii in xrange(min(clicksOwned, priceMax)):%0A self.increaseMaxClicks.click()%0A assert int(self.maxClicks.text) == targetMax%0A%0A%0Aif __name__ == '__main__':%0A Suite4()%0A
158f04702b6c1dcda9981d8da05fe059e84c3f90
Add example with churches.
examples/churches.py
examples/churches.py
Python
0
@@ -0,0 +1,545 @@ +# -*- coding: utf-8 -*-%0A'''%0AThis script demonstrates using the AATProvider to get the concept of%0AChurches.%0A'''%0A%0Afrom skosprovider_getty.providers import AATProvider%0A%0Aaat = AATProvider(metadata=%7B'id': 'AAT'%7D)%0A%0Achurches = aat.get_by_id(300007466)%0A%0Alang = %5B'en', 'nl', 'es', 'de'%5D%0A%0Aprint('Labels')%0Aprint('------')%0Afor l in churches.labels:%0A print(l.language + ': ' + l.label.decode('utf-8') + ' %5B' + l.type + '%5D')%0A%0Aprint('Notes')%0Aprint('-----')%0Afor n in churches.notes:%0A print(n.language + ': ' + n.note.decode('utf-8') + ' %5B' + n.type + '%5D')%0A
7c82a2a8887d25ef86e5d0004cf0a0e0bc4b23ac
Create CodingContestTorontoParkingTickets2013.py
CodingContestTorontoParkingTickets2013.py
CodingContestTorontoParkingTickets2013.py
Python
0.000001
@@ -0,0 +1,1083 @@ +import re%0Afrom collections import defaultdict%0A%0Aprocessed_data = defaultdict(int) # dict to capture reduced dataset info, default value == 0%0Aonly_chars = re.compile('%5CD+').search # pre-compiled reg-exp, for fast run time, to get street name, ignoring numbers%0A%0A# import raw data file with parking information%0Awith open('Parking_data.csv', 'r') as raw_data:%0A # skip the first line of header data%0A next%0A # iterate over the remaining file line by line%0A for line in raw_data:%0A # split line by ',' into an array%0A worked_line = line.split(',')%0A # get and clean up street name for dict use and, if valid name found, collect fine amount in dict%0A try:%0A processed_data%5Bonly_chars(worked_line%5B7%5D).group(0).lstrip()%5D += int(worked_line%5B4%5D)%0A except:%0A next%0A%0A# find street with greatest total fines processed_data%0Ahighest_street = max(processed_data, key=processed_data.get)%0Ahighest_fine = processed_data%5Bhighest_street%5D%0A%0A# print out the results%0Aprint('Highest revenue street: %7B0%7D with $%7B1%7D.'.format(highest_street, highest_fine))%0A
f8ee383cc3b3f1f9166627e81a64af4939e4de10
add amqp style routing for virtual channels, allows memory backend to behave like amqp
example/topic.py
example/topic.py
Python
0
@@ -0,0 +1,2726 @@ +from kombu.connection import BrokerConnection%0Afrom kombu.messaging import Exchange, Queue, Consumer, Producer%0A%0A# configuration, normally in an ini file%0Aexchange_name = %22test.shane%22%0Aexchange_type = %22topic%22%0Aexchange_durable = True%0Amessage_serializer = %22json%22%0Aqueue_name = %22test.q%22%0A%0A# 1. setup the connection to the exchange%0A# hostname,userid,password,virtual_host not used with memory backend%0Acons_conn = BrokerConnection(hostname=%22localhost%22,%0A userid=%22guest%22,%0A password=%22guest%22,%0A virtual_host=%22/%22,%0A transport=%22memory%22)%0Acons_chan = cons_conn.channel()%0Acons_exch = Exchange(exchange_name, type=exchange_type, durable=exchange_durable)%0A%0Apub_conn = BrokerConnection(hostname=%22localhost%22,%0A userid=%22guest%22,%0A password=%22guest%22,%0A virtual_host=%22/%22,%0A transport=%22memory%22)%0Apub_chan = pub_conn.channel()%0Apub_exch = Exchange(exchange_name, type=exchange_type, durable=exchange_durable)%0A%0A# 2. setup the consumer, the consumer declares/creates the queue, if you%0A# publish to a queue before there is a consumer it will fail unless the queue%0A# was first created and is durable%0Aclass AConsumer:%0A def __init__(self, queue_name, key):%0A self.queue = Queue(queue_name, exchange=cons_exch, routing_key=key)%0A self.consumer = Consumer(cons_chan, %5Bself.queue%5D)%0A self.consumer.consume()%0A%0A def mq_callback(message_data, message):%0A print(%22%25s: %25r: %25r%22 %25 (key, message.delivery_info, message_data,))%0A #message.ack()%0A self.consumer.register_callback(mq_callback)%0A%0Ac1 = AConsumer(%22test_1%22,%22test.1%22)%0Ac2 = AConsumer(%22testing%22,%22test.ing%22)%0A# consumers can use simple pattern matching when defining a queue%0Ac3 = AConsumer(%22test_all%22,%22test.*%22)%0A%0A# 3. publish something to consume%0A# publishers always send to a specific route, the mq will route to the queues%0Aproducer = Producer(pub_chan, exchange=pub_exch, serializer=message_serializer)%0Aproducer.publish(%7B%22name%22: %22Shane Caraveo%22, %22username%22: %22mixedpuppy%22%7D, routing_key=%22test.1%22)%0Aproducer.publish(%7B%22name%22: %22Micky Mouse%22, %22username%22: %22donaldduck%22%7D, routing_key=%22test.ing%22)%0Aproducer.publish(%7B%22name%22: %22Anonymous%22, %22username%22: %22whoami%22%7D, routing_key=%22test.foobar%22)%0A%0Adef have_messages():%0A return sum(%5Bq.qsize() for q in cons_chan.queues.values()%5D)%0A%0A# 5. run the event loop%0Awhile have_messages():%0A try:%0A cons_conn.drain_events()%0A except KeyboardInterrupt:%0A print%0A print %22quitting%22%0A break%0A except Exception, e:%0A import traceback%0A print traceback.format_exc()%0A break%0A%0A
aff827e9cc02bcee6cf8687e1dff65f39daaf6c6
Add a failing test to the landing page to check for upcoming events.
workshops/test/test_landing_page.py
workshops/test/test_landing_page.py
Python
0
@@ -0,0 +1,978 @@ +from django.core.urlresolvers import reverse%0Afrom django.test import TestCase%0Afrom mock import patch%0Afrom datetime import date%0A%0Aclass FakeDate(date):%0A %22A fake replacement for date that can be mocked for testing.%22%0A pass%0A%0A @classmethod%0A def today(cls):%0A return cls(2013, 12, 7)%0A%0A@patch('workshops.models.datetime.date', FakeDate)%0Aclass TestLandingPage(TestCase):%0A %22Tests for the workshop landing page%22%0A%0A fixtures = %5B'event_test'%5D%0A%0A def test_has_upcoming_events(self):%0A %22%22%22Test that the landing page is passed some%0A upcoming_events in the context.%0A %22%22%22%0A%0A response = self.client.get(reverse('index'))%0A%0A # This will fail if the context variable doesn't exist%0A upcoming_events = response.context%5B'upcoming_events'%5D%0A%0A # There are 2 upcoming events%0A assert len(upcoming_events) == 2%0A%0A # They should all start with upcoming%0A assert all(%5Be.slug%5B:8%5D == 'upcoming' for e in upcoming_events%5D)%0A
91918be596c83f468c6c940df7326896aa6082e7
Fix stringify on multichoice forms
adagios/forms.py
adagios/forms.py
# -*- coding: utf-8 -*- from django.utils.encoding import smart_str from django import forms class AdagiosForm(forms.Form): """ Base class for all forms in this module. Forms that use pynag in any way should inherit from this one. """ def clean(self): cleaned_data = {} tmp = super(AdagiosForm, self).clean() for k,v in tmp.items(): if isinstance(k, (unicode)): k = smart_str(k) if isinstance(v, (unicode)): v = smart_str(v) cleaned_data[k] = smart_str(v) return cleaned_data
Python
0.000004
@@ -541,28 +541,17 @@ ta%5Bk%5D = -smart_str(v) +v %0A
cb7bb1d9f24706f3cce2e9841595ee80ce7e2c7f
Implement GetKeyboardType
angr/procedures/win_user32/keyboard.py
angr/procedures/win_user32/keyboard.py
Python
0
@@ -0,0 +1,376 @@ +import angr%0A%0Aclass GetKeyboardType(angr.SimProcedure):%0A def run(self, param):%0A # return the values present at time of author's testing%0A if self.state.solver.is_true(param == 0):%0A return 4%0A if self.state.solver.is_true(param == 1):%0A return 0%0A if self.state.solver.is_true(param == 2):%0A return 12%0A return 0%0A
8692557a3389403b7a3450065d99e3750d91b2ed
Create views.py
pagination_bootstrap/views.py
pagination_bootstrap/views.py
Python
0
@@ -0,0 +1 @@ +%0A
84bf9d04a2bef01a0cd1ffc7db156c0e70f27b91
Disable tests requiring pairing
test/client/audio_consumer_test.py
test/client/audio_consumer_test.py
# Copyright 2016 Mycroft AI, Inc. # # This file is part of Mycroft Core. # # Mycroft Core is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Mycroft Core is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>. import unittest from Queue import Queue import speech_recognition from os.path import dirname, join from speech_recognition import WavFile, AudioData from mycroft.client.speech.listener import AudioConsumer, RecognizerLoop from mycroft.client.speech.local_recognizer import LocalRecognizer from mycroft.stt import MycroftSTT __author__ = 'seanfitz' class MockRecognizer(object): def __init__(self): self.transcriptions = [] def recognize_mycroft(self, audio, key=None, language=None, show_all=False): if len(self.transcriptions) > 0: return self.transcriptions.pop(0) else: raise speech_recognition.UnknownValueError() def set_transcriptions(self, transcriptions): self.transcriptions = transcriptions class AudioConsumerTest(unittest.TestCase): """ AudioConsumerTest """ def setUp(self): self.loop = RecognizerLoop() self.queue = Queue() self.recognizer = MockRecognizer() self.consumer = AudioConsumer( self.loop.state, self.queue, self.loop, MycroftSTT(), LocalRecognizer(self.loop.wakeup_recognizer.key_phrase, self.loop.wakeup_recognizer.phonemes, "1e-16"), self.loop.mycroft_recognizer) def __create_sample_from_test_file(self, sample_name): root_dir = dirname(dirname(dirname(__file__))) filename = join( root_dir, 'test', 'client', 'data', sample_name + '.wav') wavfile = WavFile(filename) with wavfile as source: return AudioData( source.stream.read(), wavfile.SAMPLE_RATE, wavfile.SAMPLE_WIDTH) def test_word_extraction(self): """ This is intended to test the extraction of the word: ``mycroft``. The values for ``ideal_begin`` and ``ideal_end`` were found using an audio tool like Audacity and they represent a sample value position of the audio. ``tolerance`` is an acceptable margin error for the distance between the ideal and actual values found by the ``WordExtractor`` """ # TODO: implement WordExtractor test without relying on the listener return audio = self.__create_sample_from_test_file('weather_mycroft') self.queue.put(audio) tolerance = 4000 ideal_begin = 70000 ideal_end = 92000 monitor = {} self.recognizer.set_transcriptions(["what's the weather next week"]) def wakeword_callback(message): monitor['pos_begin'] = message.get('pos_begin') monitor['pos_end'] = message.get('pos_end') self.loop.once('recognizer_loop:wakeword', wakeword_callback) self.consumer.read() actual_begin = monitor.get('pos_begin') self.assertIsNotNone(actual_begin) diff = abs(actual_begin - ideal_begin) self.assertTrue( diff <= tolerance, str(diff) + " is not less than " + str(tolerance)) actual_end = monitor.get('pos_end') self.assertIsNotNone(actual_end) diff = abs(actual_end - ideal_end) self.assertTrue( diff <= tolerance, str(diff) + " is not less than " + str(tolerance)) @unittest.skip('Disabled while unittests are brought upto date') def test_wakeword_in_beginning(self): self.queue.put(self.__create_sample_from_test_file('weather_mycroft')) self.recognizer.set_transcriptions(["what's the weather next week"]) monitor = {} def callback(message): monitor['utterances'] = message.get('utterances') self.loop.once('recognizer_loop:utterance', callback) self.consumer.read() utterances = monitor.get('utterances') self.assertIsNotNone(utterances) self.assertTrue(len(utterances) == 1) self.assertEquals("what's the weather next week", utterances[0]) @unittest.skip('Disabled while unittests are brought upto date') def test_wakeword(self): self.queue.put(self.__create_sample_from_test_file('mycroft')) self.recognizer.set_transcriptions(["silence"]) monitor = {} def callback(message): monitor['utterances'] = message.get('utterances') self.loop.once('recognizer_loop:utterance', callback) self.consumer.read() utterances = monitor.get('utterances') self.assertIsNotNone(utterances) self.assertTrue(len(utterances) == 1) self.assertEquals("silence", utterances[0]) def test_ignore_wakeword_when_sleeping(self): self.queue.put(self.__create_sample_from_test_file('mycroft')) self.recognizer.set_transcriptions(["not detected"]) self.loop.sleep() monitor = {} def wakeword_callback(message): monitor['wakeword'] = message.get('utterance') self.loop.once('recognizer_loop:wakeword', wakeword_callback) self.consumer.read() self.assertIsNone(monitor.get('wakeword')) self.assertTrue(self.loop.state.sleeping) def test_wakeup(self): self.queue.put(self.__create_sample_from_test_file('mycroft_wakeup')) self.loop.sleep() self.consumer.read() self.assertFalse(self.loop.state.sleeping) def test_stop(self): self.queue.put(self.__create_sample_from_test_file('mycroft')) self.consumer.read() self.queue.put(self.__create_sample_from_test_file('stop')) self.recognizer.set_transcriptions(["stop"]) monitor = {} def utterance_callback(message): monitor['utterances'] = message.get('utterances') self.loop.once('recognizer_loop:utterance', utterance_callback) self.consumer.read() utterances = monitor.get('utterances') self.assertIsNotNone(utterances) self.assertTrue(len(utterances) == 1) self.assertEquals("stop", utterances[0]) def test_record(self): self.queue.put(self.__create_sample_from_test_file('mycroft')) self.consumer.read() self.queue.put(self.__create_sample_from_test_file('record')) self.recognizer.set_transcriptions(["record"]) monitor = {} def utterance_callback(message): monitor['utterances'] = message.get('utterances') self.loop.once('recognizer_loop:utterance', utterance_callback) self.consumer.read() utterances = monitor.get('utterances') self.assertIsNotNone(utterances) self.assertTrue(len(utterances) == 1) self.assertEquals("record", utterances[0])
Python
0.000001
@@ -6040,32 +6040,101 @@ tate.sleeping)%0A%0A + @unittest.skip('Disabled while unittests are brought upto date')%0A def test_sto @@ -6768,32 +6768,101 @@ utterances%5B0%5D)%0A%0A + @unittest.skip('Disabled while unittests are brought upto date')%0A def test_rec
060c6d2eeea2235cda955c873b50e0aa2a4accd0
use 20
farmer/models.py
farmer/models.py
#coding=utf8 import os import time import json from datetime import datetime from commands import getstatusoutput from django.db import models class Job(models.Model): # hosts, like web_servers:host1 . inventories = models.TextField(null = False, blank = False) # 0, do not use sudo; 1, use sudo . sudo = models.BooleanField(default = True) # for example: ansible web_servers -m shell -a 'du -sh /tmp' # the 'du -sh /tmp' is cmd here cmd = models.TextField(null = False, blank = False) # return code of this job rc = models.IntegerField(null = True) result = models.TextField(null = True) start = models.DateTimeField(null = True) end = models.DateTimeField(null = True) @property def cmd_shell(self): option = self.sudo and '--sudo -m shell -a' or '-m shell -a' return 'ansible %s %s "%s"' % (self.inventories, option, self.cmd) def run(self): if os.fork() == 0: tmpdir = '/tmp/ansible_%s' % time.time() os.mkdir(tmpdir) self.start = datetime.now() self.save() cmd_shell = self.cmd_shell + ' -t ' + tmpdir status, output = getstatusoutput(cmd_shell) self.end = datetime.now() result = {} for f in os.listdir(tmpdir): result[f] = json.loads(open(tmpdir + '/' + f).read()) self.rc = status self.result = json.dumps(result) self.save() os.system('rm -rf ' + tmpdir) def __unicode__(self): return self.cmd_shell
Python
0.99981
@@ -802,16 +802,22 @@ '--sudo +-f 20 -m shell @@ -825,16 +825,22 @@ -a' or ' +-f 20 -m shell
799109759114d141d71bed777b9a1ac2ec26a264
add Red object detection
python/ObjectDetection/RedExtractObject.py
python/ObjectDetection/RedExtractObject.py
Python
0.000002
@@ -0,0 +1,683 @@ +import cv2%0Aimport numpy as np%0A%0Avideo = cv2.VideoCapture(0)%0A%0Awhile (1):%0A%0A # Take each frame%0A _, frame = video.read()%0A # Convert BGR to HSV%0A hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)%0A%0A # define range of blue color in HSV%0A lower_red = np.array(%5B150, 50, 50%5D)%0A upper_red = np.array(%5B255, 255, 150%5D)%0A # Threshold the HSV image to get only blue colors%0A mask = cv2.inRange(hsv, lower_red, upper_red) # Bitwise-AND mask and original image%0A res = cv2.bitwise_and(frame, frame, mask=mask)%0A cv2.imshow('frame', frame)%0A cv2.imshow('mask', mask)%0A cv2.imshow('res', res)%0A k = cv2.waitKey(5) & 0xFF%0A if k == 27:%0A break%0Acv2.destroyAllWindows()
21490bd6cd03d159a440b2c13a6b4641c789c954
Add example
examples/example.py
examples/example.py
Python
0.000003
@@ -0,0 +1,1493 @@ +import sys%0A%0Afrom tumblpy import Tumblpy%0A%0Akey = raw_input('App Consumer Key: ')%0Asecret = raw_input('App Consumer Secret: ')%0A%0Aif not 'skip-auth' in sys.argv:%0A t = Tumblpy(key, secret)%0A%0A callback_url = raw_input('Callback URL: ')%0A%0A auth_props = t.get_authentication_tokens(callback_url=callback_url)%0A auth_url = auth_props%5B'auth_url'%5D%0A%0A OAUTH_TOKEN_SECRET = auth_props%5B'oauth_token_secret'%5D%0A%0A print('Connect with Tumblr via: %7B%7D'.format(auth_url))%0A%0A oauth_token = raw_input('OAuth Token (from callback url): ')%0A oauth_verifier = raw_input('OAuth Verifier (from callback url): ')%0A%0A t = Tumblpy(key, secret, oauth_token, OAUTH_TOKEN_SECRET)%0A%0A authorized_tokens = t.get_authorized_tokens(oauth_verifier)%0A%0A final_oauth_token = authorized_tokens%5B'oauth_token'%5D%0A final_oauth_token_secret = authorized_tokens%5B'oauth_token_secret'%5D%0A%0A print('OAuth Token: %7B%7D'.format(final_oauth_token))%0A print('OAuth Token Secret: %7B%7D'.format(final_oauth_token_secret))%0Aelse:%0A final_oauth_token = raw_input('OAuth Token: ')%0A final_oauth_token_secret = raw_input('OAuth Token Secret: ')%0A%0At = Tumblpy(key, secret, final_oauth_token, final_oauth_token_secret)%0A%0Ablog_url = t.post('user/info')%0Ablog_url = blog_url%5B'user'%5D%5B'blogs'%5D%5B0%5D%5B'url'%5D%0A%0Aprint('Your blog url is: %7B%7D'.format(blog_url))%0A%0Aposts = t.posts(blog_url)%0A%0Aprint('Here are some posts this blog has made:', posts)%0A%0A# print t.post('post', blog_url=blog_url, params=%7B'type':'text', 'title': 'Test', 'body': 'Lorem ipsum.'%7D)%0A
ece6fb4561e338e32e8527a068cd386f00886a67
Add example with reuters dataset.
examples/reuters.py
examples/reuters.py
Python
0
@@ -0,0 +1,1979 @@ +%22%22%22shell%0A!pip install -q -U pip%0A!pip install -q -U autokeras==1.0.8%0A!pip install -q git+https://github.com/keras-team/[email protected]%0A%22%22%22%0A%0A%22%22%22%0ASearch for a good model for the%0A%5BReuters%5D(https://keras.io/ja/datasets/#_5) dataset.%0A%22%22%22%0A%0Aimport tensorflow as tf%0Afrom tf.keras.datasets import reuters%0Aimport numpy as np%0A%0Aimport autokeras as ak%0A%0A# Prepare the dataset.%0Adef reuters_raw(max_features=20000):%0A%0A index_offset = 3 # word index offset%0A%0A (x_train, y_train), (x_test, y_test) = tf.keras.datasets.reuters.load_data(%0A num_words=max_features,%0A index_from=index_offset)%0A x_train = x_train%0A y_train = y_train.reshape(-1, 1)%0A x_test = x_test%0A y_test = y_test.reshape(-1, 1)%0A%0A word_to_id = tf.keras.datasets.reuters.get_word_index()%0A word_to_id = %7Bk: (v + index_offset) for k, v in word_to_id.items()%7D%0A word_to_id%5B%22%3CPAD%3E%22%5D = 0%0A word_to_id%5B%22%3CSTART%3E%22%5D = 1%0A word_to_id%5B%22%3CUNK%3E%22%5D = 2%0A%0A id_to_word = %7Bvalue: key for key, value in word_to_id.items()%7D%0A x_train = list(map(lambda sentence: ' '.join(%0A id_to_word%5Bi%5D for i in sentence), x_train))%0A x_test = list(map(lambda sentence: ' '.join(%0A id_to_word%5Bi%5D for i in sentence), x_test))%0A x_train = np.array(x_train, dtype=np.str)%0A x_test = np.array(x_test, dtype=np.str)%0A return (x_train, y_train), (x_test, y_test)%0A%0A%0A# Prepare the data.%0A(x_train, y_train), (x_test, y_test) = reuters_raw()%0Aprint(x_train.shape) # (8982,)%0Aprint(y_train.shape) # (8982, 1)%0Aprint(x_train%5B0%5D%5B:50%5D) # %3CSTART%3E %3CUNK%3E %3CUNK%3E said as a result of its decemb%0A%0A# Initialize the TextClassifier%0Aclf = ak.TextClassifier(%0A max_trials=5,%0A overwrite=True,%0A)%0A%0A# Callback to avoid overfitting with the EarlyStopping.%0Acbs = %5B%0A tf.keras.callbacks.EarlyStopping(patience=3),%0A%5D%0A%0A# Search for the best model.%0Aclf.fit(%0A x_train,%0A y_train,%0A epochs=10,%0A callback=cbs%0A)%0A%0A# Evaluate on the testing data.%0Aprint('Accuracy: %7Baccuracy%7D'.format(accuracy=clf.evaluate(x_test, y_test)))%0A
315914bbec88e11bf5ed3bcab29218592549eccf
Create Kmeans.py
Kmeans.py
Kmeans.py
Python
0
@@ -0,0 +1,2043 @@ +import collections%0Afrom nltk import word_tokenize%0Afrom nltk.corpus import stopwords%0Afrom nltk.stem import PorterStemmer%0Afrom sklearn.cluster import KMeans%0Afrom sklearn.feature_extraction.text import TfidfVectorizer%0Afrom pprint import pprint%0Aimport csv%0Aimport pandas%0A%0Adef word_tokenizer(text):%0A #tokenizes and stems the text%0A tokens = word_tokenize(text)%0A stemmer = PorterStemmer()%0A tokens = %5Bstemmer.stem(t) for t in tokens if t not in stopwords.words('english')%5D%0A return tokens%0A%0A%0Adef cluster_sentences(sentences, nb_of_clusters=5):%0A tfidf_vectorizer = TfidfVectorizer(tokenizer=word_tokenizer,%0A%09 stop_words=stopwords.words('english'),%0A%09 max_df=0.99,%0A%09 min_df=0.01,%0A%09 lowercase=True)%0A #builds a tf-idf matrix for the sentences%0A tfidf_matrix = tfidf_vectorizer.fit_transform(sentences)%0A kmeans = KMeans(n_clusters=nb_of_clusters)%0A kmeans.fit(tfidf_matrix)%0A clusters = collections.defaultdict(list)%0A for i, label in enumerate(kmeans.labels_):%0A clusters%5Blabel%5D.append(i)%0A return dict(clusters)%0A%0Aimport csv%0A%0Awith open(r'C:%5CSales%5CSP.csv') as f:%0A reader = csv.reader(f)%0A Pre_sentence = list(reader)%0A%0Aflatten = lambda l: %5Bitem for sublist in l for item in sublist%5D%0Asentences = flatten(Pre_sentence)%0A%0Awith open(r'C:%5CSales%5CCat.csv') as g:%0A reader_cat = csv.reader(g)%0A Pre_Cat = list(reader_cat)%0ACats = flatten(Pre_Cat)%0A%0Aif __name__ == %22__main__%22:%0A # sentences = %5B%22Nature is beautiful%22,%22I like green apples%22,%0A%09 # %22We should protect the trees%22,%22Fruit trees provide fruits%22,%0A%09 # %22Green apples are tasty%22,%22My name is Dami%22%5D%0A nclusters= 100%0A clusters = cluster_sentences(sentences, nclusters)%0A for cluster in range(nclusters):%0A print (%22Grouped Engagements %22,cluster,%22:%22)%0A for i,sentence in enumerate(clusters%5Bcluster%5D):%0A print (%22%5CtEngagement %22, Cats%5Bsentence%5D,%22: %22, sentences%5Bsentence%5D)%0A
b0377568c9b927db588b006b7312cbe8ed9d48b7
Add tremelo example
examples/tremelo.py
examples/tremelo.py
Python
0.000358
@@ -0,0 +1,725 @@ +# Author: Martin McBride%0A# Created: 2016-01-08%0A# Copyright (C) 2016, Martin McBride%0A# License: MIT%0A# Website sympl.org/pysound%0A#%0A# Square wave example%0A%0A%0Atry:%0A import pysound%0Aexcept ImportError:%0A # if pysound is not installed append parent dir of __file__ to sys.path%0A import sys, os%0A sys.path.insert(0, os.path.abspath(os.path.split(os.path.abspath(__file__))%5B0%5D+'/..'))%0A%0Afrom pysound.components.soundfile import write_wav%0Afrom pysound.components.wavetable import square_wave%0Afrom pysound.components.wavetable import sine_wave%0A%0A#%0A# Create a tremelo effect%0A#%0Aamp = sine_wave(frequency=10, amplitude=0.1, offset = 0.8)%0Awave = square_wave(frequency=400, amplitude=amp)%0Awrite_wav(source=wave, filename='tremelo.wav')
ea26478495d5aec6925e32c9a87245bf2e1e4bc8
Add script demonstrating raising and catching Exceptions.
rps/errors.py
rps/errors.py
Python
0
@@ -0,0 +1,383 @@ +gestures = %5B%22rock%22, %22paper%22, %22scissors%22%5D%0A%0Adef verify_move(player_move):%0A if player_move not in gestures:%0A raise Exception(%22Wrong input!%22)%0A return player_move%0A%0A# let's catch an exception%0Atry:%0A player_move = verify_move(input(%22%5Brock,paper,scissors%5D: %22))%0A print(%22The move was correct.%22)%0Aexcept Exception:%0A print(%22The move was incorrect and Exception was raised.%22)%0A
ecb3bd6fd9b6496a751a2145909648ba1be8f908
add linear interpolation tests
isochrones/tests/test_interp.py
isochrones/tests/test_interp.py
Python
0.000001
@@ -0,0 +1,1095 @@ +import itertools%0Aimport logging%0A%0Aimport numpy as np%0Aimport pandas as pd%0Afrom scipy.interpolate import RegularGridInterpolator%0A%0Afrom isochrones.interp import DFInterpolator%0A%0Adef test_interp():%0A xx, yy, zz = %5Bnp.arange(10 + np.log10(n))*n for n in %5B1, 10, 100%5D%5D%0A%0A def func(x, y, z):%0A return x**2*np.cos(y/10) + z%0A%0A df = pd.DataFrame(%5B(x, y, z, func(x, y, z)) for x, y, z in itertools.product(xx, yy, zz)%5D,%0A columns=%5B'x', 'y', 'z', 'val'%5D).set_index(%5B'x','y', 'z'%5D)%0A%0A grid = np.reshape(df.val.values, (10, 11, 12))%0A interp = RegularGridInterpolator(%5Bxx, yy, zz%5D, grid)%0A%0A df_interp = DFInterpolator(df)%0A%0A grid_pars = %5B6, 50, 200%5D%0A pars = %5B3.1, 44, 503%5D%0A%0A # Make sure grid point returns correct exact value%0A assert df_interp(grid_pars, 'val') == func(*grid_pars)%0A%0A # Check linear interpolation vis-a-vis scipy%0A try:%0A assert np.isclose(df_interp(pars, 'val'), interp(pars)%5B0%5D, rtol=1e-10, atol=1e-11)%0A except AssertionError:%0A logging.debug('mine: %7B%7D, scipy: %7B%7D'.format(df_interp(pars, 'val'), interp(pars)%5B0%5D))%0A raise%0A
947c9ef100686fa1ec0acaa10bc49bf6c785665b
Use unified class for json output
ffflash/container.py
ffflash/container.py
Python
0.000011
@@ -0,0 +1,1299 @@ +from os import path%0A%0Afrom ffflash import RELEASE, log, now, timeout%0Afrom ffflash.lib.clock import epoch_repr%0Afrom ffflash.lib.data import merge_dicts%0Afrom ffflash.lib.files import read_json_file, write_json_file%0A%0A%0Aclass Container:%0A def __init__(self, spec, filename):%0A self._spec = spec%0A self._location = path.abspath(filename)%0A self.data = read_json_file(self._location, fallback=%7B%7D)%0A%0A self._info()%0A%0A def _info(self, info=%7B%7D):%0A self.data%5B'_info'%5D = self.data.get('_info', %7B%7D)%0A self.data%5B'_info'%5D%5B'generator'%5D = RELEASE%0A%0A self.data%5B'_info'%5D%5B'access'%5D = self.data%5B'_info'%5D.get('access', %7B%7D)%0A if not self.data%5B'_info'%5D%5B'access'%5D.get('first', False):%0A self.data%5B'_info'%5D%5B'access'%5D%5B'first'%5D = now%0A self.data%5B'_info'%5D%5B'access'%5D%5B'last'%5D = now%0A self.data%5B'_info'%5D%5B'access'%5D%5B'overall'%5D = epoch_repr(%0A abs(now - self.data%5B'_info'%5D%5B'access'%5D%5B'first'%5D),%0A ms=True%0A )%0A self.data%5B'_info'%5D%5B'access'%5D%5B'timeout'%5D = timeout%0A%0A if info:%0A self.data%5B'_info'%5D = merge_dicts(self.data%5B'_info'%5D, info)%0A%0A def save(self, info=%7B%7D):%0A self._info(info)%0A if write_json_file(self._location, self.data):%0A log.info('%7B%7D saved %7B%7D'.format(self._spec, self._location))%0A
6c5dad5d617892a3ea5cdd20cbaef89189307195
add simple content-based model for coldstart
polara/recommender/coldstart/models.py
polara/recommender/coldstart/models.py
Python
0
@@ -0,0 +1,809 @@ +import numpy as np%0Afrom polara.recommender.models import RecommenderModel%0A%0A%0Aclass ContentBasedColdStart(RecommenderModel):%0A def __init__(self, *args, **kwargs):%0A super(ContentBasedColdStart, self).__init__(*args, **kwargs)%0A self.method = 'CB'%0A self._key = '%7B%7D_cold'.format(self.data.fields.itemid)%0A self._target = self.data.fields.userid%0A%0A def build(self):%0A pass%0A%0A def get_recommendations(self):%0A item_similarity_scores = self.data.cold_items_similarity%0A%0A user_item_matrix = self.get_training_matrix()%0A user_item_matrix.data = np.ones_like(user_item_matrix.data)%0A%0A scores = item_similarity_scores.dot(user_item_matrix.T).tocsr()%0A top_similar_users = self.get_topk_elements(scores).astype(np.intp)%0A return top_similar_users%0A
2ca6b22e645cbbe63737d4ac3929cb23700a2e06
Prepare v1.2.342.dev
flexget/_version.py
flexget/_version.py
""" Current FlexGet version. This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by release scripts in continuous integration. Should (almost) never be set manually. The version should always be set to the <next release version>.dev The jenkins release job will automatically strip the .dev for release, and update the version again for continued development. """ __version__ = '1.2.341'
Python
0.000002
@@ -439,11 +439,15 @@ '1.2.34 -1 +2.dev '%0A
edbc9f2c31f98e1447c352058aa05e6884a0927b
Create fold_eigenvalues.py
fold_eigenvalues.py
fold_eigenvalues.py
Python
0.000002
@@ -0,0 +1,1222 @@ +#Definition of inputs and outputs%0A#==================================%0A##%5BMes scripts GEOL%5D=group%0A##entree=vector%0A##dip_dir=field entree%0A##dip=field entree%0A%0A#Algorithm body%0A#==================================%0Afrom qgis.core import *%0Afrom apsg import *%0A%0A%0Alayer = processing.getObject(entree)%0Adipdir = layer.fieldNameIndex(dip_dir)%0Adip = layer.fieldNameIndex(dip)%0A%0A%0Aif layer.selectedFeatureCount():%0A g= Group(%5BVec3(Fol(elem.attributes()%5Bdipdir%5D,elem.attributes()%5Bdip%5D)) for elem in layer.selectedFeatures()%5D,name='plis')%0Aelse:%0A g= Group(%5BVec3(Fol(elem.attributes()%5Bdipdir%5D,elem.attributes()%5Bdip%5D)) for elem in layer.getFeatures()%5D,name='plis')%0A%0A%0A%0A%0A%0Aresultat= %22fold plunge: : %22 + str(int(round(Ortensor(g).eigenlins.data%5B2%5D.dd%5B1%5D))) + %22 -%3E %22 + str(int(round(Ortensor(g).eigenlins.data%5B2%5D.dd%5B0%5D)))%0A%0As = StereoNet()%0Aa = s.ax%0As.line(g.aslin, 'b.',markersize=18)%0As.line(Ortensor(g).eigenlins.data%5B0%5D,'g.',markersize=18)%0As.plane(Ortensor(g).eigenfols.data%5B0%5D,'g')%0As.line(Ortensor(g).eigenlins.data%5B1%5D,'c.',markersize=18)%0As.plane(Ortensor(g).eigenfols.data%5B1%5D,'c')%0As.line(Ortensor(g).eigenlins.data%5B2%5D,'r.',markersize=18)%0As.plane(Ortensor(g).eigenfols.data%5B2%5D,'r')%0Aa.set_title(resultat, y=1.06, size=14, color='red')%0As.show()%0A
f55771da6a617c71f2eb620c11fb54e033c64338
Migrate upload-orange-metadata process type
resolwe_bio/migrations/0002_metadata_table_type.py
resolwe_bio/migrations/0002_metadata_table_type.py
Python
0
@@ -0,0 +1,464 @@ +from django.db import migrations%0A%0Afrom resolwe.flow.migration_ops import ResolweProcessChangeType%0A%0A%0Aclass Migration(migrations.Migration):%0A %22%22%22%0A Change the %60%60upload-orange-metadata%60%60 process type.%0A %22%22%22%0A%0A dependencies = %5B%0A (%22resolwe_bio%22, %220001_squashed_0015_sample_indices%22),%0A %5D%0A%0A operations = %5B%0A ResolweProcessChangeType(%0A process=%22upload-orange-metadata%22,%0A new_type=%22data:metadata:unique:%22,%0A ),%0A %5D%0A
4170807e4a1c70eef6416fe3f1661c9c1c99a9da
Add pysal test
tests/test_pysal.py
tests/test_pysal.py
Python
0.000026
@@ -0,0 +1,176 @@ +import unittest%0A%0Afrom pysal.weights import lat2W%0A%0Aclass TestPysal(unittest.TestCase):%0A def test_distance_band(self):%0A w = lat2W(4,4)%0A self.assertEqual(16, w.n)
69db5c70e6ba9cc5af2910f08a6e2c4421397ead
Fix path joining on Windows
tests/test_types.py
tests/test_types.py
# -*- coding: utf-8 -*- from subprocess import PIPE from mock import Mock from pathlib import Path import pytest from tests.utils import CorrectedCommand, Rule, Command from thefuck import const from thefuck.exceptions import EmptyCommand class TestCorrectedCommand(object): def test_equality(self): assert CorrectedCommand('ls', None, 100) == \ CorrectedCommand('ls', None, 200) assert CorrectedCommand('ls', None, 100) != \ CorrectedCommand('ls', lambda *_: _, 100) def test_hashable(self): assert {CorrectedCommand('ls', None, 100), CorrectedCommand('ls', None, 200)} == {CorrectedCommand('ls')} def test_representable(self): assert '{}'.format(CorrectedCommand('ls', None, 100)) == \ 'CorrectedCommand(script=ls, side_effect=None, priority=100)' assert u'{}'.format(CorrectedCommand(u'echo café', None, 100)) == \ u'CorrectedCommand(script=echo café, side_effect=None, priority=100)' class TestRule(object): def test_from_path(self, mocker): match = object() get_new_command = object() load_source = mocker.patch( 'thefuck.types.load_source', return_value=Mock(match=match, get_new_command=get_new_command, enabled_by_default=True, priority=900, requires_output=True)) assert Rule.from_path(Path('/rules/bash.py')) \ == Rule('bash', match, get_new_command, priority=900) load_source.assert_called_once_with('bash', '/rules/bash.py') @pytest.mark.parametrize('rules, exclude_rules, rule, is_enabled', [ (const.DEFAULT_RULES, [], Rule('git', enabled_by_default=True), True), (const.DEFAULT_RULES, [], Rule('git', enabled_by_default=False), False), ([], [], Rule('git', enabled_by_default=False), False), ([], [], Rule('git', enabled_by_default=True), False), (const.DEFAULT_RULES + ['git'], [], Rule('git', enabled_by_default=False), True), (['git'], [], Rule('git', enabled_by_default=False), True), (const.DEFAULT_RULES, ['git'], Rule('git', enabled_by_default=True), False), (const.DEFAULT_RULES, ['git'], Rule('git', enabled_by_default=False), False), ([], ['git'], Rule('git', enabled_by_default=True), False), ([], ['git'], Rule('git', enabled_by_default=False), False)]) def test_is_enabled(self, settings, rules, exclude_rules, rule, is_enabled): settings.update(rules=rules, exclude_rules=exclude_rules) assert rule.is_enabled == is_enabled def test_isnt_match(self): assert not Rule('', lambda _: False).is_match( Command('ls')) def test_is_match(self): rule = Rule('', lambda x: x.script == 'cd ..') assert rule.is_match(Command('cd ..')) @pytest.mark.usefixtures('no_colors') def test_isnt_match_when_rule_failed(self, capsys): rule = Rule('test', Mock(side_effect=OSError('Denied')), requires_output=False) assert not rule.is_match(Command('ls')) assert capsys.readouterr()[1].split('\n')[0] == '[WARN] Rule test:' def test_get_corrected_commands_with_rule_returns_list(self): rule = Rule(get_new_command=lambda x: [x.script + '!', x.script + '@'], priority=100) assert list(rule.get_corrected_commands(Command(script='test'))) \ == [CorrectedCommand(script='test!', priority=100), CorrectedCommand(script='test@', priority=200)] def test_get_corrected_commands_with_rule_returns_command(self): rule = Rule(get_new_command=lambda x: x.script + '!', priority=100) assert list(rule.get_corrected_commands(Command(script='test'))) \ == [CorrectedCommand(script='test!', priority=100)] class TestCommand(object): @pytest.fixture(autouse=True) def Popen(self, monkeypatch): Popen = Mock() Popen.return_value.stdout.read.return_value = b'stdout' Popen.return_value.stderr.read.return_value = b'stderr' monkeypatch.setattr('thefuck.types.Popen', Popen) return Popen @pytest.fixture(autouse=True) def prepare(self, monkeypatch): monkeypatch.setattr('thefuck.types.os.environ', {}) monkeypatch.setattr('thefuck.types.Command._wait_output', staticmethod(lambda *_: True)) def test_from_script_calls(self, Popen, settings): settings.env = {} assert Command.from_raw_script( ['apt-get', 'search', 'vim']) == Command( 'apt-get search vim', 'stdout', 'stderr') Popen.assert_called_once_with('apt-get search vim', shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, env={}) @pytest.mark.parametrize('script, result', [ ([''], None), (['', ''], None), (['ls', '-la'], 'ls -la'), (['ls'], 'ls')]) def test_from_script(self, script, result): if result: assert Command.from_raw_script(script).script == result else: with pytest.raises(EmptyCommand): Command.from_raw_script(script)
Python
0
@@ -18,16 +18,26 @@ -8 -*-%0A%0A +import os%0A from sub @@ -1484,16 +1484,77 @@ =True))%0A + rule_path = os.path.join(os.sep, 'rules', 'bash.py')%0A @@ -1580,32 +1580,25 @@ th(Path( -'/ rule -s/bash.py' +_path )) %5C%0A @@ -1719,24 +1719,17 @@ h', -'/ rule -s/bash.py' +_path )%0A%0A
484a2bf0c28aa2bbc910ca20849840bf518d4329
Add utils.banners test case
tests/test_utils.py
tests/test_utils.py
Python
0.000001
@@ -0,0 +1,880 @@ +# Foremast - Pipeline Tooling%0A#%0A# Copyright 2016 Gogo, LLC%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22);%0A# you may not use this file except in compliance with the License.%0A# You may obtain a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS,%0A# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0A# See the License for the specific language governing permissions and%0A# limitations under the License.%0A%0A%22%22%22Test utils.%22%22%22%0A%0Afrom unittest import mock%0Afrom foremast.utils.banners import banner%0A%0A%[email protected]('foremast.utils.banners.LOG')%0Adef test_utils_banner(mock_log):%0A banner('test', border='+', width=10)%0A mock_log.info.assert_called_with('+' * 10)%0A
45efbbdfd62cd0f9f8232bfd7ebd1aae0ac6cd17
Create humidity.py
abstractions/sensor/humidity/humidity.py
abstractions/sensor/humidity/humidity.py
Python
0.001003
@@ -0,0 +1,843 @@ +# This code has to be added to __init__.py in folder .../devices/sensor%0A%0Aclass Humidity():%0A def __family__(self):%0A return %22Humidity%22%0A%0A def __getHumidity__(self):%0A raise NotImplementedError%0A%0A @api(%22Humidity%22, 0)%0A @request(%22GET%22, %22sensor/humidity/*%22)%0A @response(contentType=M_JSON)%0A def humidityWildcard(self):%0A values = %7B%7D%0A humidity = self.__getHumidity__()%0A values%5B%22float%22%5D = %22%25f%22 %25 humidity%0A values%5B%22percent%22%5D = %22%25d%22 %25 (humidity * 100)%0A return values%0A%0A @api(%22Humidity%22)%0A @request(%22GET%22, %22sensor/humidity/float%22)%0A @response(%22%25f%22)%0A def getHumidity(self):%0A return self.__getHumidity__()%0A%0A @api(%22Humidity%22)%0A @request(%22GET%22, %22sensor/humidity/percent%22)%0A @response(%22%25d%22)%0A def getHumidityPercent(self):%0A return self.__getHumidity__() * 100%0A%0A
c9bd5ba167284d79ae0cbe7aaaf9ec8536bef918
add hiprec.py
benchexec/tools/hiprec.py
benchexec/tools/hiprec.py
Python
0.002017
@@ -0,0 +1,2827 @@ +#!/usr/bin/env python%0A%22%22%22%0ABenchExec is a framework for reliable benchmarking.%0AThis file is part of BenchExec.%0A%0ACopyright (C) 2007-2015 Dirk Beyer%0AAll rights reserved.%0A%0ALicensed under the Apache License, Version 2.0 (the %22License%22);%0Ayou may not use this file except in compliance with the License.%0AYou may obtain a copy of the License at%0A%0A http://www.apache.org/licenses/LICENSE-2.0%0A%0AUnless required by applicable law or agreed to in writing, software%0Adistributed under the License is distributed on an %22AS IS%22 BASIS,%0AWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0ASee the License for the specific language governing permissions and%0Alimitations under the License.%0A%22%22%22%0A%0A# prepare for Python 3%0Afrom __future__ import absolute_import, division, print_function, unicode_literals%0A%0Aimport logging%0Aimport subprocess%0Aimport sys%0Aimport os%0Aimport re%0A%0Aimport benchexec.util as util%0Aimport benchexec.tools.template%0Aimport benchexec.result as result%0A%0A%0Asys.dont_write_bytecode = True # prevent creation of .pyc files%0A%0Aif __name__ == %22__main__%22:%0A sys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir))%0A%0Aimport benchexec.result as result%0Aimport benchexec.util as util%0Aimport benchexec.tools.template%0Afrom benchexec.model import SOFTTIMELIMIT%0A%0AREQUIRED_PATHS = %5B%0A %22hiprec%22,%0A %5D%0A%0Aclass Tool(benchexec.tools.template.BaseTool):%0A %22%22%22%0A Tool wrapper for HIPrec.%0A %22%22%22%0A%0A def executable(self):%0A executable = util.find_executable('hiprec')%0A return executable%0A%0A%0A def working_directory(self, executable):%0A return os.curdir%0A%0A%0A def name(self):%0A return 'hiprec'%0A%0A%0A def cmdline(self, executable, options, tasks, propertyfile=None, rlimits=%7B%7D):%0A return %5Bexecutable%5D + options + tasks%0A%0A%0A def determine_result(self, returncode, returnsignal, output, isTimeout):%0A %22%22%22%0A @param returncode: code returned by CPAchecker%0A @param returnsignal: signal, which terminated CPAchecker%0A @param output: the output of CPAchecker%0A @return: status of CPAchecker after executing a run%0A %22%22%22%0A%0A for line in output:%0A if line.startswith('Verification result: '):%0A line = line%5B22:%5D.strip()%0A if line.startswith('TRUE'):%0A newStatus = result.RESULT_TRUE_PROP%0A elif line.startswith('FALSE'):%0A newStatus = result.RESULT_FALSE_REACH%0A else:%0A newStatus = result.RESULT_UNKNOWN%0A%0A if not status:%0A status = newStatus%0A elif newStatus != result.RESULT_UNKNOWN:%0A status = %22%7B0%7D (%7B1%7D)%22.format(status, newStatus)%0A%0A if not status:%0A status = result.RESULT_UNKNOWN%0A return status%0A
d726fd9b05b846097ee877ad0897f8416dbceaf7
Add missing __init__
gallery/__init__.py
gallery/__init__.py
Python
0.998696
@@ -0,0 +1,23 @@ +from .gallery import *%0A
f8da70b177fff3d87e55ecd9972c7a5e6deea964
Fix to documentation in function.py
blaze/compute/function.py
blaze/compute/function.py
""" The purpose of this module is to create blaze functions. A Blaze Function carries a polymorphic signature which allows it to verify well-typedness over the input arguments, and to infer the result of the operation. Blaze function also create a deferred expression graph when executed over operands. A blaze function carries default ckernel implementations as well as plugin implementations. """ from __future__ import print_function, division, absolute_import from collections import namedtuple # TODO: Remove circular dependency between blaze.objects.Array and blaze.compute import blaze import datashape from datashape import coretypes from ..datadescriptor import DeferredDescriptor from .expr import construct, ExprContext Overload = namedtuple('Overload', 'resolved_sig, sig, func') class BlazeFunc(object): """ Blaze function. This is like the numpy ufunc object, in that it holds all the overloaded implementations of a function, and provides dispatch when called as a function. Objects of this type can be created directly, or using one of the decorators like @function . Attributes ---------- overloader : datashape.OverloadResolver This is the multiple dispatch overload resolver which is used to determine the overload upon calling the function. ckernels : list of ckernels This is the list of ckernels corresponding to the signatures in overloader. plugins : dict of {pluginname : (overloader, datalist)} For each plugin that has registered with this blaze function, there is an overloader and corresponding data object describing execution using that plugin. fullname : string The fully qualified name of the function. """ def __init__(self, module, name): self._module = module self._name = name # The ckernels list corresponds to the # signature indices in the overloader self.overloader = datashape.OverloadResolver(self.fullname) self.ckernels = [] # Each plugin has its own overloader and data (a two-tuple) self.plugins = {} @property def name(self): """Return the name of the blazefunc.""" return self._name @property def module(self): return self._module @property def fullname(self): return self._module + '.' + self._name @property def available_plugins(self): return list(self.plugins.keys()) def add_overload(self, sig, ck): """ Adds a single signature and its ckernel to the overload resolver. """ self.overloader.extend_overloads([sig]) self.ckernels.append(ck) def add_plugin_overload(self, sig, data, pluginname): """ Adds a single signature and corresponding data for a plugin implementation of the function. """ # Get the overloader and data list for the plugin overloader, datalist = self.plugins.get(pluginname, (None, None)) if overloader is None: overloader = datashape.OverloadResolver(self.fullname) datalist = [] self.plugins[pluginname] = (overloader, datalist) # Add the overload overloader.extend_overloads([sig]) datalist.append(data) def __call__(self, *args): """ Apply blaze kernel `kernel` to the given arguments. 
Returns: a Deferred node representation the delayed computation """ # Convert the arguments into blaze.Array args = [blaze.array(a) for a in args] # Merge input contexts ctxs = [term.expr[1] for term in args if isinstance(term, blaze.Array) and term.expr] ctx = ExprContext(ctxs) # Find match to overloaded function argstype = coretypes.Tuple([a.dshape for a in args]) idx, match = self.overloader.resolve_overload(argstype) overload = Overload(match, self.overloader[idx], self.ckernels[idx]) # Construct graph term = construct(self, ctx, overload, args) desc = DeferredDescriptor(term.dshape, (term, ctx)) return blaze.Array(desc) def __str__(self): return "BlazeFunc %s" % self.name def __repr__(self): # TODO proper repr return str(self) def _add_elementwise_dims_to_ds(ds, typevar): if isinstance(ds, coretypes.DataShape): tlist = ds.parameters else: tlist = (ds,) return coretypes.DataShape(typevar, *tlist) def _add_elementwise_dims_to_sig(sig, typevarname): # Process the signature to add 'Dims... *' broadcasting sig = datashape.dshape(sig) if len(sig) == 1: sig = sig[0] if not isinstance(sig, coretypes.Function): raise TypeError(('Only function signatures allowed as' + 'overloads, not %s') % sig) if datashape.has_ellipsis(sig): raise TypeError(('Signature provided to ElementwiseBlazeFunc' + 'already includes ellipsis: %s') % sig) dims = coretypes.Ellipsis(coretypes.TypeVar(typevarname)) params = [_add_elementwise_dims_to_ds(param, dims) for param in sig.parameters] return coretypes.Function(*params) class ElementwiseBlazeFunc(BlazeFunc): """ This is a kind of BlazeFunc that is always processed element-wise. When overloads are added to it, they have 'Dims... *' prepend the the datashape of every argument and the return type. """ def add_overload(self, sig, ck): # Prepend 'Dims... *' to args and return type sig = _add_elementwise_dims_to_sig(sig, 'Dims') BlazeFunc.add_overload(self, sig, ck) def add_plugin_overload(self, sig, data, pluginname): # Prepend 'Dims... *' to args and return type sig = _add_elementwise_dims_to_sig(sig, 'Dims') BlazeFunc.add_plugin_overload(self, sig, data, pluginname)
Python
0.000014
@@ -1674,16 +1674,166 @@ plugin.%0A + name : string%0A The name of the function (e.g. %22sin%22).%0A module : string%0A The name of the module the function is in (e.g. %22blaze%22)%0A full @@ -1894,16 +1894,35 @@ function + (e.g. %22blaze.sin%22) .%0A%0A %22
c206969facfc0e46d7ec4d3f60ce2e6a07956dbd
Use filfinder to get the average radial width of features in the moment 0
14B-088/HI/analysis/run_filfinder.py
14B-088/HI/analysis/run_filfinder.py
Python
0
@@ -0,0 +1,1257 @@ +%0Afrom fil_finder import fil_finder_2D%0Afrom basics import BubbleFinder2D%0Afrom spectral_cube.lower_dimensional_structures import Projection%0Afrom astropy.io import fits%0Afrom radio_beam import Beam%0Afrom astropy.wcs import WCS%0Aimport astropy.units as u%0Aimport matplotlib.pyplot as p%0A%0A'''%0AFilaments in M33? Why not?%0A'''%0A%0Amom0_fits = fits.open(%22/home/eric/MyRAID/M33/14B-088/HI/full_imaging/M33_14B-088_HI.clean.image.pbcov_gt_0.3_masked.mom0.fits%22)%5B0%5D%0Amom0 = Projection(mom0_fits.data, wcs=WCS(mom0_fits.header))%0Amom0.meta%5B'beam'%5D = Beam.from_fits_header(mom0_fits.header)%0A%0A# Create the bubble mask instead of letting FilFinder to do it.%0Abub = BubbleFinder2D(mom0, sigma=80.)%0A%0Afils = fil_finder_2D(mom0.value, mom0.header, 10, distance=0.84e6)%0Afils.mask = ~(bub.mask.copy())%0Afils.medskel()%0Afils.analyze_skeletons()%0A# So at least on of the radial profiles fails. BUT the second fit is to a%0A# skeleton that is essentially the entire disk, so plot without interactivity%0A# and save the plot and the parameters shown in verbose mode.%0Ap.ioff()%0Afils.find_widths(verbose=True, max_distance=500, auto_cut=False, try_nonparam=False)%0A%0A# Fit Parameters: %5B 541.31726502 129.85351117 180.0710914 304.01262168%0A# Fit Errors: %5B 0.89151974 0.48394493 0.27313627 1.1462345 %5D%0A
da2de3d9d4b36bf2068dbe5b80d785748f532292
Add __init__.py for the schedule package
pygotham/schedule/__init__.py
pygotham/schedule/__init__.py
Python
0
@@ -0,0 +1,24 @@ +%22%22%22Schedule package.%22%22%22%0A
da5fed886d519b271a120820668d21518872f52c
Remove Duplicates from Sorted Array problem
remove_duplicates_from_sorted_array.py
remove_duplicates_from_sorted_array.py
Python
0
@@ -0,0 +1,939 @@ +'''%0AGiven a sorted array, remove the duplicates in place such that each element appear only once and return the new length.%0A%0ADo not allocate extra space for another array, you must do this in place with constant memory.%0A%0AFor example,%0AGiven input array A = %5B1,1,2%5D,%0A%0AYour function should return length = 2, and A is now %5B1,2%5D.%0A'''%0A%0A'''%0AUse two pointers. Quite straightforward.%0A'''%0A%0Aclass Solution:%0A # @param a list of integers%0A # @return an integer%0A def removeDuplicates(self, A):%0A if len(A) %3C 2:%0A return len(A)%0A p1 = 0%0A p2 = 1%0A while p2 %3C len(A):%0A while p2 %3C len(A) and A%5Bp1%5D == A%5Bp2%5D:%0A p2 += 1%0A p1 += 1%0A if p2 %3C len(A):%0A A%5Bp1%5D = A%5Bp2%5D%0A return p1%0A%0Aif __name__ == '__main__':%0A s = Solution()%0A A = %5B1, 1, 2, 2, 3%5D%0A print s.removeDuplicates(A)%0A print A%0A A = %5B1, 1%5D%0A print s.removeDuplicates(A)%0A print A%0A
14302f83d755d2319a00db123dab14b300c8c93f
Add python patch script
patch.py
patch.py
Python
0.000001
@@ -0,0 +1,1914 @@ +import json%0Aimport subprocess%0A%0A# This script will:%0A# - read current version%0A# - increment patch version%0A# - update version in a few places%0A# - insert new line in ripme.json with message%0A%0Amessage = raw_input('message: ')%0A%0Awith open('ripme.json') as dataFile:%0A ripmeJson = json.load(dataFile)%0AcurrentVersion = ripmeJson%5B%22latestVersion%22%5D%0A%0Aprint 'Current version ' + currentVersion%0A%0AversionFields = currentVersion.split('.')%0ApatchCur = int(versionFields%5B2%5D)%0ApatchNext = patchCur + 1%0AmajorMinor = versionFields%5B:2%5D%0AmajorMinor.append(str(patchNext))%0AnextVersion = '.'.join(majorMinor)%0A%0Aprint 'Updating to ' + nextVersion%0A%0AsubstrExpr = 's/' + currentVersion + '/' + nextVersion + '/'%0Asubprocess.call(%5B'sed', '-i', '-e', substrExpr, 'src/main/java/com/rarchives/ripme/ui/UpdateUtils.java'%5D)%0Asubprocess.call(%5B'git', 'grep', 'DEFAULT_VERSION.*' + nextVersion,%0A 'src/main/java/com/rarchives/ripme/ui/UpdateUtils.java'%5D)%0A%0AsubstrExpr = 's/%5C%5C%5C%22latestVersion%5C%5C%5C%22: %5C%5C%5C%22' + currentVersion + '%5C%5C%5C%22/%5C%5C%5C%22latestVersion%5C%5C%5C%22: %5C%5C%5C%22' +%5C%0A nextVersion + '%5C%5C%5C%22/'%0Asubprocess.call(%5B'sed', '-i', '-e', substrExpr, 'ripme.json'%5D)%0Asubprocess.call(%5B'git', 'grep', 'latestVersion', 'ripme.json'%5D)%0A%0AsubstrExpr = 's/%3Cversion%3E' + currentVersion + '/%3Cversion%3E' + nextVersion + '/'%0Asubprocess.call(%5B'sed', '-i', '-e', substrExpr, 'pom.xml'%5D)%0Asubprocess.call(%5B'git', 'grep', '%3Cversion%3E' + nextVersion + '%3C/version%3E', 'pom.xml'%5D)%0A%0AcommitMessage = nextVersion + ': ' + message%0AchangeLogLine = ' %5C%22' + commitMessage + '%5C%22,%5Cn'%0A%0AdataFile = open(%22ripme.json%22, %22r%22)%0AripmeJsonLines = dataFile.readlines()%0AripmeJsonLines.insert(3, changeLogLine)%0AoutputContent = ''.join(ripmeJsonLines)%0AdataFile.close()%0A%0AdataFile = open(%22ripme.json%22, %22w%22)%0AdataFile.write(outputContent)%0AdataFile.close()%0A%0Asubprocess.call(%5B'git', 'add', '-u'%5D)%0Asubprocess.call(%5B'git', 'commit', '-m', commitMessage%5D)%0Asubprocess.call(%5B'git', 'tag', nextVersion%5D)%0A
048e6960d9e6408ef5dbfad2e32d2d1768ead1da
set P(A)
pb151.py
pb151.py
Python
0.999995
@@ -0,0 +1,1181 @@ +import math%0Aimport time%0Aimport random%0A%0At1 = time.time()%0A%0A# A1:16%0A# A2:8%0A# A3:4%0A# A4:2%0A# A5:1%0A'''%0Adef getRandom(n):%0A return random.randint(1,n)%0A%0Adef getbatch(env,l):%0A i = getRandom(l)-1%0A t = env%5Bi%5D%0A env.pop(i)%0A if t == 1:%0A return env%0A if t == 2:%0A return env+%5B1%5D%0A if t == 4:%0A return env+%5B1,2%5D%0A if t == 8:%0A return env+%5B1,2,4%5D%0A%0Adef testweek():%0A env = %5B1,2,4,8%5D%0A el = 4%0A count = 0%0A for i in range(14):%0A env = getbatch(env,el)%0A el = len(env)%0A if el == 1:%0A count += 1%0A return count%0A%0AN = 600000000%0A%0Atotal = 0%0A%0Afor i in range(N):%0A total += testweek()%0A%0Aavg = total/N%0Ak = math.pow(10,6)%0A%0Aprint(round(avg*k)/k) %0A'''%0A%0Adef atone(s):%0A if s == %5B1,0,0,0%5D:%0A return 0%0A po = 0%0A pb = 0%0A for i in range(4):%0A if s%5Bi%5D == 0:%0A continue%0A pb += s%5Bi%5D%0A t = s%5B:%5D%0A t%5Bi%5D -= 1%0A for j in range(i):%0A t%5Bj%5D += 1%0A pt = atone(t)%0A if sum(t) == 1 and t%5B0%5D != 1:%0A pt += 1%0A po += s%5Bi%5D*pt%0A return po/pb%0A%0Aavg = atone(%5B1,1,1,1%5D)%0Ak = math.pow(10,6)%0A%0Aprint(round(avg*k)/k)%0A %0Aprint(%22time:%22,time.time()-t1) %0A%0A%0A %0A
0127670f04006997bc4d22c8015588c51bd5785e
Replace basestring with six.string_types
troveclient/auth.py
troveclient/auth.py
# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from __future__ import print_function from troveclient import exceptions def get_authenticator_cls(cls_or_name): """Factory method to retrieve Authenticator class.""" if isinstance(cls_or_name, type): return cls_or_name elif isinstance(cls_or_name, basestring): if cls_or_name == "keystone": return KeyStoneV2Authenticator elif cls_or_name == "rax": return RaxAuthenticator elif cls_or_name == "auth1.1": return Auth1_1 elif cls_or_name == "fake": return FakeAuth raise ValueError("Could not determine authenticator class from the given " "value %r." % cls_or_name) class Authenticator(object): """ Helper class to perform Keystone or other miscellaneous authentication. The "authenticate" method returns a ServiceCatalog, which can be used to obtain a token. """ URL_REQUIRED = True def __init__(self, client, type, url, username, password, tenant, region=None, service_type=None, service_name=None, service_url=None): self.client = client self.type = type self.url = url self.username = username self.password = password self.tenant = tenant self.region = region self.service_type = service_type self.service_name = service_name self.service_url = service_url def _authenticate(self, url, body, root_key='access'): """Authenticate and extract the service catalog.""" # Make sure we follow redirects when trying to reach Keystone tmp_follow_all_redirects = self.client.follow_all_redirects self.client.follow_all_redirects = True try: resp, body = self.client._time_request(url, "POST", body=body) finally: self.client.follow_all_redirects = tmp_follow_all_redirects if resp.status == 200: # content must always present try: return ServiceCatalog(body, region=self.region, service_type=self.service_type, service_name=self.service_name, service_url=self.service_url, root_key=root_key) except exceptions.AmbiguousEndpoints: print("Found more than one valid endpoint. Use a more " "restrictive filter") raise except KeyError: raise exceptions.AuthorizationFailure() except exceptions.EndpointNotFound: print("Could not find any suitable endpoint. 
Correct region?") raise elif resp.status == 305: return resp['location'] else: raise exceptions.from_response(resp, body) def authenticate(self): raise NotImplementedError("Missing authenticate method.") class KeyStoneV2Authenticator(Authenticator): def authenticate(self): if self.url is None: raise exceptions.AuthUrlNotGiven() return self._v2_auth(self.url) def _v2_auth(self, url): """Authenticate against a v2.0 auth service.""" body = {"auth": { "passwordCredentials": { "username": self.username, "password": self.password} } } if self.tenant: body['auth']['tenantName'] = self.tenant return self._authenticate(url, body) class Auth1_1(Authenticator): def authenticate(self): """Authenticate against a v2.0 auth service.""" if self.url is None: raise exceptions.AuthUrlNotGiven() auth_url = self.url body = { "credentials": { "username": self.username, "key": self.password }} return self._authenticate(auth_url, body, root_key='auth') class RaxAuthenticator(Authenticator): def authenticate(self): if self.url is None: raise exceptions.AuthUrlNotGiven() return self._rax_auth(self.url) def _rax_auth(self, url): """Authenticate against the Rackspace auth service.""" body = {'auth': { 'RAX-KSKEY:apiKeyCredentials': { 'username': self.username, 'apiKey': self.password, 'tenantName': self.tenant} } } return self._authenticate(self.url, body) class FakeAuth(Authenticator): """Useful for faking auth.""" def authenticate(self): class FakeCatalog(object): def __init__(self, auth): self.auth = auth def get_public_url(self): return "%s/%s" % ('http://localhost:8779/v1.0', self.auth.tenant) def get_token(self): return self.auth.tenant return FakeCatalog(self) class ServiceCatalog(object): """Represents a Keystone Service Catalog which describes a service. This class has methods to obtain a valid token as well as a public service url and a management url. """ def __init__(self, resource_dict, region=None, service_type=None, service_name=None, service_url=None, root_key='access'): self.catalog = resource_dict self.region = region self.service_type = service_type self.service_name = service_name self.service_url = service_url self.management_url = None self.public_url = None self.root_key = root_key self._load() def _load(self): if not self.service_url: self.public_url = self._url_for(attr='region', filter_value=self.region, endpoint_type="publicURL") self.management_url = self._url_for(attr='region', filter_value=self.region, endpoint_type="adminURL") else: self.public_url = self.service_url self.management_url = self.service_url def get_token(self): return self.catalog[self.root_key]['token']['id'] def get_management_url(self): return self.management_url def get_public_url(self): return self.public_url def _url_for(self, attr=None, filter_value=None, endpoint_type='publicURL'): """ Fetch the public URL from the Trove service for a particular endpoint attribute. If none given, return the first. """ matching_endpoints = [] if 'endpoints' in self.catalog: # We have a bastardized service catalog. Treat it special. :/ for endpoint in self.catalog['endpoints']: if not filter_value or endpoint[attr] == filter_value: matching_endpoints.append(endpoint) if not matching_endpoints: raise exceptions.EndpointNotFound() # We don't always get a service catalog back ... if 'serviceCatalog' not in self.catalog[self.root_key]: raise exceptions.EndpointNotFound() # Full catalog ... 
catalog = self.catalog[self.root_key]['serviceCatalog'] for service in catalog: if service.get("type") != self.service_type: continue if (self.service_name and self.service_type == 'database' and service.get('name') != self.service_name): continue endpoints = service['endpoints'] for endpoint in endpoints: if not filter_value or endpoint.get(attr) == filter_value: endpoint["serviceName"] = service.get("name") matching_endpoints.append(endpoint) if not matching_endpoints: raise exceptions.EndpointNotFound() elif len(matching_endpoints) > 1: raise exceptions.AmbiguousEndpoints(endpoints=matching_endpoints) else: return matching_endpoints[0].get(endpoint_type, None)
Python
0.999709
@@ -683,16 +683,28 @@ ptions%0A%0A +import six%0A%0A %0Adef get @@ -896,18 +896,24 @@ me, -base +six. string +_types ):%0A
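Decoded, this diff imports six and swaps the Python-2-only basestring for six.string_types, so the isinstance check in get_authenticator_cls works on both interpreters. A minimal standalone demo of the substituted check, assuming the six library is installed:

import six

def is_stringy(value):
    # six.string_types is (str,) on Python 3 and (basestring,) on Python 2,
    # which is exactly the substitution the diff applies.
    return isinstance(value, six.string_types)

print(is_stringy("keystone"))  # True on either interpreter
print(is_stringy(42))          # False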
f0684a5bb5860c2b9caffefb47dc55781092819e
Add eTools engine
searx/engines/etools.py
searx/engines/etools.py
Python
0.000001
@@ -0,0 +1,1314 @@ +%22%22%22%0A eTools (Web)%0A%0A @website https://www.etools.ch%0A @provide-api no%0A @using-api no%0A @results HTML%0A @stable no (HTML can change)%0A @parse url, title, content%0A%22%22%22%0A%0Afrom lxml import html%0Afrom searx.engines.xpath import extract_text%0Afrom searx.url_utils import quote%0Afrom searx.utils import eval_xpath%0A%0Acategories = %5B'general'%5D%0Apaging = False%0Alanguage_support = False%0Asafesearch = True%0A%0Abase_url = 'https://www.etools.ch'%0Asearch_path = '/searchAdvancedSubmit.do'%5C%0A '?query=%7Bsearch_term%7D'%5C%0A '&pageResults=20'%5C%0A '&safeSearch=%7Bsafesearch%7D'%0A%0A%0Adef request(query, params):%0A if params%5B'safesearch'%5D:%0A safesearch = 'true'%0A else:%0A safesearch = 'false'%0A%0A params%5B'url'%5D = base_url + search_path.format(search_term=quote(query), safesearch=safesearch)%0A%0A return params%0A%0A%0Adef response(resp):%0A results = %5B%5D%0A%0A dom = html.fromstring(resp.text)%0A%0A for result in eval_xpath(dom, '//table%5B@class=%22result%22%5D//td%5B@class=%22record%22%5D'):%0A url = eval_xpath(result, './a/@href')%5B0%5D%0A title = extract_text(eval_xpath(result, './a//text()'))%0A content = extract_text(eval_xpath(result, './/div%5B@class=%22text%22%5D//text()'))%0A%0A results.append(%7B'url': url,%0A 'title': title,%0A 'content': content%7D)%0A%0A return results%0A
4523621d2dd8913cb9c4156bf20e800652318a9d
add while loop example
whileloop.py
whileloop.py
Python
0.000009
@@ -0,0 +1,44 @@ +a = 1%0Awhile a %3C 10:%0A print (a)%0A a = a+1%0A
bd7a84353b298ad14634e5c9a7b442146e9bfeeb
Create __init__.py
kesh/__init__.py
kesh/__init__.py
Python
0.000429
@@ -0,0 +1,20 @@ +# Empty __init__.py%0A
11f47fcad839b198d134f34b4489537360703a07
Add helpers.py
ckanext/orgdashboards/tests/helpers.py
ckanext/orgdashboards/tests/helpers.py
Python
0.000015
@@ -0,0 +1,984 @@ +from ckan.tests import factories%0A%0A%0Adef create_mock_data(**kwargs):%0A mock_data = %7B%7D%0A%0A mock_data%5B'organization'%5D = factories.Organization()%0A mock_data%5B'organization_name'%5D = mock_data%5B'organization'%5D%5B'name'%5D%0A mock_data%5B'organization_id'%5D = mock_data%5B'organization'%5D%5B'id'%5D%0A%0A mock_data%5B'dataset'%5D = factories.Dataset(owner_org=mock_data%5B'organization_id'%5D)%0A mock_data%5B'dataset_name'%5D = mock_data%5B'dataset'%5D%5B'name'%5D%0A mock_data%5B'package_id'%5D = mock_data%5B'dataset'%5D%5B'id'%5D%0A%0A mock_data%5B'resource'%5D = factories.Resource(package_id=mock_data%5B'package_id'%5D)%0A mock_data%5B'resource_name'%5D = mock_data%5B'resource'%5D%5B'name'%5D%0A mock_data%5B'resource_id'%5D = mock_data%5B'resource'%5D%5B'id'%5D%0A%0A mock_data%5B'resource_view'%5D = factories.ResourceView(%0A resource_id=mock_data%5B'resource_id'%5D)%0A mock_data%5B'resource_view_title'%5D = mock_data%5B'resource_view'%5D%5B'title'%5D%0A%0A mock_data%5B'context'%5D = %7B%0A 'user': factories._get_action_user_name(kwargs)%0A %7D%0A%0A return mock_data
46e1afd7faae8bd8c62f6b4f5c01322804e68163
add script to visualize simulation coefficients (us, g, us')
Modules/Biophotonics/python/iMC/script_visualize_simulation_coefficients.py
Modules/Biophotonics/python/iMC/script_visualize_simulation_coefficients.py
Python
0
@@ -0,0 +1,2123 @@ +'''%0ACreated on Sep 22, 2015%0A%0A@author: wirkert%0A'''%0A%0Aimport math%0A%0Aimport numpy as np%0Aimport matplotlib.pyplot as plt%0A%0Afrom mc.usuag import UsG%0A%0Aif __name__ == '__main__':%0A # set up plots%0A f, axarr = plt.subplots(1, 4)%0A usplt = axarr%5B0%5D%0A usplt.grid()%0A usplt.set_xlabel(%22wavelengths %5Bnm%5D%22)%0A usplt.set_ylabel(%22us %5Bcm-1%5D%22)%0A usplt.set_title(%22scattering coefficient%22)%0A gplt = axarr%5B1%5D%0A gplt.grid()%0A gplt.set_xlabel(%22wavelengths %5Bnm%5D%22)%0A gplt.set_ylabel(%22g%22)%0A gplt.set_title(%22anisotropy factor%22)%0A usrplt = axarr%5B2%5D%0A usrplt.grid()%0A usrplt.set_xlabel(%22wavelengths %5Bnm%5D%22)%0A usrplt.set_ylabel(%22us' %5Bcm-1%5D%22)%0A usrplt.set_title(%22reduced scattering coefficient%22)%0A aniplt = axarr%5B3%5D%0A aniplt.grid()%0A aniplt.set_xlabel(%22x = ka = size parameter%22)%0A aniplt.set_ylabel(%22g%22)%0A aniplt.set_xscale('log')%0A aniplt.set_title(%22anisotropy%22)%0A # set up simulation%0A usg = UsG()%0A usg.dsp = 0.04%0A# usg.n_medium = 1.33%0A# usg.n_particle = 1.40%0A wavelengths = np.arange(400, 700, 10) * 10 ** -9%0A%0A plt_range = np.array(%5B0.4 / 2. * 10 ** -6%5D)%0A # np.linspace(2., 3., 10) * 10 ** -6%0A # np.array(%5B579. / 2. * 10 ** -9%5D)%0A # np.linspace(0.1, 0.74, 10) * 10 ** -6%0A for i, d in enumerate(plt_range):%0A # set and calculate values%0A usg.r = d / 2.%0A us = %5Busg(w)%5B0%5D for w in wavelengths%5D%0A g = %5Busg(w)%5B1%5D for w in wavelengths%5D%0A g = np.array(g) / np.array(g) * 0.92%0A # plot stuff%0A # from blue to red: the color of the plotted curves%0A plt_color = (1. / float(len(plt_range)) * i,%0A 0.,%0A 1. - (1. / float(len(plt_range)) * i))%0A # plot scattering coefficient%0A usplt.plot(wavelengths * 10 ** 9, np.array(us) / 100., color=plt_color)%0A # plot anisotropy factor%0A gplt.plot(wavelengths * 10 ** 9, g, color=plt_color)%0A # plot reduced scattering coefficient%0A usrplt.plot(wavelengths * 10 ** 9, np.array(us) * (1.0 - np.array(g)) / 100.,%0A color=plt_color)%0A aniplt.plot(2. * math.pi * usg.r / wavelengths * usg.n_medium, g)%0A plt.show()%0A
9429183c1d3ba6f41eb0d5f84b0ca6ed35363b9d
Fix typo in 'Libs.private'
mesonbuild/modules/pkgconfig.py
mesonbuild/modules/pkgconfig.py
# Copyright 2015 The Meson development team # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .. import coredata, build from .. import mesonlib from .. import mlog import os class PkgConfigModule: def _get_lname(self, l, msg, pcfile): # Nothing special if not l.name_prefix_set: return l.name # Sometimes people want the library to start with 'lib' everywhere, # which is achieved by setting name_prefix to '' and the target name to # 'libfoo'. In that case, try to get the pkg-config '-lfoo' arg correct. if l.prefix == '' and l.name.startswith('lib'): return l.name[3:] # If the library is imported via an import library which is always # named after the target name, '-lfoo' is correct. if l.import_filename: return l.name # In other cases, we can't guarantee that the compiler will be able to # find the library via '-lfoo', so tell the user that. mlog.log(mlog.red('WARNING:'), msg.format(l.name, 'name_prefix', l.name, pcfile)) return l.name def generate_pkgconfig_file(self, state, libraries, subdirs, name, description, url, version, pcfile, pub_reqs, priv_reqs, conflicts, priv_libs): coredata = state.environment.get_coredata() outdir = state.environment.scratch_dir fname = os.path.join(outdir, pcfile) with open(fname, 'w') as ofile: ofile.write('prefix=%s\n' % coredata.get_builtin_option('prefix')) ofile.write('libdir=${prefix}/%s\n' % coredata.get_builtin_option('libdir')) ofile.write('includedir=${prefix}/%s\n\n' % coredata.get_builtin_option('includedir')) ofile.write('Name: %s\n' % name) if len(description) > 0: ofile.write('Description: %s\n' % description) if len(url) > 0: ofile.write('URL: %s\n' % url) if len(version) > 0: ofile.write('Version: %s\n' % version) if len(pub_reqs) > 0: ofile.write('Requires: {}\n'.format(' '.join(pub_reqs))) if len(priv_reqs) > 0: ofile.write( 'Requires.private: {}\n'.format(' '.join(priv_reqs))) if len(conflicts) > 0: ofile.write('Conflicts: {}\n'.format(' '.join(conflicts))) if len(priv_libs) > 0: ofile.write( 'Libraries.private: {}\n'.format(' '.join(priv_libs))) ofile.write('Libs: -L${libdir}') msg = 'Library target {0!r} has {1!r} set. Compilers ' \ 'may not find it from its \'-l{2}\' linker flag in the ' \ '{3!r} pkg-config file.' 
for l in libraries: if l.custom_install_dir: ofile.write(' -L${prefix}/%s ' % l.custom_install_dir) lname = self._get_lname(l, msg, pcfile) # If using a custom suffix, the compiler may not be able to # find the library if l.name_suffix_set: mlog.log(mlog.red('WARNING:'), msg.format(l.name, 'name_suffix', lname, pcfile)) ofile.write(' -l{} '.format(lname)) ofile.write('\n') ofile.write('Cflags:') for h in subdirs: if h == '.': h = '' ofile.write(' ') ofile.write(os.path.join('-I${includedir}', h)) ofile.write('\n') def generate(self, state, args, kwargs): if len(args) > 0: raise mesonlib.MesonException('Pkgconfig_gen takes no positional arguments.') libs = kwargs.get('libraries', []) if not isinstance(libs, list): libs = [libs] processed_libs = [] for l in libs: if hasattr(l, 'held_object'): l = l.held_object if not isinstance(l, (build.SharedLibrary, build.StaticLibrary)): raise mesonlib.MesonException('Library argument not a library object.') processed_libs.append(l) libs = processed_libs subdirs = mesonlib.stringlistify(kwargs.get('subdirs', ['.'])) version = kwargs.get('version', '') if not isinstance(version, str): raise mesonlib.MesonException('Version must be a string.') name = kwargs.get('name', None) if not isinstance(name, str): raise mesonlib.MesonException('Name not specified.') filebase = kwargs.get('filebase', name) if not isinstance(filebase, str): raise mesonlib.MesonException('Filebase must be a string.') description = kwargs.get('description', None) if not isinstance(description, str): raise mesonlib.MesonException('Description is not a string.') url = kwargs.get('url', '') if not isinstance(url, str): raise mesonlib.MesonException('URL is not a string.') pub_reqs = mesonlib.stringlistify(kwargs.get('requires', [])) priv_reqs = mesonlib.stringlistify(kwargs.get('requires_private', [])) conflicts = mesonlib.stringlistify(kwargs.get('conflicts', [])) priv_libs = mesonlib.stringlistify(kwargs.get('libraries_private', [])) pcfile = filebase + '.pc' pkgroot = kwargs.get('install_dir',None) if pkgroot is None: pkgroot = os.path.join(state.environment.coredata.get_builtin_option('libdir'), 'pkgconfig') if not isinstance(pkgroot, str): raise mesonlib.MesonException('Install_dir must be a string.') self.generate_pkgconfig_file(state, libs, subdirs, name, description, url, version, pcfile, pub_reqs, priv_reqs, conflicts, priv_libs) return build.Data(False, state.environment.get_scratch_dir(), [pcfile], pkgroot) def initialize(): return PkgConfigModule()
Python
0
@@ -2968,147 +2968,8 @@ )))%0A - if len(priv_libs) %3E 0:%0A ofile.write(%0A 'Libraries.private: %7B%7D%5Cn'.format(' '.join(priv_libs)))%0A @@ -3734,16 +3734,162 @@ e('%5Cn')%0A + if len(priv_libs) %3E 0:%0A ofile.write(%0A 'Libs.private: -L$%7Blibdir%7D %7B%7D%5Cn'.format(' '.join(priv_libs)))%0A
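Decoded, the fix drops the invented 'Libraries.private:' field and instead writes pkg-config's real 'Libs.private' keyword right after the 'Libs:' line. A standalone rendering of the new output logic, with hypothetical link flags:

priv_libs = ["-lm", "-lpthread"]        # hypothetical private link flags
lines = ["Libs: -L${libdir} -lfoo"]
if priv_libs:                           # emitted only when private libs exist
    lines.append("Libs.private: -L${libdir} " + " ".join(priv_libs))
print("\n".join(lines))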
b9c9a1f5cfea61050803ecc442232f2f8b4d7011
Create yaml2json.py
yaml2json.py
yaml2json.py
Python
0.000002
@@ -0,0 +1,156 @@ +#!/usr/bin/python%0A%0Aimport sys%0Aimport yaml%0Aimport json%0A%0Aif __name__ == '__main__':%0A content = yaml.load(sys.stdin)%0A print json.dumps(content, indent=2)%0A %0A
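The record above is Python 2 (print statement, bare yaml.load). A hedged Python 3 rendering of the same stdin-to-stdout converter; safe_load is an editorial substitution for the record's yaml.load, which newer PyYAML versions only accept with an explicit Loader:

import json
import sys

import yaml  # assumes PyYAML is installed

if __name__ == "__main__":
    content = yaml.safe_load(sys.stdin)   # safe_load: no arbitrary-tag execution
    print(json.dumps(content, indent=2))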
7714b3c640a3d6d7fae9dba3496adfddd9354e0e
Add CFFI binding generator
build_wide.py
build_wide.py
Python
0
@@ -0,0 +1,950 @@ +import cffi%0A%0Affibuilder = cffi.FFI()%0A%0Affibuilder.set_source(%0A '_wide',%0A r%22%22%22%0A #include %22wide.c%22%0A %22%22%22,%0A extra_compile_args=%5B'-Werror', '-fno-unwind-tables', '-fomit-frame-pointer'%5D,%0A)%0A%0Affibuilder.cdef(%0A r%22%22%22%0A typedef uint32_t wp_index;%0A typedef double wp_number;%0A%0A wp_index wide_product(%0A wp_index height,%0A const wp_number* a_data,%0A const wp_index* a_indices,%0A const wp_index* a_indptr,%0A wp_index a_width,%0A wp_index a_nnz,%0A const wp_number* b_data,%0A const wp_index* b_indices,%0A const wp_index* b_indptr,%0A wp_index b_width,%0A wp_index b_nnz,%0A wp_number* out_data,%0A wp_index* out_indices,%0A wp_index* out_indptr%0A );%0A%0A wp_index wide_product_max_nnz(%0A const wp_index* a_indptr,%0A const wp_index* b_indptr,%0A wp_index height%0A );%0A %22%22%22,%0A)%0A%0Aif __name__ == '__main__':%0A ffibuilder.compile(verbose=True)%0A
a7f90fcdffd6108a6b0e07c1bd7a32ac00a9642b
Fix cwltest for checking bare paths.
cwltool/cwltest.py
cwltool/cwltest.py
#!/usr/bin/env python import argparse import json import os import subprocess import sys import shutil import tempfile import yaml import pipes import logging import schema_salad.ref_resolver _logger = logging.getLogger("cwltest") _logger.addHandler(logging.StreamHandler()) _logger.setLevel(logging.INFO) UNSUPPORTED_FEATURE = 33 class CompareFail(Exception): pass def compare(a, b): try: if isinstance(a, dict): if a.get("class") == "File": if not b["path"].endswith("/" + a["path"]): raise CompareFail("%s does not end with %s" %(b["path"], a["path"])) # ignore empty collections b = {k: v for k, v in b.iteritems() if not isinstance(v, (list, dict)) or len(v) > 0} if len(a) != len(b): raise CompareFail("expected %s\ngot %s" % (json.dumps(a, indent=4, sort_keys=True), json.dumps(b, indent=4, sort_keys=True))) for c in a: if a.get("class") != "File" or c != "path": if c not in b: raise CompareFail("%s not in %s" % (c, b)) if not compare(a[c], b[c]): return False return True elif isinstance(a, list): if len(a) != len(b): raise CompareFail("expected %s\ngot %s" % (json.dumps(a, indent=4, sort_keys=True), json.dumps(b, indent=4, sort_keys=True))) for c in xrange(0, len(a)): if not compare(a[c], b[c]): return False return True else: if a != b: raise CompareFail("%s != %s" % (a, b)) else: return True except Exception as e: raise CompareFail(str(e)) def run_test(args, i, t): out = {} outdir = None try: if "output" in t: test_command = [args.tool] # Add prefixes if running on MacOSX so that boot2docker writes to /Users if 'darwin' in sys.platform: outdir = tempfile.mkdtemp(prefix=os.path.abspath(os.path.curdir)) test_command.extend(["--tmp-outdir-prefix={}".format(outdir), "--tmpdir-prefix={}".format(outdir)]) else: outdir = tempfile.mkdtemp() test_command.extend(["--outdir={}".format(outdir), "--quiet", t["tool"], t["job"]]) outstr = subprocess.check_output(test_command) out = {"output": json.loads(outstr)} else: test_command = [args.tool, "--conformance-test", "--basedir=" + args.basedir, "--no-container", "--quiet", t["tool"], t["job"]] outstr = subprocess.check_output(test_command) out = yaml.load(outstr) except ValueError as v: _logger.error(v) _logger.error(outstr) except subprocess.CalledProcessError as err: if err.returncode == UNSUPPORTED_FEATURE: return UNSUPPORTED_FEATURE else: _logger.error("""Test failed: %s""", " ".join([pipes.quote(tc) for tc in test_command])) _logger.error(t.get("doc")) _logger.error("Returned non-zero") return 1 except yaml.scanner.ScannerError as e: _logger.error("""Test failed: %s""", " ".join([pipes.quote(tc) for tc in test_command])) _logger.error(outstr) _logger.error("Parse error %s", str(e)) pwd = os.path.abspath(os.path.dirname(t["job"])) # t["args"] = map(lambda x: x.replace("$PWD", pwd), t["args"]) # if "stdin" in t: # t["stdin"] = t["stdin"].replace("$PWD", pwd) failed = False if "output" in t: checkkeys = ["output"] else: checkkeys = ["args", "stdin", "stdout", "createfiles"] for key in checkkeys: try: compare(t.get(key), out.get(key)) except CompareFail as ex: _logger.warn("""Test failed: %s""", " ".join([pipes.quote(tc) for tc in test_command])) _logger.warn(t.get("doc")) _logger.warn("%s expected %s\n got %s", key, json.dumps(t.get(key), indent=4, sort_keys=True), json.dumps(out.get(key), indent=4, sort_keys=True)) _logger.warn("Compare failure %s", ex) failed = True if outdir: shutil.rmtree(outdir, True) if failed: return 1 else: return 0 def main(): parser = argparse.ArgumentParser(description='Compliance tests for cwltool') 
parser.add_argument("--test", type=str, help="YAML file describing test cases", required=True) parser.add_argument("--basedir", type=str, help="Basedir to use for tests", default=".") parser.add_argument("-l", action="store_true", help="List tests then exit") parser.add_argument("-n", type=str, default=None, help="Run a specific tests, format is 1,3-6,9") parser.add_argument("--tool", type=str, default="cwl-runner", help="CWL runner executable to use (default 'cwl-runner'") parser.add_argument("--only-tools", action="store_true", help="Only test tools") args = parser.parse_args() if not args.test: parser.print_help() return 1 with open(args.test) as f: tests = yaml.load(f) failures = 0 unsupported = 0 if args.only_tools: alltests = tests tests = [] for t in alltests: loader = schema_salad.ref_resolver.Loader({"id": "@id"}) cwl, _ = loader.resolve_ref(t["tool"]) if cwl["class"] == "CommandLineTool": tests.append(t) if args.l: for i, t in enumerate(tests): print "[%i] %s" % (i+1, t["doc"].strip()) return 0 if args.n is not None: ntest = [] for s in args.n.split(","): sp = s.split("-") if len(sp) == 2: ntest.extend(range(int(sp[0])-1, int(sp[1]))) else: ntest.append(int(s)-1) else: ntest = range(0, len(tests)) for i in ntest: t = tests[i] sys.stderr.write("\rTest [%i/%i] " % (i+1, len(tests))) sys.stderr.flush() rt = run_test(args, i, t) if rt == 1: failures += 1 elif rt == UNSUPPORTED_FEATURE: unsupported += 1 if failures == 0 and unsupported == 0: _logger.info("All tests passed") return 0 else: _logger.warn("%i failures, %i unsupported features", failures, unsupported) return 1 if __name__ == "__main__": sys.exit(main())
Python
0
@@ -492,16 +492,17 @@ if not +( b%5B%22path%22 @@ -528,16 +528,70 @@ %22path%22%5D) + or (%22/%22 not in b%5B%22path%22%5D and a%5B%22path%22%5D == b%5B%22path%22%5D)) :%0A
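Decoded, the fix widens the output-path comparison: besides the old suffix test, a bare actual path (one with no directory separator) now matches the expected path exactly. The new condition, rendered standalone with illustrative inputs:

def path_matches(expected, actual):
    # Condition from the diff: suffix match as before, OR an exact match
    # when the actual path is bare (contains no "/").
    return (actual.endswith("/" + expected)
            or ("/" not in actual and expected == actual))

print(path_matches("out.txt", "/tmp/job/out.txt"))  # True, old suffix rule
print(path_matches("out.txt", "out.txt"))           # True, new bare-path rule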
bc22cd37a62a4e8e9dbfa677a9b3f70b546f1850
Align dict values
jedihttp/handlers.py
jedihttp/handlers.py
# Copyright 2015 Cedraro Andrea <[email protected]> # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import bottle from bottle import response, request, error import json import jedi import logging app = bottle.Bottle( __name__ ) logger = logging.getLogger( __name__ ) @app.post( '/healthy' ) def healthy(): return { 'healthy': True } @app.post( '/ready' ) def ready(): return { 'ready': True } @app.post( '/completions' ) def completions(): try: logger.debug( 'received /completions request' ) script = _GetJediScript( request.json ) return { 'completions': [ { 'name': completion.name, 'description': completion.description, 'docstring': completion.docstring(), 'module_path': completion.module_path, 'line': completion.line, 'column': completion.column } for completion in script.completions() ] } except Exception as e: message = str( e ) logger.debug( 'Exception in /completions: {0}'.format( message ) ) return bottle.HTTPError( 500, message, e ) @app.post( '/gotodefinition' ) def gotodefinition(): try: logger.debug( 'received /gotodefinition request' ) script = _GetJediScript( request.json ) return { 'definitions': [ { 'module_path': definition.module_path, 'line': definition.line, 'column': definition.column, 'in_builtin_module': definition.in_builtin_module(), 'is_keyword': definition.is_keyword, 'description': definition.description, 'docstring': definition.docstring() } for definition in script.goto_definitions() ] } except Exception as e: message = str( e ) logger.debug( 'Exception in /gotodefinition: {0}'.format( message ) ) return bottle.HTTPError( 500, message, e ) @app.post( '/gotoassignment' ) def gotoassignments(): try: logger.debug( 'received /gotoassignment request' ) script = _GetJediScript( request.json ) return { 'definitions': [ { 'module_path': definition.module_path, 'line': definition.line, 'column': definition.column, 'in_builtin_module': definition.in_builtin_module(), 'is_keyword': definition.is_keyword, 'description': definition.description, 'docstring': definition.docstring() } for definition in script.goto_assignments() ] } except Exception as e: message = str( e ) logger.debug( 'Exception in /gotoassignment: {0}'.format( message ) ) return bottle.HTTPError( 500, message, e ) @app.error() def error( err ): return err.body def _GetJediScript( request_data ): return jedi.Script( request_data[ 'source' ], request_data[ 'line' ], request_data[ 'col' ], request_data[ 'path' ] )
Python
0.000001
@@ -1862,32 +1862,38 @@ 'module_path': + definition.modu @@ -1915,32 +1915,45 @@ 'line': + definition.line @@ -1970,32 +1970,43 @@ 'column': + definition.colu @@ -2097,32 +2097,39 @@ 'is_keyword': + definition.is_k @@ -2157,32 +2157,38 @@ 'description': + definition.desc @@ -2215,32 +2215,40 @@ 'docstring': + definition.docs
6610483e55f5371d5dcfe06e984f791c3f051e4a
fix InMoov launching button
src/main/resources/resource/Intro/InMoov01_start.py
src/main/resources/resource/Intro/InMoov01_start.py
Python
0
@@ -0,0 +1,275 @@ +#########################################%0A# InMoov01_start.py%0A# categories: inmoov%0A# more info @: http://myrobotlab.org/service/InMoov%0A#########################################%0A# uncomment for virtual hardware%0A# Platform.setVirtual(True)%0Ai01 = Runtime.start('i01', 'InMoov2')
9ba00cc698a5ce38d8cfb8eb6e921df0e24525cc
Create netstew.py
netstew.py
netstew.py
Python
0.000005
@@ -0,0 +1,198 @@ +#!/opt/anaconda/bin/python2.7%0A# Print the links to stndard out.%0A%0Afrom bs4 import BeautifulSoup%0A%0Asoup = BeautifulSoup(open(%22index.html%22))%0A%0Afor link in soup.find_all('a'):%0A print(link.get('href'))%0A
2e3af241d989bf2b62bba5e344240246e8ff516b
add leave module
modules/leave.py
modules/leave.py
Python
0.000001
@@ -0,0 +1,628 @@ +class LeaveModule:%0A%09def __init__(self, circa):%0A%09%09self.circa = circa%0A%0A%09def onload(self):%0A%09%09self.circa.add_listener(%22cmd.leave%22, self.leave)%0A%09%09self.circa.add_listener(%22cmd.goaway%22, self.leave)%0A%09%09self.circa.add_listener(%22cmd.quit%22, self.quit)%0A%0A%09def onunload(self):%0A%09%09self.circa.remove_listener(%22cmd.leave%22, self.leave)%0A%09%09self.circa.remove_listener(%22cmd.goaway%22, self.leave)%0A%09%09self.circa.remove_listener(%22cmd.quit%22, self.quit)%0A%0A%09def leave(self, fr, to, text):%0A%09%09if self.circa.is_admin(fr) and fr != to:%0A%09%09%09self.circa.part(to)%0A%0A%09def quit(self, fr, to, text):%0A%09%09if self.circa.is_admin(fr):%0A%09%09%09self.circa.close()%0A%0Amodule = LeaveModule%0A
3411020a0445afcb626e7079ae2f4d17a02d27a0
Add simple YTid2AmaraID mapper.
map_ytid2amaraid.py
map_ytid2amaraid.py
Python
0
@@ -0,0 +1,2647 @@ +#!/usr/bin/env python3%0Aimport argparse, sys%0Afrom pprint import pprint%0Afrom amara_api import *%0Afrom utils import answer_me%0A%0Adef read_cmd():%0A %22%22%22Function for reading command line options.%22%22%22%0A desc = %22Program for mapping YouTube IDs to Amara IDs. If given video is not on Amara, it is created.%22%0A parser = argparse.ArgumentParser(description=desc)%0A parser.add_argument('input_file',metavar='INPUT_FILE', help='Text file containing YouTube IDs and possibly filenames.')%0A parser.add_argument('-l','--lang',dest='lang',required = True, help='Which language do we copy?')%0A parser.add_argument('-c','--credentials',dest='apifile',default='myapi.txt', help='Text file containing your API key and username on the first line.')%0A return parser.parse_args()%0A%0Aopts = read_cmd()%0Ainfile = opts.input_file%0Aapifile = opts.apifile%0Alang = opts.lang%0A%0A# We suppose that the original language is English%0Aif lang == %22en%22: %0A is_original = True # is lang the original language of the video?%0Aelse:%0A is_original = False%0A%0A# List ytids may also contain filenames%0Aytids = %5B%5D%0A# Reading file with YT id's%0Awith open(infile, %22r%22) as f:%0A for line in f:%0A ytids.append(line.split())%0A%0A# File 'apifile' should contain only one line with your Amara API key and Amara username.%0A# Amara API can be found in Settins-%3EAccount-%3E API Access (bottom-right corner)%0Afile = open(apifile, %22r%22)%0AAPI_KEY, USERNAME = file.read().split()%5B0:%5D%0Aprint('Using Amara username: '+USERNAME)%0A#print('Using Amara API key: '+API_KEY)%0A%0Aamara_headers = %7B%0A 'Content-Type': 'application/json',%0A 'X-api-username': USERNAME,%0A 'X-api-key': API_KEY,%0A 'format': 'json'%0A%7D%0A%0Aif len(ytids) %3C 20: # Do not print for large inputs%0A print(%22This is what I got from the input file:%22)%0A print(ytids)%0A%0A answer = answer_me(%22Should I proceed?%22)%0A if not answer:%0A sys.exit(1)%0A%0A%0A# Main loop%0Afor i in range(len(ytids)):%0A ytid_from = ytids%5Bi%5D%5B0%5D%0A sys.stdout.flush()%0A sys.stderr.flush()%0A%0A video_url = 'https://www.youtube.com/watch?v='+ytid_from%0A%0A # Now check whether the video is already on Amara%0A # If not, create it.%0A amara_response = check_video( video_url, amara_headers)%0A if amara_response%5B'meta'%5D%5B'total_count'%5D == 0:%0A amara_response = add_video(video_url, lang, amara_headers)%0A amara_id = amara_response%5B'id'%5D%0A amara_title = amara_response%5B'title'%5D%0A print(ytid_from, AMARA_BASE_URL+'cs/subtitles/editor/'+amara_id+'/'+lang)%0A else:%0A amara_id = amara_response%5B'objects'%5D%5B0%5D%5B'id'%5D%0A amara_title = amara_response%5B'objects'%5D%5B0%5D%5B'title'%5D%0A print(ytid_from, AMARA_BASE_URL+'cs/subtitles/editor/'+amara_id+'/'+lang)%0A%0A
04b08344d1a6305734d749bb77bc4de095c32f55
Fix UnboundLocalError
vint/linting/cli.py
vint/linting/cli.py
import sys from argparse import ArgumentParser import pkg_resources import logging from vint.linting.linter import Linter from vint.linting.env import build_environment from vint.linting.config.config_container import ConfigContainer from vint.linting.config.config_cmdargs_source import ConfigCmdargsSource from vint.linting.config.config_default_source import ConfigDefaultSource from vint.linting.config.config_global_source import ConfigGlobalSource from vint.linting.config.config_project_source import ConfigProjectSource from vint.linting.policy_set import PolicySet from vint.linting.formatter.formatter import Formatter from vint.linting.formatter.json_formatter import JSONFormatter from vint.linting.formatter.statistic_formatter import StatisticFormatter class CLI(object): def start(self): env = self._build_env(sys.argv) self._validate(env) self._adjust_log_level(env) config_dict = self._build_config_dict(env) violations = self._lint_all(env, config_dict) if len(violations) == 0: parser = self._build_argparser() parser.exit(status=0) self._print_violations(violations, config_dict) parser.exit(status=1) def _validate(self, env): parser = self._build_argparser() paths_to_lint = env['file_paths'] if len(paths_to_lint) == 0: logging.error('nothing to check') parser.print_help() parser.exit(status=1) for path_to_lint in paths_to_lint: if not path_to_lint.exists() or not path_to_lint.is_file(): logging.error('no such file or directory: `{path}`'.format( path=str(path_to_lint))) parser.exit(status=1) def _build_config_dict(self, env): config = ConfigContainer( ConfigDefaultSource(env), ConfigGlobalSource(env), ConfigProjectSource(env), ConfigCmdargsSource(env), ) return config.get_config_dict() def _build_argparser(self): parser = ArgumentParser(prog='vint', description='Lint Vim script') parser.add_argument('-v', '--version', action='version', version=self._get_version()) parser.add_argument('-V', '--verbose', action='store_true', help='output verbose message') parser.add_argument('-e', '--error', action='store_true', help='report only errors') parser.add_argument('-w', '--warning', action='store_true', help='report errors and warnings') parser.add_argument('-s', '--style-problem', action='store_true', help='report errors, warnings and style problems') parser.add_argument('-m', '--max-violations', type=int, help='limit max violations count') parser.add_argument('-c', '--color', action='store_true', help='colorize output when possible') parser.add_argument('-j', '--json', action='store_true', help='output json style') parser.add_argument('-t', '--stat', action='store_true', help='output statistic info') parser.add_argument('files', nargs='*', help='file or directory path to lint') return parser def _build_cmdargs(self, argv): """ Build command line arguments dict to use; - displaying usages - vint.linting.env.build_environment This method take an argv parameter to make function pure. """ parser = self._build_argparser() namespace = parser.parse_args(argv[1:]) cmdargs = vars(namespace) return cmdargs def _build_env(self, argv): """ Build an environment object. This method take an argv parameter to make function pure. 
""" cmdargs = self._build_cmdargs(argv) env = build_environment(cmdargs) return env def _build_linter(self, config_dict): policy_set = PolicySet() linter = Linter(policy_set, config_dict) return linter def _lint_all(self, env, config_dict): paths_to_lint = env['file_paths'] violations = [] linter = self._build_linter(config_dict) for file_path in paths_to_lint: violations += linter.lint_file(file_path) return violations def _get_formatter(self, config_dict): if 'cmdargs' not in config_dict: return Formatter(config_dict) cmdargs = config_dict['cmdargs'] if 'json' in cmdargs and cmdargs['json']: return JSONFormatter(config_dict) elif 'stat' in cmdargs and cmdargs['stat']: return StatisticFormatter(config_dict) else: return Formatter(config_dict) def _print_violations(self, violations, config_dict): formatter = self._get_formatter(config_dict) output = formatter.format_violations(violations) print(output) def _get_version(self): version = pkg_resources.require('vim-vint')[0].version return version def _adjust_log_level(self, env): cmdargs = env['cmdargs'] is_verbose = cmdargs.get('verbose', False) log_level = logging.DEBUG if is_verbose else logging.WARNING logger = logging.getLogger() logger.setLevel(log_level)
Python
0.000003
@@ -1027,77 +1027,74 @@ -if len(violations) == 0:%0A parser = self._build_argparser() +parser = self._build_argparser()%0A%0A if len(violations) == 0: %0A
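Decoded, the bug was a scoping one: parser was bound only inside the zero-violations branch, so the later parser.exit(status=1) raised UnboundLocalError whenever violations existed. The diff hoists the binding above the branch. A minimal reproduction of the shape, with illustrative names:

def before(has_violations):
    if not has_violations:
        parser = object()   # bound only on this branch
        return parser
    return parser           # UnboundLocalError when has_violations is True

def after(has_violations):
    parser = object()       # hoisted above the branch, as the diff does
    if not has_violations:
        return parser
    return parser           # always in scope now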
9fef390248387e02498d18ab7bba5b23e3632c7b
Add missing file
api/constants.py
api/constants.py
Python
0.000006
@@ -0,0 +1,263 @@ +QUERY_PARAM_QUERY = 'q'%0AQUERY_PARAM_SORT = 's'%0AQUERY_PARAM_SIZE = 'size'%0AQUERY_PARAM_PAGE = 'page'%0AQUERY_PARAM_FIELDS = 'fields'%0AQUERY_PARAM_OFFSET = 'offset'%0AQUERY_PARAM_INCLUDE = 'include'%0AQUERY_PARAM_EXCLUDE = 'exclude'%0AQUERY_PARAM_WAIT_UNTIL_COMPLETE = 'wuc'%0A
8f9c979fc2936d53321a377c67cbf2e3b4667f95
Create status_light.py
status_light.py
status_light.py
Python
0.000001
@@ -0,0 +1,1625 @@ +import time%0A%0Aclass StatusLight(object):%0A %0A%09%22%22%22available patterns for the status light%22%22%22%0A%09patterns = %7B%0A%09%09'blink_fast' : (.1, %5BFalse, True%5D),%0A%09%09'blink' : (.5, %5BFalse, True%5D),%0A%09%7D%0A%0A%09%22%22%22placeholder for pattern to tenmporarily interrupt%0A%09status light with different pattern%22%22%22%0A%09interrupt_pattern = %5B0, %5B%5D%5D%0A%09%0A%09%22%22%22continue flashing, controlled by the stop%22%22%22%0A%09cont = True%0A%09%0A%09def interrupt(self, action, repeat = 1):%0A%09%09%22%22%22Interupt the current status of the light with a names action%0A%09%09%0A%09%09parameters: action the name of the action%0A%09%09 repeat: the number of times to repeatthe interruption%22%22%22%0A%09%09self.interrupt_pattern%5B0%5D = self.patterns%5Baction%5D%5B0%5D%0A%0A%09%09for i in range(0, repeat):%0A%09%09%09self.interrupt_pattern%5B1%5D.extend(list(self.patterns%5Baction%5D%5B1%5D%5B:%5D))%0A%0A%0A%0A%09def do(self, action):%0A%09%09%22%22%22Perform a status light action%0A%09%09%0A%09%09paramaters: action: the name of tehe action%22%22%22%0A%09%09%0A%09%09if(len(self.interrupt_pattern%5B1%5D)):%0A%09%09%09# if the interrupt_pattern is not empty, prioritize it%0A%09%09%09time.sleep(self.interrupt_pattern%5B0%5D)%0A%09%09%09self.set_state(self.interrupt_pattern%5B1%5D.pop(0))%0A%09%09%09return self.do(action)%0A%0A%09%09for state in self.patterns%5Baction%5D%5B1%5D:%0A%09%09%09# peform the regular action when not interrupted%0A%09%09%09time.sleep(self.patterns%5Baction%5D%5B0%5D)%0A%09%09%09self.set_state(state)%0A%09%09%0A%09%09if self.cont:%0A%09%09%09# continue of not stopped%0A%09%09%09self.do(action)%0A%09%09%0A%09def off(self, state):%0A%09%09%22%22%22Turn off status light%22%22%22%0A%09%09self.cont = False%0A%09%09self.set_state(state)%0A%09%09%0A%09def set_state(self, state):%0A%09%09%22%22%22Turn the light on or off%22%22%22%0A%09%09print 'set state to %25s' %25 state%09%0A%09%09%0A%09%09%0A%09%09%0A%09%09%0Aif __name__ == '__main__':%0A%09light = StatusLight()%0A%09light.interrupt('blink_fast', 3)%0A%09light.do('blink')%0A%0A%0A%0A
dcced707c40c6a970d19dfca496dc86e38e8ea3c
Increments version to 0.2.2
deltas/__init__.py
deltas/__init__.py
from .apply import apply from .operations import Operation, Insert, Delete, Equal from .algorithms import segment_matcher, SegmentMatcher from .algorithms import sequence_matcher, SequenceMatcher from .tokenizers import Tokenizer, RegexTokenizer, text_split, wikitext_split from .segmenters import Segmenter, Segment, MatchableSegment __version__ = "0.2.1"
Python
0.999289
@@ -352,7 +352,7 @@ 0.2. -1 +2 %22%0A
a4ad0ffbda8beb4c2ea4ef0d181ec9ef0de3d1e1
add MD5 checksum script in Python
SystemInfo/1_hashlib.py
SystemInfo/1_hashlib.py
Python
0.000218
@@ -0,0 +1,291 @@ +#!/usr/bin/python%0A#-*- coding:utf-8 -*-%0Aimport hashlib%0Aimport sys%0A%0Adef md5sum(f):%0A%09m = hashlib.md5()%0A%09with open(f) as fd:%0A%09%09while True:%0A%09%09%09data = fd.read(4096)%0A%09%09%09if data:%0A%09%09%09%09m.update(data)%0A%09%09%09else:%0A%09%09%09%09break%0A%09return m.hexdigest()%0A%0Aif __name__ == '__main__':%0A%09print md5sum(sys.argv%5B1%5D)%0A%0A%0A%0A%0A
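The record hashes the file in 4096-byte chunks, so memory use stays constant regardless of file size. A hedged Python 3 rendering of the same idea (the original is Python 2; binary mode and the iter sentinel are the editorial adjustments):

import hashlib
import sys

def md5sum(path, chunk_size=4096):
    """Hash a file incrementally in fixed-size chunks."""
    digest = hashlib.md5()
    with open(path, "rb") as fd:          # binary mode matters on Python 3
        for chunk in iter(lambda: fd.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

if __name__ == "__main__":
    print(md5sum(sys.argv[1]))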