Dataset schema: commit (string, 40 chars) | subject (string, 1–3.25k chars) | old_file (string, 4–311 chars) | new_file (string, 4–311 chars) | old_contents (string, 0–26.3k chars) | lang (string, 3 classes) | proba (float64, 0–1) | diff (string, 0–7.82k chars)
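The `diff` and `old_contents` cells in this dump were stored percent-encoded (`%0A` for newlines, `%22` for quotes, `%25` for a literal percent sign, and so on); the records below are shown decoded. As a minimal sketch of how one record could be decoded programmatically — the field names follow the schema above, but the sample values are shortened, synthetic stand-ins:

```python
from urllib.parse import unquote

def decode_record(record: dict) -> dict:
    """Percent-decode the encoded text columns of one commit record.

    `record` is assumed to be a dict keyed by the schema above (commit,
    subject, old_file, new_file, old_contents, lang, proba, diff).
    """
    decoded = dict(record)
    for key in ("old_contents", "diff"):
        # unquote turns %0A back into newlines, %22 into quotes, etc.
        # (unlike unquote_plus it leaves '+' alone, which matters here
        # because '+' is a diff marker, not an encoded space)
        decoded[key] = unquote(record.get(key, "") or "")
    return decoded

# Tiny synthetic record for illustration (values shortened):
sample = {
    "commit": "6af41b8b1ff4a6eb28167a063668a1f173999e5c",
    "subject": "Create cornersMapping.py",
    "old_contents": "",
    "diff": "@@ -0,0 +1,2262 @@%0A+import csv%0Aimport requests%0A",
}
print(decode_record(sample)["diff"])
```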
commit: 6af41b8b1ff4a6eb28167a063668a1f173999e5c
subject: Create cornersMapping.py
old_file: cornersMapping.py
new_file: cornersMapping.py
lang: Python | proba: 0.000001
diff:
```diff
@@ -0,0 +1,2262 @@
+
import csv
import requests
import time
import json

username = ""

def requestGeoName(row):
    #parts = row.split(',')
    lng = row[0]
    lat = row[1]
    r = requests.get("http://api.geonames.org/findNearestIntersectionOSMJSON?lat="+lat+"&lng="+lng+"&username="+username)
    if (r.status_code == 200):
        return r.json()
    else:
        return {"error":r.status_code}

def requestNameWsUsig(row):
    x = row[0]
    y = row[1]
    reqReverseGeo = requests.get("http://ws.usig.buenosaires.gob.ar/geocoder/2.2/reversegeocoding?y={0}&x={1}".format(y,x))
    resReverseGeo = json.loads(reqReverseGeo.content.replace("(", "").replace(")", ""), encoding="utf-8")

    reqConvertirCoord = requests.get("http://ws.usig.buenosaires.gob.ar/rest/convertir_coordenadas?x={0}&y={1}&output=lonlat".format(resReverseGeo["puerta_x"], resReverseGeo["puerta_y"]))
    resConvertirCoord = reqConvertirCoord.json()

    result = { "intersection" : {
        "lng" : resConvertirCoord["resultado"]["x"],
        "lat" : resConvertirCoord["resultado"]["y"],
        "street1" : resReverseGeo["esquina"],
        "street2" : resReverseGeo["esquina"]
    }}

    return result

with open('mostSearchedPlaces.csv', 'rb') as csvfile:
    with open('mostSearchedPlacesWithCorners.csv', 'a') as outputCSV:
        csv_writer = csv.writer(outputCSV, delimiter=',')

        reader = csv.reader(csvfile, delimiter = ',')
        i = 1
        for row in reader:
            geoNameResult = requestGeoName(row)

            # Check if there is no intersection
            if (geoNameResult == {}):
                geoNameResult = requestNameWsUsig(row)

            print(geoNameResult)

            if (not geoNameResult.has_key("error")):
                row.append(str(geoNameResult["intersection"]["lng"]))
                row.append(str(geoNameResult["intersection"]["lat"]))
                row.append(geoNameResult["intersection"]["street1"].encode("utf-8"))
                row.append(geoNameResult["intersection"]["street2"].encode("utf-8"))

            csv_writer.writerow(row)
            print("Elemento {0} procesado".format(i))
            i += 1
            time.sleep(2)
```

commit: b2d0eaca41f6c697006eeaef38b72af649415d2b
subject: Create models.py
old_file: {{cookiecutter.repo_name}}/{{cookiecutter.src_dir}}/{{cookiecutter.main_app}}/models.py
new_file: {{cookiecutter.repo_name}}/{{cookiecutter.src_dir}}/{{cookiecutter.main_app}}/models.py
lang: Python | proba: 0.000001
diff:
```diff
@@ -0,0 +1,38 @@
+# -*- encoding: utf-8 -*-
# ! python2
```

commit: 89c17110f9d17e99ea7686e884cfba91b4762d57
subject: Add starter code for Lahman db
old_file: pybaseball/lahman.py
new_file: pybaseball/lahman.py
lang: Python | proba: 0
diff:
```diff
@@ -0,0 +1,738 @@
+################################################
# WORK IN PROGRESS: ADD LAHMAN DB TO PYBASEBALL
# TODO: Make a callable function that retrieves the Lahman db
# Considerations: users should have a way to pull just the parts they want
# within their code without having to write / save permanently. They should
# also have the option to write and save permanently if desired.
################################################

import requests
import zipfile
from io import BytesIO
from bs4 import BeautifulSoup

# Download zip file and extract all files into working directory
url = "http://seanlahman.com/files/database/baseballdatabank-2017.1.zip"
s=requests.get(url,stream=True)
z = zipfile.ZipFile(BytesIO(s.content))
z.extractall()
```

commit: 8eafb1b613363f85c9b105812cd5d0047e5ca6ff
subject: Add warp example script
old_file: image_processing/warp_image.py
new_file: image_processing/warp_image.py
lang: Python | proba: 0
diff:
```diff
@@ -0,0 +1,1794 @@
+import argparse

import cv2
import numpy as np
import matplotlib.pyplot as plt

from constants import MAX_WIDTH, MAX_HEIGHT

# Transform Parameters
y = 90
a = 0.75
delta = (MAX_HEIGHT - y) * a

height, width = 500, 320
# Orignal and transformed keypoints
pts1 = np.float32(
    [[delta, y],
     [MAX_WIDTH - delta, y],
     [0, MAX_HEIGHT],
     [MAX_WIDTH, MAX_HEIGHT]])

pts2 = np.float32(
    [[0, 0],
     [width, 0],
     [0, height],
     [width, height]])

# Translation Matrix
tx, ty = 300, 500
T = np.float32([[1, 0, tx], [0, 1, ty], [0, 0, 1]])

new_height, new_width = height + ty, int(width * 1.5) + tx

# calculate the perspective transform matrix
M = cv2.getPerspectiveTransform(pts1, pts2)


def imshow(im, y=None, delta=None, name=""):
    plt.figure(name)
    # BGR to RGB
    plt.imshow(im[:, :, ::-1])
    if y is not None:
        plt.plot([0, delta], [MAX_HEIGHT, y])
        plt.plot([MAX_WIDTH, MAX_WIDTH - delta], [MAX_HEIGHT, y])
        plt.plot([delta, MAX_WIDTH - delta], [y, y])
    plt.grid(True)


def showTransform(image, y, delta):
    im = image.copy()
    for (cx, cy) in pts1:
        cv2.circle(im, (int(cx), int(cy)), 8, (0, 255, 0), -1)
    imshow(im, y, delta, name="transform")


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Transform image to have a top down view')
    parser.add_argument('-i', '--input_image', help='Input image', type=str, required=True)
    args = parser.parse_args()

    image = cv2.imread(args.input_image)
    assert image is not None, "Could not read image"
    orignal_image = image.copy()
    warp = cv2.warpPerspective(orignal_image, np.dot(T, M), (new_width, new_height))
    imshow(image, name="original")
    showTransform(image, y, delta)
    imshow(warp, name="warped")
    plt.show()
```

commit: 77dfcc41b718ed26e9291b9efc47b0589b951fb8
subject: Create 0001.py
old_file: pylyria/0001/0001.py
new_file: pylyria/0001/0001.py
lang: Python | proba: 0.000252
diff:
```diff
@@ -0,0 +1,2 @@
+1
```

commit: d412ec65777431cdd696593ddecd0ee37a500b25
subject: Create 0011.py
old_file: pylyria/0011/0011.py
new_file: pylyria/0011/0011.py
lang: Python | proba: 0.000054
diff:
```diff
@@ -0,0 +1,385 @@
+# -*- coding: utf-8 -*-
#!/usr/bin/env python

def is_sensitive(word):
	sensitive_words = [line.strip() for line in open('sensitive.txt', encoding='utf-8')]
	word = word.strip()
	if word.lower() in sensitive_words:
		return True
	else:
		return False

if __name__ == "__main__":
    while 1:
    	if is_sensitive(input()):
    		print('Freedom')
    	else:
    		print('Human Rights')
```

commit: 5052318d2802284a0331fc77fd7d02bdaca39f42
subject: test if a layer is working fine
old_file: scripts/feature_extract_test.py
new_file: scripts/feature_extract_test.py
lang: Python | proba: 0.000004
diff:
```diff
@@ -0,0 +1,1417 @@
+"""Feature extraction test"""

import numpy as np;
import sys
import theano;
import theano.tensor as T;
sys.path.append("..")

import scae_destin.datasets as ds;
from scae_destin.convnet import ReLUConvLayer;
from scae_destin.convnet import LCNLayer


n_epochs=1;
batch_size=100;

Xtr, Ytr, Xte, Yte=ds.load_CIFAR10("/home/tejas/Desktop/cifar-10-batches-py");

Xtr=np.mean(Xtr, 3);
Xte=np.mean(Xte, 3);
Xtrain=Xtr.reshape(Xtr.shape[0], Xtr.shape[1]*Xtr.shape[2])
Xtest=Xte.reshape(Xte.shape[0], Xte.shape[1]*Xte.shape[2])

train_set_x, train_set_y=ds.shared_dataset((Xtrain, Ytr));
test_set_x, test_set_y=ds.shared_dataset((Xtest, Yte));

n_train_batches=train_set_x.get_value(borrow=True).shape[0]/batch_size;
n_test_batches=test_set_x.get_value(borrow=True).shape[0]/batch_size;

print "[MESSAGE] The data is loaded"

X=T.matrix("data");
y=T.ivector("label");
idx=T.lscalar();

images=X.reshape((batch_size, 1, 32, 32))

layer_0=LCNLayer(filter_size=(7,7),
                 num_filters=50,
                 num_channels=1,
                 fm_size=(32,32),
                 batch_size=batch_size,
                 border_mode="full");

extract=theano.function(inputs=[idx],
                        outputs=layer_0.apply(images),
                        givens={X: train_set_x[idx * batch_size: (idx + 1) * batch_size]});

print extract(1).shape
```

commit: 47ebaa10068313c9b8fbbf2e3ffcf06597f88ff6
subject: add npy2png file converter
old_file: convert_npy2image.py
new_file: convert_npy2image.py
lang: Python | proba: 0
diff:
```diff
@@ -0,0 +1,1471 @@
+import sys
import math
import copy

import pylab
import numpy

from Image import fromarray
from scipy.misc import imread, toimage

cmin = 0
cmax = 2**8 - 1

def convert(file_in, file_out, index=None) :

	i = 0
	max_count = 0

	while (True) :
	    try :
		input_image = numpy.load(file_in + '/image_%07d.npy' % (i))
	    except Exception :
		break

	    output_image = file_out + '/image_%07d.png' % (i)
	    #output_image = file_out + '/image_%07d.png' % (i/26)

	    # data for tirfm
	    #image_array = input_image[256-25:256+25,256-25:256+26,1]
	    #image_array = input_image[256-76:256+76,256-78:256+78,1]
	    #image_array = input_image[300-50:300+50,300-50:300+50,1]
	    #image_array = input_image[512-45:512+45,512-45:512+45,1]
	    image_array = input_image[:,:,1]

	    #image_exp += numpy.array(image_array)

	    amax = numpy.amax(image_array)
	    amin = numpy.amin(image_array)

	    if (max_count < amax) :
		max_count = amax

	    #print i/26, amax, amin
	    print i, amax, amin

	    # 16-bit data format
	    #image_array.astype('uint16')
	    #toimage(image_array, low=cmin, high=cmax, mode='I').save(output_image)

	    # 8-bit data format (for making movie)
	    toimage(image_array, cmin=cmin, cmax=cmax).save(output_image)

	    #i += 26
	    i += 1

	print 'Max count : ', max_count, 'ADC'



if __name__=='__main__':

	file_in = '/home/masaki/microscopy/images'
	file_out = '/home/masaki/microscopy/images_png'

	convert(file_in, file_out)
```

commit: 211e9e9352234f5638036b5b1ec85f998609d587
subject: Add a primitive MITM proxy
old_file: diana/utils/proxy.py
new_file: diana/utils/proxy.py
lang: Python | proba: 0.000066
diff:
```diff
@@ -0,0 +1,2058 @@
+from diana import packet
import argparse
import asyncio
import sys
import socket
from functools import partial

class Buffer:
    def __init__(self, provenance):
        self.buffer = b''
        self.provenance = provenance

    def eat(self, data):
        self.buffer += data
        packets, self.buffer = packet.decode(self.buffer, provenance=self.provenance)
        return packets

BLOCKSIZE = 1024

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Simple Artemis SBS proxy')
    parser.add_argument('proxy_port', type=int, help='Server port')
    parser.add_argument('address', help='Server address (DNS, IPv4 or IPv6)')
    parser.add_argument('port', type=int, nargs='?', default=2010, help='Server port')
    args = parser.parse_args()

    loop = asyncio.get_event_loop()

    @asyncio.coroutine
    def transit(reader, writer, provenance, tag):
        buf = Buffer(provenance)
        while True:
            data = yield from reader.read(BLOCKSIZE)
            for pkt in buf.eat(data):
                writer.write(packet.encode(pkt, provenance=provenance))
                sys.stdout.write('{} {}\n'.format(tag, pkt))
                sys.stdout.flush()

    @asyncio.coroutine
    def handle_p2c(client_reader, client_writer):
        server_reader, server_writer = yield from asyncio.open_connection(args.address,
                                                                          args.port,
                                                                          loop=loop)
        asyncio.async(transit(client_reader, server_writer,
                              provenance=packet.PacketProvenance.client,
                              tag='[C>S]'), loop=loop)
        asyncio.async(transit(server_reader, client_writer,
                              provenance=packet.PacketProvenance.server,
                              tag='[C<S]'), loop=loop)

    svr = asyncio.start_server(handle_p2c, '127.0.0.1', args.proxy_port, loop=loop)
    server = loop.run_until_complete(svr)

    loop.run_forever()
```

commit: d890ef34b11200738687ec49a4a005bb9ebe7c2a
subject: make the module executable
old_file: distance/__main__.py
new_file: distance/__main__.py
lang: Python | proba: 0
diff:
```diff
@@ -0,0 +1,130 @@
+#!/usr/bin/env python


from . import __version__


print(f"distanceutils version {__version__}")


# vim:set sw=4 ts=8 sts=4 et:
```

commit: 768b61316a10726a3281a514823f280abc142356
subject: move wild into its own folder
old_file: tests/integration/test_wild.py
new_file: tests/integration/test_wild.py
lang: Python | proba: 0
diff:
```diff
@@ -0,0 +1,637 @@
+import pytest
requests = pytest.importorskip("requests")

import vcr

def test_domain_redirect():
    '''Ensure that redirects across domains are considered unique'''
    # In this example, seomoz.org redirects to moz.com, and if those
    # requests are considered identical, then we'll be stuck in a redirect
    # loop.
    url = 'http://seomoz.org/'
    with vcr.use_cassette('domain_redirect.yaml') as cass:
        requests.get(url, headers={'User-Agent': 'vcrpy-test'})
        # Ensure that we've now served two responses. One for the original
        # redirect, and a second for the actual fetch
        assert len(cass) == 2
```

commit: c193aebdc76eae285df402463c149bef328c05ef
subject: Add backwards-compatible registration.urls, but have it warn pending deprecation.
old_file: registration/urls.py
new_file: registration/urls.py
lang: Python | proba: 0
diff:
```diff
@@ -0,0 +1,233 @@
+import warnings

warnings.warn("Using include('registration.urls') is deprecated; use include('registration.backends.default.urls') instead",
              PendingDeprecationWarning)

from registration.backends.default.urls import *
```

commit: fe88e0d8dc3d513cd11ef9ab4cb3ea332af99202
subject: Add migration
old_file: organization/network/migrations/0112_auto_20180502_1742.py
new_file: organization/network/migrations/0112_auto_20180502_1742.py
lang: Python | proba: 0.000002
diff:
```diff
@@ -0,0 +1,900 @@
+# -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2018-05-02 15:42
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('organization-network', '0111_auto_20180307_1152'),
    ]

    operations = [
        migrations.AddField(
            model_name='organization',
            name='is_main',
            field=models.BooleanField(default=False, verbose_name='is main'),
        ),
        migrations.AddField(
            model_name='teamlink',
            name='title_en',
            field=models.CharField(blank=True, max_length=1024, null=True, verbose_name='title'),
        ),
        migrations.AddField(
            model_name='teamlink',
            name='title_fr',
            field=models.CharField(blank=True, max_length=1024, null=True, verbose_name='title'),
        ),
    ]
```

commit: 2f582fa86aa5a8d47a066b4b47fd3425377dc05c
subject: question 1.8
old_file: crack_1_8.py
new_file: crack_1_8.py
lang: Python | proba: 0.999705
diff:
```diff
@@ -0,0 +1,406 @@
+'''
according http://hawstein.com/posts/1.8.html
algorithm is that str1= "12345",str2= "51234"
str1 = str1 + str1 = "1234512345"
as a result, str2 is subString of str1
'''
str1 = 'abcdefghi'
str2 = 'ihgfedcba'

def isSubString(str1, str2):
	if str2 in str1:
		return True
	return False

def isRotation(str1, str2):
	if isSubString(str1+str2, str2):
		return True
	return False

print isRotation(str1, str2)
```

commit: 48e4b9692b29d3fb9f43f37fef70ccc41f47fc0e
subject: Add tests for the errors utility functions
old_file: yithlibraryserver/tests/errors.py
new_file: yithlibraryserver/tests/errors.py
lang: Python | proba: 0.000001
diff:
```diff
@@ -0,0 +1,1035 @@
+import unittest

from pyramid.httpexceptions import HTTPBadRequest, HTTPNotFound


from yithlibraryserver.errors import password_not_found, invalid_password_id


class ErrorsTests(unittest.TestCase):

    def test_password_not_found(self):
        result = password_not_found()
        self.assertTrue(isinstance(result, HTTPNotFound))
        self.assertTrue(result.content_type, 'application/json')
        self.assertTrue(result.body, '{"message": "Password not found"}')

        # try a different message
        result = password_not_found('test')
        self.assertTrue(result.body, '{"message": "test"}')


    def test_invalid_password_id(self):
        result = invalid_password_id()
        self.assertTrue(isinstance(result, HTTPBadRequest))
        self.assertTrue(result.content_type, 'application/json')
        self.assertTrue(result.body, '{"message": "Invalid password id"}')

        # try a different message
        result = invalid_password_id('test')
        self.assertTrue(result.body, '{"message": "test"}')
```

commit: 4c225ec7cdafc45840b2459e8804df5818fecd71
subject: add util module
old_file: dace/util.py
new_file: dace/util.py
lang: Python | proba: 0.000001
diff:
```diff
@@ -0,0 +1,247 @@
+from pyramid.threadlocal import get_current_request
from substanced.util import find_objectmap


def get_obj(oid):
    request = get_current_request()
    objectmap = find_objectmap(request.root)
    obj = objectmap.object_for(oid)
    return obj
```

commit: ddfc28360941a435ae22705dbc46b44cced588e7
subject: Add demo file.
old_file: demo/demo.py
new_file: demo/demo.py
lang: Python | proba: 0
diff:
```diff
@@ -0,0 +1,333 @@
+#!/usr/bin/env python3

import fileinput
import os
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os.sys.path.insert(0,parentdir)
import ppp_spell_checker


if __name__ == "__main__":
    corrector = ppp_spell_checker.StringCorrector('en')
    while(True):
        print(corrector.correctString(input("")))
```

commit: 0f9488588ea66b881cdebf11a42377cb44845a5c
subject: added day6-1a.py. combines transposing input list and function call into single list comprehension
old_file: day6/day6-1a.py
new_file: day6/day6-1a.py
lang: Python | proba: 0.999998
diff:
```diff
@@ -0,0 +1,1507 @@
+"""--- Day 6: Signals and Noise ---

Something is jamming your communications with Santa. Fortunately, your signal is only partially jammed, and protocol in situations like this is to switch to a simple repetition code to get the message through.

In this model, the same message is sent repeatedly. You've recorded the repeating message signal (your puzzle input), but the data seems quite corrupted - almost too badly to recover. Almost.

All you need to do is figure out which character is most frequent for each position. For example, suppose you had recorded the following messages:

eedadn
drvtee
eandsr
raavrd
atevrs
tsrnev
sdttsa
rasrtv
nssdts
ntnada
svetve
tesnvt
vntsnd
vrdear
dvrsen
enarar
The most common character in the first column is e; in the second, a; in the third, s, and so on. Combining these characters returns the error-corrected message, easter.

Given the recording in your puzzle input, what is the error-corrected version of the message being sent?
"""

import argparse


def decode_column(column):
    return sorted(column, key=column.count, reverse=True)


parser = argparse.ArgumentParser(description='Advent of code.')
parser.add_argument('inputfile', type=argparse.FileType('r'), help='Path to input file')
args = parser.parse_args()
lines = args.inputfile.read().rstrip("\n").split("\n")

# Updated to use single list comprehension to transpose and call decode_column
code = [decode_column([row[i] for row in lines])[0] for i in range(len(lines[0]))]

print("".join(code))
```

commit: 8003f9f643b90cf42bdd8ba0ec8d5dc2f96ba191
subject: Create list-aws-queue.py
old_file: list-aws-queue.py
new_file: list-aws-queue.py
lang: Python | proba: 0.00003
diff:
```diff
@@ -0,0 +1,703 @@
+# This script created a queue
#
# Author - Paul Doyle Nov 2015
#
#
import boto.sqs
import boto.sqs.queue
from boto.sqs.message import Message
from boto.sqs.connection import SQSConnection
from boto.exception import SQSError
import sys

# Get the keys from a specific url and then use them to connect to AWS Service
access_key_id = "AKIAIBKC3KC4HZNSXFIA"
secret_access_key = "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"

# Set up a connection to the AWS service.
conn = boto.sqs.connect_to_region("eu-west-1", aws_access_key_id=access_key_id, aws_secret_access_key=secret_access_key)

# Get a list of the queues that exists and then print the list out
rs = conn.get_all_queues()
for q in rs:
	print q.id
```

commit: 298f297410b9db8b2d211b1d0edddb595f1fa469
subject: Add timestamp2str()
old_file: datetime/datetime.py
new_file: datetime/datetime.py
lang: Python | proba: 0.000258
diff:
```diff
@@ -0,0 +1,501 @@
+import datetime

# ==============================================================================
# TIMESTAMP 2 STR
# ==============================================================================
def timestamp2str(t, pattern="%Y-%m-%d %H:%M:%S"):
    """ Given a float timestamp it returns the date as a formatted string,
    based on the date `pattern` specified """
    return datetime.datetime.fromtimestamp(t).strftime(pattern)
```

commit: 130b0b5d70c6f94caa1e6dbd98aa4361a9ce4d1d
subject: add tips for relax time...
old_file: dictate_num/train.py
new_file: dictate_num/train.py
lang: Python | proba: 0
old_contents:
```python
import os
import sys
import pyttsx
import random
from data import *

EXIT_TAG = 'n'

class CTrain(object):
    def __init__(self):
        self._eng = pyttsx.init()

    def pre(self):
        print "*"*10,"DICTATE NUMBER TRAING", "*"*10
        name = raw_input("Please enter your name: ")
        data = CData(name).load()
        if data is not None:
            self._data = data
            print "See you again ", name, ", your score is followings:"
            self._data.showSt()
            self._data.showDaySt()
        else:
            self._data = CData(name)
            print "Welcome new challenger", name
            print "You will start on level", self._data.level
        return

    def aft(self, doCnt):
        self._data.save()
        print "Bye %s, finish [%3d] , your score is followings:"%(self._data.name, doCnt)
        self._data.showDetail(doCnt)
        return

    def run(self):
        IsCon = True
        idx = 1
        while IsCon:
            lvl = self._data.level
            print "\n[%3d]"%( idx,), " now level", lvl,", Please listening..."
            nums = self.genNum(lvl)
            self.readNum(nums)
            d = raw_input("enter what you heard(n for exit): ")
            if d.lower().find(EXIT_TAG) >= 0:
                IsCon = False
                break
            ans,lvl = self._data.score(d, nums)
            if ans:
                print "SUCC"
            else:
                print "FAIL: ", nums
            idx += 1
        return idx-1

    def genNum(self, lvl):
        s = ""
        for _ in range(lvl):
            d = random.randint(0,9)
            s += str(d)
        return s

    def readNum(self, nums):
        for d in nums:
            self._eng.say(d)
        self._eng.runAndWait()
        return

def main():
    train = CTrain()
    train.pre()
    doCnt = train.run()
    train.aft(doCnt)

if __name__ == "__main__":
    main()
```
diff:
```diff
@@ -1027,303 +1027,406 @@
-print "\n[%3d]"%( idx,), " now level", lvl,", Please listening..."
            nums = self.genNum(lvl)
            self.readNum(nums)
            d = raw_input("enter what you heard(n for exit): ")
            if d.lower().find(EXIT_TAG) >= 0:
                IsCon = False
                break
+k = raw_input("press any key to continue, press n for exit: ")
            if k.lower().find(EXIT_TAG) >= 0:
                print "End training..."
                isCon = False
                break
            print "\n[%3d]"%( idx,), " now level", lvl,", Please listening and enter what you heard\n"
            nums = self.genNum(lvl)
            self.readNum(nums)
            d = raw_input()
```

commit: 0e9e63a48c5f3e02fb49d0068363ac5442b39e37
subject: Add a body to posts
old_file: discussion/models.py
new_file: discussion/models.py
lang: Python | proba: 0.000001
old_contents:
```python
from django.contrib.auth.models import User
from django.db import models


class Discussion(models.Model):
    user = models.ForeignKey(User)
    name = models.CharField(max_length=255)
    slug = models.SlugField()

    def __unicode__(self):
        return self.name


class Post(models.Model):
    discussion = models.ForeignKey(Discussion)
    user = models.ForeignKey(User)
    name = models.CharField(max_length=255)
    slug = models.SlugField()
    posts_file = models.FileField(upload_to='uploads/posts',
                                  blank=True, null=True)

    def __unicode__(self):
        return self.name


class Comment(models.Model):
    post = models.ForeignKey(Post)
    user = models.ForeignKey(User)
    body = models.TextField()
    comment_file = models.FileField(upload_to='uploads/comments',
                                    blank=True, null=True)

    def __unicode__(self):
        return 'Comment on %s by %s' % (self.post.name, self.user)
```
diff:
```diff
@@ -442,24 +442,54 @@
 SlugField()
+    body = models.TextField()
 posts_fi
```

commit: 62beb09ca1ecde8be4945016ae09beaad2dad597
subject: Create disemvowel_trolls.py
old_file: disemvowel_trolls.py
new_file: disemvowel_trolls.py
lang: Python | proba: 0.000001
diff:
```diff
@@ -0,0 +1,217 @@
+#Kunal Gautam
#Codewars : @Kunalpod
#Problem name: Disemvowel Trolls
#Problem level: 7 kyu

def disemvowel(string):
    return ''.join([letter for letter in string if letter.lower() not in ['a', 'e', 'i', 'o', 'u']])
```

commit: 078bc9ea1375ac8ff7b2bbb92553ae63e5190cd3
subject: add var.py in package structData to save vars
old_file: trunk/editor/structData/var.py
new_file: trunk/editor/structData/var.py
lang: Python | proba: 0
diff:
```diff
@@ -0,0 +1,201 @@
+#!/usr/bin/env python

class Var(object):

    def __init__(self, name, start_value, set_value=None):
        self.name = name
        self.start_value = start_value
        self.set_value = set_value
```

commit: a26f0cc1af189686a24518510095f93b064a36a4
subject: Add two utility functions for group membership
old_file: django_split/base.py
new_file: django_split/base.py
lang: Python | proba: 0.000001
old_contents:
```python
import six

import inflection

from .validation import validate_experiment

EXPERIMENTS = {}


class ExperimentMeta(type):
    def __init__(self, name, bases, dict):
        super(ExperimentMeta, self).__init__(name, bases, dict)

        # Special case: don't do experiment processing on the base class
        if (
            name == 'Experiment' and
            self.__module__ == ExperimentMeta.__module__
        ):
            return

        slug = inflection.underscore(name)

        if len(slug) > 48:
            raise ValueError("Experiment name too long")

        if slug in EXPERIMENTS:
            raise AssertionError(
                "Experiment %s defined multiple times (as %s.%s and %s.%s)" % (
                    slug,
                    dict['__module__'],
                    dict['__qualname__'],
                    EXPERIMENTS[slug].__module__,
                    EXPERIMENTS[slug].__qualname__,
                ),
            )

        validate_experiment(self)

        self.slug = slug
        EXPERIMENTS[slug] = self


class Experiment(six.with_metaclass(ExperimentMeta)):
    groups = ('control', 'experiment')
    control_group = 'control'
    superuser_group = None

    include_new_users = True
    include_old_users = True

    metrics = ()

    start_date = None
    end_date = None
```
diff:
```diff
@@ -15,19 +15,116 @@
 ort 
-inflection
+datetime
import inflection

from django.contrib.auth.models import User

from .models import ExperimentGroup
 
fro
@@ -1408,16 +1408,942 @@
 d_date = None
+
    @classmethod
    def group(cls, group_name):
        # This will raise a ValueError if the group does not exist. Whilst
        # group_index is not used if we're before the experiment start date,
        # we want to catch errors from using the wrong group name immediately.
        group_index = groups.index(group_name)

        # TODO: superuser logic

        # Until the start of the experiment, all users are in the control group
        if datetime.date.today() < self.start_date:
            if group_name == self.control_group:
                return User.objects.all()
            else:
                return User.objects.none()

        return User.objects.filter(id__in=
            ExperimentGroup.objects.filter(
                experiment=self.slug,
                group=group_index,
            ),
        )

    @classmethod
    def in_group(cls, user, group):
        return user in cls.group(group)
```

commit: 316d0518f2cf81ce3045335b79bc993020befce1
subject: create main class `FlaskQuik` for bridging quik and flask
old_file: flask_quik.py
new_file: flask_quik.py
lang: Python | proba: 0
diff:
```diff
@@ -0,0 +1,1438 @@
+#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
    flask.ext.quik
    ~~~~~~~~~~~~~~

    Extension implementing Quik Templates support in Flask with support for
    flask-babel

    :copyright: (c) 2012 by Thiago Avelino <[email protected]>
    :license: MIT, see LICENSE for more details.
"""
from quik import FileLoader


class FlaskQuik(object):
    """
    Main class for bridging quik and flask. We try to stay as close as possible
    to how Jinja2 is used in Flask, while at the same time surfacing the useful
    stuff from Quik.

    """
    def __init__(self, app=None):
        self.app = None
        if app is not None:
            self.init_app(app)
            self.app = app

    def init_app(self, app):
        """
        Initialize a :class:`~flask.Flask` application
        for use with this extension. This method is useful for the factory
        pattern of extension initialization. Example::

            quik = FlaskQuik()

            app = Flask(__name__)
            quik.init_app(app)

        .. note::
            This call will fail if you called the :class:`FlaskQuik`
            constructor with an ``app`` argument.
        """
        if self.app:
            raise RuntimeError("Cannot call init_app when app argument was "
                               "provided to FlaskQuik constructor.")

        if not hasattr(app, 'extensions'):
            app.extensions = {}

        app.extensions['quik'] = self
```

commit: 4844ac93326186ded80147a3f8e1e1429212428b
subject: add user's launcher
old_file: tfx/experimental/templates/taxi/stub_component_launcher.py
new_file: tfx/experimental/templates/taxi/stub_component_launcher.py
lang: Python | proba: 0.000001
diff:
```diff
@@ -0,0 +1,2813 @@
+# Lint as: python3
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Stub component launcher for launching stub executors in KFP."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tfx.experimental.pipeline_testing import base_stub_executor
from tfx.experimental.pipeline_testing import stub_component_launcher
from tfx.experimental.templates.taxi.pipeline import configs

class StubComponentLauncher(stub_component_launcher.StubComponentLauncher):
  """Responsible for launching stub executors in KFP Template.
  This stub component launcher cannot be defined in the kubeflow_dag_runner.py
  because launcher class is imported by the module path.
  """
  def __init__(self, **kwargs):
    super(StubComponentLauncher, self).__init__(**kwargs)

    # TODO(StubExecutor): GCS directory where KFP outputs are recorded
    self.test_data_dir = "gs://{}/testdata".format(configs.GCS_BUCKET_NAME)
    # TODO(StubExecutor): customize self.stubbed_component_ids to replace components
    # with BaseStubExecutor
    self.stubbed_component_ids = ['CsvExampleGen', 'StatisticsGen',
                                  'SchemaGen', 'ExampleValidator',
                                  'Trainer', 'Transform', 'Evaluator', 'Pusher']
    # TODO(StubExecutor): (Optional) Use stubbed_component_map to insert custom stub
    # executor class as a value and component id as a key.
    self.stubbed_component_map = {}
    for c_id in self.stubbed_component_ids:
      self.stubbed_component_map[c_id] = base_stub_executor.BaseStubExecutor

def get_stub_launcher_class(stub_launcher: Type[StubComponentLauncher],
                            test_data_dir: Text,
                            stubbed_component_ids: List[Text],
                            stubbed_component_map: Dict[Text, Type[base_stub_executor.BaseStubExecutor]]
                           ) -> Type[StubComponentLauncher]:
  """Returns a StubComponentLauncher class.
  Returns:
    StubComponentLauncher class holding stub executors.
  """
  stub_launcher.stubbed_component_map = dict(stubbed_component_map)
  for component_id in stubbed_component_ids:
    stub_launcher.stubbed_component_map[component_id] = \
        base_stub_executor.BaseStubExecutor
  stub_launcher.test_data_dir = test_data_dir
  return stub_launcher
```

commit: 20d77f66e0287b3aab08b4cf14f23e7e5672aefd
subject: Create database import script for the Picks table (each NFLPool Player's picks for a given season)
old_file: db_setup/nflpool_picks.py
new_file: db_setup/nflpool_picks.py
lang: Python | proba: 0
diff:
```diff
@@ -0,0 +1,2493 @@
+import sqlite3

conn = sqlite3.connect('nflpool.sqlite')
cur = conn.cursor()

# Do some setup
cur.executescript('''
DROP TABLE IF EXISTS Player;

CREATE TABLE Picks (
    firstname TEXT NOT NULL,
    lastname TEXT NOT NULL,
    id INTEGER NOT NULL PRIMARY KEY UNIQUE,
    season TEXT NOT NULL UNIQUE,
    email TEXT NOT NULL UNIQUE,
    timestamp TEXT NOT NULL
    key
    afc_east_first TEXT NOT NULL
    afc_east_second TEXT NOT NULL
    afc_east_last TEXT NOT NULL
    afc_north_first TEXT NOT NULL
    afc_north_second TEXT NOT NULL
    afc_north_last TEXT NOT NULL
    afc_south_first TEXT NOT NULL
    afc_south_second TEXT NOT NULL
    afc_south_last TEXT NOT NULL
    afc_west_first TEXT NOT NULL
    afc_west_second TEXT NOT NULL
    afc_west_last TEXT NOT NULL
    nfc_east_first TEXT NOT NULL
    nfc_east_second TEXT NOT NULL
    nfc_east_last TEXT NOT NULL
    nfc_north_first TEXT NOT NULL
    nfc_north_second TEXT NOT NULL
    nfc_north_last TEXT NOT NULL
    nfc_south_first TEXT NOT NULL
    nfc_south_second TEXT NOT NULL
    nfc_south_last TEXT NOT NULL
    nfc_west_first TEXT NOT NULL
    nfc_west_second TEXT NOT NULL
    nfc_west_last TEXT NOT NULL
    afc_wildcard1 TEXT NOT NULL
    afc_wildcard2 TEXT NOT NULL
    nfc_wildcard1 TEXT NOT NULL
    nfc_wildcard2 TEXT NOT NULL
    afc_rushing_first TEXT NOT NULL
    afc_rushing_second TEXT NOT NULL
    afc_rushing_third TEXT NOT NULL
    afc_passing_first TEXT NOT NULL
    afc_passing_second TEXT NOT NULL
    afc_passing_third TEXT NOT NULL
    afc_receiving_first TEXT NOT NULL
    afc_receiving_second TEXT NOT NULL
    afc_receiving_third TEXT NOT NULL
    afc_sacks_first TEXT NOT NULL
    afc_sacks_second TEXT NOT NULL
    afc_sacks_third TEXT NOT NULL
    afc_int_first TEXT NOT NULL
    afc_int_second TEXT NOT NULL
    afc_int_third TEXT NOT NULL
    nfc_rushing_first TEXT NOT NULL
    nfc_rushing_second TEXT NOT NULL
    nfc_rushing_third TEXT NOT NULL
    nfc_passing_first TEXT NOT NULL
    nfc_passing_second TEXT NOT NULL
    nfc_passing_third TEXT NOT NULL
    nfc_receiving_first TEXT NOT NULL
    nfc_receiving_second TEXT NOT NULL
    nfc_receiving_third TEXT NOT NULL
    nfc_sacks_first TEXT NOT NULL
    nfc_sacks_second TEXT NOT NULL
    nfc_sacks_third TEXT NOT NULL
    nfc_int_first TEXT NOT NULL
    nfc_int_second TEXT NOT NULL
    nfc_int_third TEXT NOT NULL
    afc_pf TEXT NOT NULL
    nfc_pf TEXT NOT NULL
    specialteams_td TEXT NOT NULL

)
''')
conn.commit()
conn.close()
```

commit: ed1cd0f7de1a7bebaaf0f336ba52e04286dd87de
subject: Create my_mapper.py
old_file: Hadoop--Project-to-map-new-Your-taxi-data-info/my_mapper.py
new_file: Hadoop--Project-to-map-new-Your-taxi-data-info/my_mapper.py
lang: Python | proba: 0.000014
diff:
```diff
@@ -0,0 +1,534 @@
+#!/usr/bin/env python


import sys


for line in sys.stdin:
    line = line.strip()
    unpacked = line.split(",")
    stadium, capacity, expanded, location, surface, turf, team, opened, weather, roof, elevation = line.split(",")
    #medallion, hack_license, vendor_id, rate_code, store_and_fwd_flag, pickup_datetime, dropoff_datetime, passenger_count, trip_time_in_secs, trip_distance, pickup_longitude, pickup_latitude, dropoff_longitude, dropoff_latitude = line.split(",")

    results = [turf, "1"]
    print("\t".join(results))
```

commit: 8ad4627973db344e228a9170aef030ab58efdeb9
subject: Add column order and importable objects lists
old_file: src/ggrc/converters/__init__.py
new_file: src/ggrc/converters/__init__.py
lang: Python | proba: 0
old_contents:
```python
# Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: [email protected]
# Maintained By: [email protected]

from ggrc.converters.sections import SectionsConverter

all_converters = [('sections', SectionsConverter)]

HANDLERS = {}

def get_converter(name):
    return all_converters(name)
```
diff:
```diff
@@ -286,16 +286,308 @@
 nverter
+from ggrc.models import (
    Audit, Control, ControlAssessment, DataAsset, Directive, Contract,
    Policy, Regulation, Standard, Facility, Market, Objective, Option,
    OrgGroup, Vendor, Person, Product, Program, Project, Request, Response,
    Section, Clause, System, Process, Issue,
)

 
all_con
@@ -646,16 +646,17 @@
 S = {}

+
 def get_
@@ -672,16 +672,16 @@
 (name):
-
 return
@@ -702,8 +702,871 @@
 s(name)
+
COLUMN_ORDER = (
    "slug",
    "title",
    "description",
    "notes",
    "owners",
)

IMPORTABLE = {
    "audit": Audit,
    "control": Control,
    "control assessment": ControlAssessment,
    "control_assessment": ControlAssessment,
    "data asset": DataAsset,
    "data_asset": DataAsset,
    "directive": Directive,
    "contract": Contract,
    "policy": Policy,
    "regulation": Regulation,
    "standard": Standard,
    "facility": Facility,
    "market": Market,
    "objective": Objective,
    "option": Option,
    "org group": OrgGroup,
    "org_group": OrgGroup,
    "vendor": Vendor,
    "person": Person,
    "product": Product,
    "program": Program,
    "project": Project,
    "request": Request,
    "response": Response,
    "section": Section,
    "clause": Clause,
    "system": System,
    "process": Process,
    "issue": Issue,
}
```

commit: 8141d6cafb4a1c8986ec7065f27d536d98cc9916
subject: Add little script calculate sample spectra.
old_file: Modules/Biophotonics/python/iMC/script_plot_one_spectrum.py
new_file: Modules/Biophotonics/python/iMC/script_plot_one_spectrum.py
lang: Python | proba: 0
diff:
```diff
@@ -0,0 +1,1939 @@
+'''
Created on Oct 12, 2015

@author: wirkert
'''

import pickle
import logging

import numpy as np
import matplotlib.pyplot as plt
import luigi

import tasks_regression as rt
from msi.plot import plot
from msi.msi import Msi
import msi.normalize as norm
import scriptpaths as sp

sp.ROOT_FOLDER = "/media/wirkert/data/Data/2015_xxxx_plot_one_spectrum"

# the wavelengths recorded by our camera
RECORDED_WAVELENGTHS = \
    np.array([580, 470, 660, 560, 480, 511, 600, 700]) * 10 ** -9
PARAMS = np.array([0.05,  # bvf
                   0.0,  # SaO2
                   0.0,  # billirubin
                   500.,  # a_mie
                   0.0,  # a_ray
                   1.091,  # b (for scattering
                   500. * 10 ** -6])  # d_muc

class PlotOneSpectrum(luigi.Task):
    batch_prefix = luigi.Parameter()

    def requires(self):
        return rt.TrainForestForwardModel(self.batch_prefix)


    def run(self):
        f = file(self.input().path, "r")
        rf = pickle.load(f)
        f.close()

        refl = rf.predict(PARAMS)

        msi = Msi(refl)
        msi.set_wavelengths(RECORDED_WAVELENGTHS)
        norm.standard_normalizer.normalize(msi)

        plot(msi)
        plt.gca().set_xlabel("wavelength")
        plt.gca().set_ylabel("normalized reflectance")
        plt.grid()
        plt.ylim([0.0, 0.4])
        plt.title("bvf: " + str(PARAMS[0]) + "; saO2: " + str(PARAMS[1]) +
                  "; bili: " + str(PARAMS[2]) + "; a_mie: " + str(PARAMS[3]) +
                  "; a_ray: " + str(PARAMS[4]) + "; d_muc: " + str(PARAMS[6]))
        plt.show()


if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    luigi.interface.setup_interface_logging()
    sch = luigi.scheduler.CentralPlannerScheduler()
    w = luigi.worker.Worker(scheduler=sch)
    main_task = PlotOneSpectrum(batch_prefix=
                                "jacques_no_billi_generic_scattering_")
    w.add(main_task)
    w.run()
```

commit: 55aae76ae3813045542b8f94736fdfb1e08592f2
subject: Add chrome driver path.
old_file: src/lib/environment/__init__.py
new_file: src/lib/environment/__init__.py
lang: Python | proba: 0
old_contents:
```python
import os
import logging
from lib import constants, file_ops

yaml = file_ops.load_yaml_contents(constants.path.YAML)

PROJECT_ROOT_PATH = os.path.dirname(os.path.abspath(__file__)) + "/../"
VIRTENV_PATH = PROJECT_ROOT_PATH + constants.path.VIRTUALENV_DIR
LOGGING_FORMAT = yaml[constants.yaml.LOGGING][constants.yaml.FORMAT]

# register loggers
selenium_logger = logging.getLogger(constants.log.Selenium.SELENIUM_REMOTE_CONNECTION)
# Only display possible problems
selenium_logger.setLevel(logging.WARNING)
```
diff:
```diff
@@ -317,16 +317,113 @@
 .FORMAT]
+
CHROME_DRIVER_PATH = PROJECT_ROOT_PATH + constants.path.RESOURCES + constants.path.CHROME_DRIVER
 

# regi
```

commit: 56422abd9e5dbc1b17b009d84fd5e4b028719b94
subject: add basic IPC traffic analyzer
old_file: ipc-viewer.py
new_file: ipc-viewer.py
lang: Python | proba: 0
diff:
```diff
@@ -0,0 +1,1300 @@
+#!/usr/bin/python

# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

# This file analyzes the output of running with MOZ_IPC_MESSAGE_LOG=1

import sys
import re

msgPatt = re.compile('^\[time:(\d+)\]\[(\d+)(->|<-)(\d+)\]\[([^\]]+)\] (Sending|Received)((?: reply)?) ([^\(]+)\(\[TODO\]\)$')

#[time:1441041587246153][9641->9647][PPluginScriptableObjectParent] Sending reply Reply_NPN_Evaluate([TODO])


matchCount = 0
notMatchCount = 0

msgCounts = {}

for l in sys.stdin:
    mm = msgPatt.match(l)
    if not mm:
        notMatchCount += 1
        continue
    timeStamp = mm.group(1)
    pid1 = mm.group(2)
    arrow = mm.group(3)
    pid2 = mm.group(4)
    actor = mm.group(5)
    sendRecv = mm.group(6)
    sendRecvExtra = not not mm.group(7)
    msg = mm.group(8)

    p = (actor, msg)
    msgCounts[p] = msgCounts.setdefault(p, 0) + 1

    #print timeStamp, pid1, arrow, pid2, actor, sendRecv, sendRecvExtra, msg

    matchCount += 1


# Resort the data a bit.

counts = []
for p, count in msgCounts.iteritems():
    counts.append((count, p))

counts.sort()
counts.reverse()

for (count, (actor, msg)) in counts:
    print count, actor, msg
```

commit: 561957a2492714e1b6d76b13daeced66a90aba1d
subject: Create __init__.py
old_file: docs/_themes/sphinx_rtd_theme/__init__.py
new_file: docs/_themes/sphinx_rtd_theme/__init__.py
lang: Python | proba: 0.000429
diff:
```diff
@@ -0,0 +1,371 @@
+"""Sphinx ReadTheDocs theme.

From https://github.com/ryan-roemer/sphinx-bootstrap-theme.

"""
import os

VERSION = (0, 1, 5)

__version__ = ".".join(str(v) for v in VERSION)
__version_full__ = __version__


def get_html_theme_path():
    """Return list of HTML theme paths."""
    cur_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
    return cur_dir
```

commit: c979fe37cc5f3dd83933893a1e7774c4aa7d061c
subject: Add test script.
old_file: examples/get_data.py
new_file: examples/get_data.py
lang: Python | proba: 0
diff:
```diff
@@ -0,0 +1,1625 @@
+'''
 Copyright 2019 Trustees of the University of Pennsylvania

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
'''

import argparse
import getpass
from ieeg.auth import Session


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-u', '--user', required=True, help='username')
    parser.add_argument('-p', '--password',
                        help='password (will be prompted if missing)')

    parser.add_argument('dataset', help='dataset name')

    args = parser.parse_args()

    if not args.password:
        args.password = getpass.getpass()

    with Session(args.user, args.password) as session:
        dataset_name = args.dataset
        dataset = session.open_dataset(dataset_name)
        raw_data = dataset.get_data(0, 2000, [0, 1])
        print(2000, raw_data.shape)
        print(raw_data)
        raw_data = dataset.get_data(0, 4000, [0, 1])
        print(4000, raw_data.shape)
        print(raw_data)
        raw_data = dataset.get_data(0, 6000, [0, 1])
        print(6000, raw_data.shape)
        print(raw_data)
        session.close_dataset(dataset_name)


if __name__ == "__main__":
    main()
```

commit: 11bd97a647507645f90e259dd8000eb6a8001890
subject: Add index to log_once table, make cleanup run with db cleanup event. refs #1167
old_file: flexget/utils/log.py
new_file: flexget/utils/log.py
lang: Python | proba: 0.000008
old_contents:
```python
"""Logging utilities"""

import logging
from flexget.manager import Session, Base
from datetime import datetime, timedelta
from sqlalchemy import Column, Integer, String, DateTime

log = logging.getLogger('util.log')


class LogMessage(Base):
    """Declarative"""

    __tablename__ = 'log_once'

    id = Column(Integer, primary_key=True)
    md5sum = Column(String)
    added = Column(DateTime, default=datetime.now())

    def __init__(self, md5sum):
        self.md5sum = md5sum

    def __repr__(self):
        return "<LogMessage('%s')>" % (self.md5sum)


def purge():
    """Purge old messages from database"""
    old = datetime.now() - timedelta(days=365)

    session = Session()
    try:
        for message in session.query(LogMessage).filter(LogMessage.added < old):
            log.debug('purging: %s' % message)
            session.delete(message)
    finally:
        session.commit()


def log_once(message, logger=logging.getLogger('log_once')):
    """Log message only once using given logger. Returns False if suppressed logging."""
    purge()

    import hashlib
    digest = hashlib.md5()
    digest.update(message.encode('latin1', 'replace'))  # ticket:250
    md5sum = digest.hexdigest()

    session = Session()
    try:
        # abort if this has already been logged
        if session.query(LogMessage).filter_by(md5sum=md5sum).first():
            session.close()
            return False

        row = LogMessage(md5sum)
        session.add(row)
    finally:
        session.commit()

    logger.info(message)
    return True
```
diff:
```diff
@@ -37,49 +37,22 @@
 ing
-from flexget.manager import Session, Base
+import hashlib
 
fro
@@ -149,45 +149,545 @@
 Time
-

log = logging.getLogger('util.log')
+, Index
from flexget import schema
from flexget.utils.sqlalchemy_utils import table_schema
from flexget.manager import Session
from flexget.event import event

log = logging.getLogger('util.log')
Base = schema.versioned_base('log_once', 0)


@schema.upgrade('log_once')
def upgrade(ver, session):
    if ver is None:
        log.info('Adding index to md5sum column of log_once table.')
        table = table_schema('log_once', session)
        Index('log_once_md5sum', table.c.md5sum, unique=True).create()
        ver = 0
    return ver
 


c
@@ -833,16 +833,29 @@
 n(String
+, unique=True
 )
    ad
@@ -1038,24 +1038,53 @@
 f.md5sum)


+@event('manager.db_cleanup')
 def purge():
@@ -1081,16 +1081,23 @@
 f purge(
+session
 ):
    "
@@ -1185,63 +1185,21 @@
 65)
-
    session = Session()
    try:
        for message in
+
    result =
 ses
@@ -1251,22 +1251,41 @@
 d < old)
-:

+.delete()
    if result:

@@ -1292,111 +1292,65 @@
 log.
-debug('purging: %s' % message)
            session.delete(message)
    finally:
        session.commit(
+verbose('Purged %s entries from log_once table.' % result
 )


@@ -1503,39 +1503,8 @@
 """
-
    purge()

    import hashlib
 
```

commit: b3977289de72421530614ff4f28cdf7333d743e4
subject: Add region migration validation
old_file: dbaas/logical/validators.py
new_file: dbaas/logical/validators.py
lang: Python | proba: 0
diff:
```diff
@@ -0,0 +1,931 @@
+# -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
from logical.models import Database
from django.core.exceptions import ValidationError
from django.core.exceptions import ObjectDoesNotExist
from system.models import Configuration


def validate_evironment(database_name, environment_name):
    try:
        database = Database.objects.get(database_name)
    except ObjectDoesNotExist:
        pass
    else:
        dev_envs = Configuration.get_by_name_as_list('dev_envs')
        new_db_env_is_not_dev = environment_name not in dev_envs

        prod_envs = Configuration.get_by_name_as_list('prod_envs')
        db_env_is_prod = database.environment.name in prod_envs

        if new_db_env_is_not_dev and db_env_is_prod:
            raise ValidationError(
                _('%(database_name)s already exists in production!'),
                params={'database_name': database_name},
            )
```

commit: f0e092b060d9afb700f027197fdf44eeb2fdd91b
subject: Create __init__.py
old_file: __ini__.py
new_file: __ini__.py
lang: Python | proba: 0.000429
diff:
```diff
@@ -0,0 +1 @@
+
```

commit: 3ce3e6adb3d47989338866da1b840b77e6ceab5a
subject: remove some debug code
old_file: src/you_get/extractors/iqiyi.py
new_file: src/you_get/extractors/iqiyi.py
lang: Python | proba: 0.025468
old_contents:
```python
#!/usr/bin/env python

__all__ = ['iqiyi_download']

from ..common import *
from uuid import uuid4
from random import random,randint
import json
from math import floor
import hashlib

'''
com.qiyi.player.core.model.def.DefinitonEnum
bid meaning for quality
0 none
1 standard
2 high
3 super
4 suprt-high
5 fullhd
10 4k
96 topspeed
'''

def getVRSXORCode(arg1,arg2):
    loc3=arg2 %3
    if loc3 == 1:
        return arg1^121
    if loc3 == 2:
        return arg1^72
    return arg1^103

def getVrsEncodeCode(vlink):
    loc6=0
    loc2=''
    loc3=vlink.split("-")
    loc4=len(loc3)
    # loc5=loc4-1
    for i in range(loc4-1,-1,-1):
        loc6=getVRSXORCode(int(loc3[loc4-i-1],16),i)
        loc2+=chr(loc6)
    return loc2[::-1]

def getVMS(tvid,vid,uid):
    #tm ->the flash run time for md5 usage
    #um -> vip 1 normal 0
    #authkey -> for password protected video ,replace '' with your password
    #puid user.passportid may empty?
    #TODO: support password protected video
    tm=randint(100,1000)
    vmsreq='http://cache.video.qiyi.com/vms?key=fvip&src=1702633101b340d8917a69cf8a4b8c7' +\
        "&tvId="+tvid+"&vid="+vid+"&vinfo=1&tm="+str(tm)+\
        "&enc="+hashlib.new('md5',bytes('ts56gh'+str(tm)+tvid,"utf-8")).hexdigest()+\
        "&qyid="+uid+"&tn="+str(random()) +"&um=0" +\
        "&authkey="+hashlib.new('md5',bytes(''+str(tm)+tvid,'utf-8')).hexdigest()
    tmp = get_content(vmsreq)
    return json.loads(tmp)

def getDispathKey(rid):
    tp=")(*&^flash@#$%a"  #magic from swf
    time=json.loads(get_content("http://data.video.qiyi.com/t?tn="+str(random())))["t"]
    t=str(int(floor(int(time)/(10*60.0))))
    return hashlib.new("md5",bytes(t+tp+rid,"utf-8")).hexdigest()

def iqiyi_download(url, output_dir = '.', merge = True, info_only = False):
    gen_uid=uuid4().hex

    html = get_html(url)

    tvid = r1(r'data-player-tvid="([^"]+)"', html)
    videoid = r1(r'data-player-videoid="([^"]+)"', html)

    assert tvid
    assert videoid

    info = getVMS(tvid,videoid,gen_uid)

    assert info["code"] == "A000000"

    title = info["data"]["vi"]["vn"]

    # data.vp = json.data.vp
    # data.vi = json.data.vi
    # data.f4v = json.data.f4v
    # if movieIsMember data.vp = json.data.np

    #for highest qualities
    #for http://www.iqiyi.com/v_19rrmmz5yw.html  not vp -> np
    try:
        if info["data"]['vp']["tkl"]=='' :
            raise ValueError
    except:
        log.e("[Error] Do not support for iQIYI VIP video.")
        exit(-1)

    bid=0
    for i in info["data"]["vp"]["tkl"][0]["vs"]:
        if int(i["bid"])<=10 and int(i["bid"])>=bid:
            bid=int(i["bid"])
            video_links=i["flvs"] #now in i["flvs"] not in i["fs"]

    urls=[]
    size=0
    for i in video_links:
        vlink=i["l"]
        if not vlink.startswith("/"):
            #vlink is encode
            vlink=getVrsEncodeCode(vlink)
        key=getDispathKey(vlink.split("/")[-1].split(".")[0])
        size+=i["b"]
        baseurl=info["data"]["vp"]["du"].split("/")
        baseurl.insert(-1,key)
        url="/".join(baseurl)+vlink+'?su='+gen_uid+'&qyid='+uuid4().hex+'&client=&z=&bt=&ct=&tn='+str(randint(10000,20000))
        urls.append(json.loads(get_content(url))["l"])

    #download should be complete in 10 minutes
    #because the url is generated before start downloading
    #and the key may be expired after 10 minutes
    print_info(site_info, title, 'flv', size)
    if not info_only:
        download_urls(urls, title, 'flv', size, output_dir = output_dir, merge = merge)

site_info = "iQIYI.com"
download = iqiyi_download
download_playlist = playlist_not_supported('iqiyi')
```
diff:
```diff
@@ -1424,14 +1424,26 @@
-tmp =
+return json.loads(
 get_
@@ -1461,34 +1461,8 @@
 req)
-
    return json.loads(tmp
 )

d
```

commit: 660fc806d11c6a8af321bb14caec21ca7cba4141
subject: add kafka streaming consumer
old_file: deploy/test/kf_consumer1.py
new_file: deploy/test/kf_consumer1.py
lang: Python | proba: 0
diff:
```diff
@@ -0,0 +1,339 @@
+import json
from kafka import KafkaConsumer

consumer = KafkaConsumer('testres', bootstrap_servers='192.168.33.50:9092')

for msg in consumer:
    val = msg.value.decode()

    print(msg.key.decode())
    print(json.loads(val).get('word'))
    print(json.loads(val).get('count'))
    print(json.loads(val).get('window'))
    print('='*30)
```

commit: 4c96e1eb17a5cbb4c1a33cef5c37aac00b4ec8e0
subject: Update test_api.py
old_file: dpaste/tests/test_api.py
new_file: dpaste/tests/test_api.py
lang: Python | proba: 0.000004
old_contents:
```python
# -*- encoding: utf-8 -*-
from django.core.urlresolvers import reverse
from django.test.client import Client
from django.test import TestCase

from ..models import Snippet
from ..forms import EXPIRE_DEFAULT
from ..highlight import LEXER_DEFAULT


class SnippetAPITestCase(TestCase):

    def setUp(self):
        self.api_url = reverse('dpaste_api_create_snippet')
        self.client = Client()

    def test_empty(self):
        """
        The browser sent a content field but with no data.

        !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

        ALL tests fail due to a Piston bug:
        https://bitbucket.org/jespern/django-piston/issue/221/attributeerror-httpresponseservererror
        """
        data = {}

        # No data
        response = self.client.post(self.api_url, {})
        self.assertEqual(response.status_code, 400)
        self.assertEqual(Snippet.objects.count(), 0)

        # No content
        data['content'] = ''
        response = self.client.post(self.api_url, data)
        self.assertEqual(response.status_code, 400)
        self.assertEqual(Snippet.objects.count(), 0)

        # Just some spaces
        data['content'] = '   '
        response = self.client.post(self.api_url, data)
        self.assertEqual(response.status_code, 400)
        self.assertEqual(Snippet.objects.count(), 0)

        # Linebreaks or tabs only are not valid either
        data['content'] = '\n\t '
        response = self.client.post(self.api_url, data)
        self.assertEqual(response.status_code, 400)
        self.assertEqual(Snippet.objects.count(), 0)

    def test_valid(self):
        """
        A valid snippet, contains Unicode, tabs, spaces, linebreaks etc.
        """
        data = {'content': u"Hello Wörld.\n\tGood Bye"}

        response = self.client.post(self.api_url, data)

        self.assertEqual(response.status_code, 200)
        self.assertEqual(Snippet.objects.count(), 1)

        # The response is a URL with quotes
        self.assertTrue(response.content.startswith('"'))
        self.assertTrue(response.content.endswith('"'))

        # The URL returned is the absolute url to the snippet.
        # If we call that url our snippet should be in the page content.
        snippet_url = response.content[1:-1]
        response = self.client.get(snippet_url)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, data['content'])
```
diff:
```diff
@@ -491,199 +491,8 @@
 ata.
-

        !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

        ALL tests fail due to a Piston bug:
        https://bitbucket.org/jespern/django-piston/issue/221/attributeerror-httpresponseservererror
 
```

c831e7cec02e06d9346bf6fdf0dcdf553f4f479e
|
Add test for interpolating NaNs
|
metpy/calc/tests/test_tools.py
|
metpy/calc/tests/test_tools.py
|
# Copyright (c) 2008-2015 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Tests for `calc.tools` module."""
import numpy as np
import pytest
from metpy.calc import find_intersections, nearest_intersection_idx, resample_nn_1d
from metpy.testing import assert_array_almost_equal, assert_array_equal
def test_resample_nn():
"""Test 1d nearest neighbor functionality."""
a = np.arange(5.)
b = np.array([2, 3.8])
truth = np.array([2, 4])
assert_array_equal(truth, resample_nn_1d(a, b))
def test_nearest_intersection_idx():
"""Test nearest index to intersection functionality."""
x = np.linspace(5, 30, 17)
y1 = 3 * x**2
y2 = 100 * x - 650
truth = np.array([2, 12])
assert_array_equal(truth, nearest_intersection_idx(y1, y2))
@pytest.mark.parametrize('direction, expected', [
('all', np.array([[8.88, 24.44], [238.84, 1794.53]])),
('increasing', np.array([[24.44], [1794.53]])),
('decreasing', np.array([[8.88], [238.84]]))
])
def test_find_intersections(direction, expected):
"""Test finding the intersection of two curves functionality."""
x = np.linspace(5, 30, 17)
y1 = 3 * x**2
y2 = 100 * x - 650
# Note: Truth is what we will get with this sampling, not the mathematical intersection
assert_array_almost_equal(expected, find_intersections(x, y1, y2, direction=direction), 2)
def test_find_intersections_no_intersections():
"""Test finding the intersection of two curves with no intersections."""
x = np.linspace(5, 30, 17)
y1 = 3 * x + 0
y2 = 5 * x + 5
# Note: Truth is what we will get with this sampling, not the mathematical intersection
truth = np.array([[],
[]])
assert_array_equal(truth, find_intersections(x, y1, y2))
def test_find_intersections_invalid_direction():
"""Test exception if an invalid direction is given."""
x = np.linspace(5, 30, 17)
y1 = 3 * x ** 2
y2 = 100 * x - 650
with pytest.raises(ValueError):
find_intersections(x, y1, y2, direction='increaing')
|
Python
| 0.000033 |
@@ -231,16 +231,17 @@
import
+(
find_int
@@ -251,16 +251,34 @@
ections,
+ interpolate_nans,
nearest
@@ -295,16 +295,40 @@
ion_idx,
+%0A
resampl
@@ -334,16 +334,17 @@
le_nn_1d
+)
%0Afrom me
@@ -2157,8 +2157,908 @@
eaing')%0A
+%0A%0Adef test_interpolate_nan_linear():%0A %22%22%22Test linear interpolation of arrays with NaNs in the y-coordinate.%22%22%22%0A x = np.linspace(0, 20, 15)%0A y = 5 * x + 3%0A nan_indexes = %5B1, 5, 11, 12%5D%0A y_with_nan = y.copy()%0A y_with_nan%5Bnan_indexes%5D = np.nan%0A assert_array_almost_equal(y, interpolate_nans(x, y_with_nan), 2)%0A%0A%0Adef test_interpolate_nan_log():%0A %22%22%22Test log interpolation of arrays with NaNs in the y-coordinate.%22%22%22%0A x = np.logspace(1, 5, 15)%0A y = 5 * np.log(x) + 3%0A nan_indexes = %5B1, 5, 11, 12%5D%0A y_with_nan = y.copy()%0A y_with_nan%5Bnan_indexes%5D = np.nan%0A assert_array_almost_equal(y, interpolate_nans(x, y_with_nan, kind='log'), 2)%0A%0A%0Adef test_interpolate_nan_invalid():%0A %22%22%22Test log interpolation with invalid parameter.%22%22%22%0A x = np.logspace(1, 5, 15)%0A y = 5 * np.log(x) + 3%0A with pytest.raises(ValueError):%0A interpolate_nans(x, y, kind='loog')%0A
|
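The diff above only adds tests; the interpolate_nans helper itself does not appear in this record. A minimal sketch that would satisfy all three tests — the implementation details here are an assumption, not MetPy's actual code:

import numpy as np

def interpolate_nans(x, y, kind='linear'):
    """Fill NaN holes in y by interpolating against x (sketch only)."""
    nans = np.isnan(y)
    if kind == 'linear':
        y[nans] = np.interp(x[nans], x[~nans], y[~nans])
    elif kind == 'log':
        # interpolate linearly in log-x space, matching the log-kind test
        y[nans] = np.interp(np.log(x[nans]), np.log(x[~nans]), y[~nans])
    else:
        raise ValueError('Unknown option for kind: {0}'.format(kind))
    return y

Both passing tests use data that is exactly linear in x (or in log x), so plain np.interp reproduces the original values to the two decimals the assertions check.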
49d28814c498d1698c61b8eeae3c3e3e019a09c3
|
add recipe3 scrap
|
scrap/recipe3.py
|
scrap/recipe3.py
|
Python
| 0.000373 |
@@ -0,0 +1,1508 @@
+import scrapy%0A%0A%0Aclass Recipe3Spider(scrapy.Spider):%0A name = %22recipe3%22%0A download_delay = 0.5%0A start_urls = %5B%0A %22http://www.cuisineaz.com/recettes/recherche_v2.aspx?recherche=%7B%7D%22.format(r)%0A for r in %5B%0A 'bases',%0A 'aperitifs',%0A 'entrees',%0A 'plats',%0A 'desserts',%0A 'accompagnements',%0A 'recettes-pas-cheres',%0A 'viandes',%0A 'poissons',%0A 'legumes',%0A 'fruits',%0A 'fromages',%0A 'repas',%0A 'cher',%0A 'farine',%0A 'sucre',%0A 'facile',%0A %5D%0A %5D%0A%0A def parse(self, response):%0A url = response.css('.pagination-next a::attr(href)').extract_first()%0A if url:%0A page = response.urljoin(url.strip())%0A yield scrapy.Request(page, callback=self.parse)%0A recipes = response.css('#titleRecette a::attr(href)').extract()%0A for recipe in recipes:%0A page = response.urljoin(recipe.strip())%0A yield scrapy.Request(page, callback=self.parse_recipe)%0A return%0A%0A def parse_recipe(self, response):%0A yield %7B%0A 'uri': response.url,%0A 'recipe': response.css('.recipe_main h1::text').extract_first(),%0A 'breadcrumb': %5B%5D,%0A 'quantity': response.css('#ctl00_ContentPlaceHolder_LblRecetteNombre::text').extract_first(),%0A 'content': response.css('.recipe_ingredients ul').extract_first()%0A %7D%0A%0A
|
|
f486343277a94e511ea1e152ca6b69f12fd657a0
|
Create droidgpspush.py
|
droidgpspush.py
|
droidgpspush.py
|
Python
| 0 |
@@ -0,0 +1,501 @@
+import androidhelper%0Aimport socket%0Aimport time%0A%0Adroid = androidhelper.Android()%0A%0Aport=12345%0As=socket.socket(socket.AF_INET,socket.SOCK_STREAM)%0As.connect((%2210.201.19.201%22,port)) #connecting to pi as client%0Adroid.makeToast(%22Starting location fetch%22) #notify me%0Awhile True:%0A%09location = droid.getLastKnownLocation().result%0A%09location = location.get('network', location.get('gps'))%09#fetch location%0A%09data = str(location)%0A%09print(data) #logging%0A%09s.send(data) #send to server%0A%09time.sleep(5) #wait for 5 seconds%0A
|
|
6d25c1958a84eb1a6004ebadec6769511974cca4
|
add basic rsa by request
|
basic-rsa/rsa.py
|
basic-rsa/rsa.py
|
Python
| 0 |
@@ -0,0 +1,324 @@
+def main():%0A e = int('3', 16)%0A n = int('64ac4671cb4401e906cd273a2ecbc679f55b879f0ecb25eefcb377ac724ee3b1', 16)%0A d = int('431d844bdcd801460488c4d17487d9a5ccc95698301d6ab2e218e4b575d52ea3', 16)%0A c = int('599f55a1b0520a19233c169b8c339f10695f9e61c92bd8fd3c17c8bba0d5677e', 16)%0A m = pow(c, d, n)%0A print(hex(m))%0A
|
|
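pow(c, d, n) recovers the plaintext because RSA picks d so that e*d ≡ 1 (mod φ(n)), making (m^e)^d ≡ m (mod n). A toy round trip with the classic textbook primes — the values are illustrative and unrelated to the challenge parameters above; pow(e, -1, phi) needs Python 3.8+:

p, q = 61, 53
n = p * q                  # 3233
phi = (p - 1) * (q - 1)    # 3120
e = 17                     # public exponent, coprime to phi
d = pow(e, -1, phi)        # 2753, the modular inverse of e

m = 65
c = pow(m, e, n)           # encrypt: 2790
assert pow(c, d, n) == m   # decrypt recovers the message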
6be3e0c5264ca2750a77ac1dbd4175502e51fd3c
|
Add argparse tests for ceph-deploy admin
|
ceph_deploy/tests/parser/test_admin.py
|
ceph_deploy/tests/parser/test_admin.py
|
Python
| 0 |
@@ -0,0 +1,1018 @@
+import pytest%0A%0Afrom ceph_deploy.cli import get_parser%0A%0A%0Aclass TestParserAdmin(object):%0A%0A def setup(self):%0A self.parser = get_parser()%0A%0A def test_admin_help(self, capsys):%0A with pytest.raises(SystemExit):%0A self.parser.parse_args('admin --help'.split())%0A out, err = capsys.readouterr()%0A assert 'usage: ceph-deploy admin' in out%0A assert 'positional arguments:' in out%0A assert 'optional arguments:' in out%0A%0A def test_admin_host_required(self, capsys):%0A with pytest.raises(SystemExit):%0A self.parser.parse_args('admin'.split())%0A out, err = capsys.readouterr()%0A assert %22error: too few arguments%22 in err%0A%0A def test_admin_one_host(self):%0A args = self.parser.parse_args('admin host1'.split())%0A assert args.client == %5B'host1'%5D%0A%0A def test_admin_multiple_hosts(self):%0A hostnames = %5B'host1', 'host2', 'host3'%5D%0A args = self.parser.parse_args(%5B'admin'%5D + hostnames)%0A assert args.client == hostnames%0A
|
|
2ee5f1e3563e5a7104515adf74e41a8781fbcd9e
|
Create exercise5.py
|
exercise5.py
|
exercise5.py
|
Python
| 0.000001 |
@@ -0,0 +1,626 @@
+ # -- coding: utf-8 -- %0Amy_name = 'Zed A. Shaw'%0Amy_age = 35 # not a lie%0Amy_height = 74 # inches%0Amy_weight = 180 # lbs%0Amy_eyes = 'Blue'%0Amy_teeth = 'White'%0Amy_hair = 'Brown'%0A%0Aprint %22Let's talk about %25s.%22 %25 my_name%0Aprint %22He's %25d inches tall.%22 %25 my_height%0Aprint %22He's %25d pounds heavy.%22 %25 my_weight%0Aprint %22Actually that's not too heavy.%22%0Aprint %22He's got %25s eyes and %25s hair.%22 %25 (my_eyes, my_hair)%0Aprint %22His teeth are usually %25s depending on the coffee.%22 %25 my_teeth%0A%0A# this line is tricky, try to get it exactly right%0Aprint %22If I add %25d, %25d, and %25d I get %25d.%22 %25 (%0A my_age, my_height, my_weight, my_age + my_height + my_weight)%0A
|
|
2523d34d4f3e26a408c7ec0e43708efea77f03a9
|
Add to support the chinese library
|
workflow/cndic_naver_search.py
|
workflow/cndic_naver_search.py
|
Python
| 0 |
@@ -0,0 +1,1820 @@
+# Naver Search Workflow for Alfred 2%0A# Copyright (C) 2013 Jinuk Baek%0A# This program is free software; you can redistribute it and/or%0A# modify it under the terms of the GNU General Public License%0A# as published by the Free Software Foundation; either version 2%0A# of the License, or (at your option) any later version.%0A#%0A# This program is distributed in the hope that it will be useful,%0A# but WITHOUT ANY WARRANTY; without even the implied warranty of%0A# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the%0A# GNU General Public License for more details.%0A#%0A# You should have received a copy of the GNU General Public License%0A# along with this program; if not, write to the Free Software%0A# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.%0A%0A%0Aimport sys%0A%0Afrom workflow import web, Workflow%0A%0A%0Adef get_dictionary_data(word):%0A%09url = 'http://ac.cndic.naver.com/ac2'%0A%09params = dict(q=word,%0A%09%09_callback='',%0A%09%09q_enc='utf-8',%0A%09%09st=11,%0A%09%09r_lt='00',%0A%09%09t_koreng=1,%0A%09%09r_format='json',%0A%09%09r_enc='utf-8',%0A%09%09r_unicode=0,%0A%09%09r_escape=1)%0A%0A%09r = web.get(url, params)%0A%09r.raise_for_status()%0A%09return r.json()%0A%0A%0Adef main(wf):%0A%09import cgi;%0A%0A%09args = wf.args%5B0%5D%0A%0A%09wf.add_item(title = 'Search Naver Cndic for %5C'%25s%5C'' %25 args, %0A%09%09%09%09autocomplete=args, %0A%09%09%09%09arg=args,%0A%09%09%09%09valid=True)%0A%0A%09def wrapper():%0A%09%09return get_dictionary_data(args)%0A%0A%09res_json = wf.cached_data(%22cn_%25s%22 %25 args, wrapper, max_age=600)%0A%0A%09for item in res_json%5B'items'%5D:%0A%09%09for ltxt in item:%0A%09%09%09if len(ltxt) %3E 0:%0A%09%09%09%09txt = ltxt%5B0%5D%5B0%5D;%0A%09%09%09%09rtxt = cgi.escape(ltxt%5B1%5D%5B0%5D);%0A%0A%09%09%09%09wf.add_item(title = u%22%25s %25s%22 %25 (txt, rtxt) ,%0A%09%09%09%09%09%09%09subtitle = 'Search Naver Cndic for %5C'%25s%5C'' %25 txt, %0A%09%09%09%09%09%09%09autocomplete=txt, %0A%09%09%09%09%09%09%09arg=txt,%0A%09%09%09%09%09%09%09valid=True);%0A%0A%09wf.send_feedback()%0A%09%09%09%09%0A%0A%0Aif __name__ == '__main__':%0A%09wf = Workflow()%0A%09sys.exit(wf.run(main))%0A%0A%0A
|
|
ac0e7cb6ff2885457ccbe9f7311489edf7c9406b
|
create train object utils
|
mozi/utils/train_object_utils.py
|
mozi/utils/train_object_utils.py
|
Python
| 0.000018 |
@@ -0,0 +1,2587 @@
+from __future__ import absolute_import%0Afrom __future__ import print_function%0A%0Aimport matplotlib%0A# matplotlib.use('Agg')%0Aimport theano%0Aimport theano.tensor as T%0Aimport numpy as np%0Aimport matplotlib.pyplot as plt%0Afrom theano.compile.ops import as_op%0Afrom mozi.utils.progbar import Progbar%0A%0Aimport tarfile, inspect, os%0Afrom six.moves.urllib.request import urlretrieve%0A%0AfloatX = theano.config.floatX%0A%0Adef split_list(tuple_list):%0A %22%22%22%0A DESCRIPTION:%0A split a list of tuples into two lists whereby one list contains the first elements%0A of the tuples and the other list contains the second elements.%0A PARAM:%0A tuple_list: a list of tuples, example tuple_list = %5B('a', 1), ('b', 2)%5D%0A RETURN:%0A two lists, example from above tuple_list will be split into %5B'a', 'b'%5D and %5B1, 2%5D%0A %22%22%22%0A ls_A = %5B%5D%0A ls_B = %5B%5D%0A%0A for tuple in tuple_list:%0A ls_A.append(tuple%5B0%5D)%0A ls_B.append(tuple%5B1%5D)%0A%0A return ls_A, ls_B%0A%0A%0Adef generate_shared_list(ls):%0A %22%22%22%0A DESCRIPTION:%0A generate a list of shared variables that matched the length of ls%0A PARAM:%0A ls: the list used for generating the shared variables%0A RETURN:%0A a list of shared variables initialized to 0 of len(ls)%0A %22%22%22%0A rlist = %5B%5D%0A%0A for i in xrange(len(ls)):%0A rlist.append(theano.shared(np.array(0., dtype=theano.config.floatX)))%0A%0A return rlist%0A%0A%0Adef merge_lists(ls_A, ls_B):%0A %22%22%22%0A DESCRIPTION:%0A merge two lists of equal length into into a list of tuples%0A PARAM:%0A ls_A: first list%0A ls_B: second list%0A RETURN:%0A a list of tuples%0A %22%22%22%0A%0A assert len(ls_A) == len(ls_B), 'two lists of different length'%0A%0A rlist = %5B%5D%0A for a, b in zip(ls_A, ls_B):%0A rlist.append((a,b))%0A%0A return rlist%0A%0A%0Adef get_shared_values(shared_ls):%0A %22%22%22%0A DESCRIPTION:%0A get a list of values from a list of shared variables%0A PARAM:%0A shared_ls: list of shared variables%0A RETURN:%0A numpy array of the list of values%0A %22%22%22%0A%0A val_ls = %5B%5D%0A for var in shared_ls:%0A val_ls.append(var.get_value())%0A%0A return np.asarray(val_ls, dtype=theano.config.floatX)%0A%0A%0Adef is_shared_var(var):%0A return var.__class__.__name__ == 'TensorSharedVariable' or %5C%0A var.__class__.__name__ == 'CudaNdarraySharedVariable'%0A%0A%0Adef merge_var(*vars):%0A def absortvar(v):%0A rvar = %5B%5D%0A if isinstance(v, (list, tuple)):%0A rvar += v%0A else:%0A rvar.append(v)%0A return rvar%0A%0A rvars = %5B%5D%0A for var in vars:%0A rvars += absortvar(var)%0A return rvars%0A
|
|
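split_list and merge_lists are inverses of each other, which is what training code relies on when shuttling (name, value) pairs in and out of lists of shared variables. A quick round-trip check using the functions above:

pairs = [('loss', 0.5), ('acc', 0.9)]
names, values = split_list(pairs)        # ['loss', 'acc'], [0.5, 0.9]
assert merge_lists(names, values) == pairs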
1768a69163c50e5e964eaf110323e590f13b4ff0
|
add 0000 file
|
Drake-Z/0000/0000.py
|
Drake-Z/0000/0000.py
|
Python
| 0.000001 |
@@ -0,0 +1,680 @@
+#!/usr/bin/env python3%0A# -*- coding: utf-8 -*-%0A%0A'%E7%AC%AC 0000 %E9%A2%98%EF%BC%9A%E5%B0%86%E4%BD%A0%E7%9A%84 QQ %E5%A4%B4%E5%83%8F%EF%BC%88%E6%88%96%E8%80%85%E5%BE%AE%E5%8D%9A%E5%A4%B4%E5%83%8F%EF%BC%89%E5%8F%B3%E4%B8%8A%E8%A7%92%E5%8A%A0%E4%B8%8A%E7%BA%A2%E8%89%B2%E7%9A%84%E6%95%B0%E5%AD%97%EF%BC%8C%E7%B1%BB%E4%BC%BC%E4%BA%8E%E5%BE%AE%E4%BF%A1%E6%9C%AA%E8%AF%BB%E4%BF%A1%E6%81%AF%E6%95%B0%E9%87%8F%E9%82%A3%E7%A7%8D%E6%8F%90%E7%A4%BA%E6%95%88%E6%9E%9C%E3%80%82 %E7%B1%BB%E4%BC%BC%E4%BA%8E%E5%9B%BE%E4%B8%AD%E6%95%88%E6%9E%9C'%0A%0A__author__ = 'Drake-Z'%0A%0Afrom PIL import Image, ImageDraw, ImageFont%0A%0Adef add_num(filname, text = '4', fillcolor = (255, 0, 0)):%0A img = Image.open(filname)%0A width, height = img.size%0A myfont = ImageFont.truetype('C:/windows/fonts/Arial.ttf', size=width//8)%0A fillcolor = (255, 0, 0)%0A draw = ImageDraw.Draw(img)%0A draw.text((width-width//8, 0), text, font=myfont, fill=fillcolor)%0A img.save('1.jpg','jpeg')%0A return 0%0A%0Aif __name__ == '__main__':%0A filname = '0.jpg'%0A text = '4'%0A fillcolor = (255, 0, 0)%0A add_num(filname, text, fillcolor)
|
|
ebc2b419a3cc7cace9c79d1c5032a2ae33b8bff1
|
Remove unused imports
|
custom/up_nrhm/reports/asha_reports.py
|
custom/up_nrhm/reports/asha_reports.py
|
import datetime
from dateutil.relativedelta import relativedelta
from corehq.apps.reports.filters.select import MonthFilter, YearFilter
from corehq.apps.reports.generic import GenericTabularReport
from corehq.apps.reports.standard import CustomProjectReport, DatespanMixin
from corehq.apps.reports.filters.dates import DatespanFilter
from custom.up_nrhm.filters import DrillDownOptionFilter, SampleFormatFilter
from custom.up_nrhm.reports.asha_facilitators_report import ASHAFacilitatorsReport
from custom.up_nrhm.reports.block_level_af import BlockLevelAFReport
from custom.up_nrhm.reports.block_level_month_report import BlockLevelMonthReport
def total_rows(report):
if not report.report_config.get('sf'):
return {
"total_under_facilitator": getattr(report, 'total_under_facilitator', 0),
"total_with_checklist": getattr(report, 'total_with_checklist', 0)
}
return {}
class ASHAReports(GenericTabularReport, DatespanMixin, CustomProjectReport):
fields = [SampleFormatFilter, DatespanFilter, DrillDownOptionFilter, MonthFilter, YearFilter]
name = "ASHA Reports"
slug = "asha_reports"
show_all_rows = True
default_rows = 20
printable = True
report_template_path = "up_nrhm/asha_report.html"
extra_context_providers = [total_rows]
no_value = '--'
@property
def report_config(self):
config = {
'sf': self.request.GET.get('sf'),
}
return config
@property
def report_context(self):
context = super(ASHAReports, self).report_context
context['sf'] = self.request.GET.get('sf')
return context
@property
def model(self):
config = self.report_config
if config.get('sf') == 'sf5':
return []
elif config.get('sf') == 'sf4':
return []
elif config.get('sf') == 'sf3':
return BlockLevelMonthReport(self.request, domain=self.domain)
else:
return ASHAFacilitatorsReport(self.request, domain=self.domain)
@property
def headers(self):
return self.model.headers
@property
def rows(self):
config = self.report_config
if not config.get('sf'):
rows, self.total_under_facilitator, total_with_checklist = self.model.rows
else:
rows = self.model.rows
return rows
|
Python
| 0.000001 |
@@ -1,69 +1,4 @@
-import datetime%0Afrom dateutil.relativedelta import relativedelta%0A
from
@@ -426,77 +426,8 @@
ort%0A
-from custom.up_nrhm.reports.block_level_af import BlockLevelAFReport%0A
from
|
3d8f02eb7c1b9b363143f25af9eadeb94c43b4ae
|
increase uwnetid maxlength
|
myuw/migrations/0017_netidlen.py
|
myuw/migrations/0017_netidlen.py
|
Python
| 0.000015 |
@@ -0,0 +1,395 @@
+# Generated by Django 2.0.13 on 2020-03-12 17:48%0A%0Afrom django.db import migrations, models%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('myuw', '0016_myuw_notice_group'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='user',%0A name='uwnetid',%0A field=models.SlugField(max_length=32, unique=True),%0A ),%0A %5D%0A
|
|
00a9d09e83ca3ee77d56a795f88d8d464c9c1063
|
Version change - Added Tamu provider
|
geocoder/__init__.py
|
geocoder/__init__.py
|
#!/usr/bin/python
# coding: utf8
from __future__ import absolute_import
"""
Geocoder
~~~~~~~~
Simple and consistent geocoding library written in Python.
Many online providers such as Google & Bing have geocoding services,
but these providers do not include Python libraries and return different
JSON responses from one another.
Consistent JSON responses from various providers.
>>> g = geocoder.google('New York City')
>>> g.latlng
[40.7127837, -74.0059413]
>>> g.state
'New York'
>>> g.json
...
"""
__title__ = 'geocoder'
__author__ = 'Denis Carriere'
__author_email__ = '[email protected]'
__version__ = '1.9.0'
__license__ = 'MIT'
__copyright__ = 'Copyright (c) 2013-2016 Denis Carriere'
# CORE
from geocoder.api import get, yahoo, bing, geonames, mapquest, google, mapbox # noqa
from geocoder.api import nokia, osm, tomtom, geolytica, arcgis, opencage # noqa
from geocoder.api import maxmind, ipinfo, freegeoip, ottawa, here, baidu, w3w, yandex, mapzen, komoot, tamu # noqa
# EXTRAS
from geocoder.api import timezone, elevation, ip, canadapost, reverse, distance, location # noqa
# CLI
from geocoder.cli import cli # noqa
|
Python
| 0 |
@@ -641,17 +641,18 @@
__ = '1.
-9
+10
.0'%0A__li
|
53a0e58bb68c3fb247a65fabf6c80b5bb41f440e
|
Fix custom attribute test factories
|
test/integration/ggrc/models/factories.py
|
test/integration/ggrc/models/factories.py
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Factories for models"""
# pylint: disable=too-few-public-methods,missing-docstring,old-style-class
# pylint: disable=no-init
import random
import factory
from ggrc import db
from ggrc import models
def random_string(prefix=''):
return '{prefix}{suffix}'.format(
prefix=prefix,
suffix=random.randint(0, 9999999999),
)
class ModelFactory(factory.Factory):
@classmethod
def _create(cls, target_class, *args, **kwargs):
instance = target_class(*args, **kwargs)
db.session.add(instance)
db.session.commit()
return instance
class TitledFactory(factory.Factory):
title = factory.LazyAttribute(lambda m: random_string('title'))
class CustomAttributeDefinitionFactory(ModelFactory):
class Meta:
model = models.CustomAttributeDefinition
title = None
definition_type = None
definition_id = None
attribute_type = None
multi_choice_options = None
class CustomAttributeValueFactory(ModelFactory):
class Meta:
model = models.CustomAttributeValue
custom_attribute_id = None
attributable_id = None
attributable_type = None
attribute_value = None
attribute_object_id = None
class DirectiveFactory(ModelFactory, TitledFactory):
class Meta:
model = models.Directive
class ControlFactory(ModelFactory, TitledFactory):
class Meta:
model = models.Control
directive = factory.SubFactory(DirectiveFactory)
kind_id = None
version = None
documentation_description = None
verify_frequency_id = None
fraud_related = None
key_control = None
active = None
notes = None
class AssessmentFactory(ModelFactory, TitledFactory):
class Meta:
model = models.Assessment
class ControlCategoryFactory(ModelFactory):
class Meta:
model = models.ControlCategory
name = factory.LazyAttribute(lambda m: random_string('name'))
lft = None
rgt = None
scope_id = None
depth = None
required = None
class CategorizationFactory(ModelFactory):
class Meta:
model = models.Categorization
category = None
categorizable = None
category_id = None
categorizable_id = None
categorizable_type = None
class ContextFactory(ModelFactory):
class Meta:
model = models.Context
name = factory.LazyAttribute(
lambda obj: random_string("SomeObjectType Context"))
related_object = None
class ProgramFactory(ModelFactory):
class Meta:
model = models.Program
title = factory.LazyAttribute(lambda _: random_string("program_title"))
slug = factory.LazyAttribute(lambda _: random_string(""))
class AuditFactory(ModelFactory):
class Meta:
model = models.Audit
title = factory.LazyAttribute(lambda _: random_string("audit title "))
slug = factory.LazyAttribute(lambda _: random_string(""))
status = "Planned"
program_id = factory.LazyAttribute(lambda _: ProgramFactory().id)
context_id = factory.LazyAttribute(lambda _: ContextFactory().id)
class AssessmentTemplateFactory(ModelFactory):
class Meta:
model = models.AssessmentTemplate
title = factory.LazyAttribute(
lambda _: random_string("assessment template title"))
template_object_type = None
test_plan_procedure = False
procedure_description = factory.LazyAttribute(
lambda _: random_string("lorem ipsum description"))
default_people = \
"{\"assessors\":\"Object Owners\",\"verifiers\":\"Object Owners\"}"
class ContractFactory(ModelFactory):
class Meta:
model = models.Contract
class EventFactory(ModelFactory):
class Meta:
model = models.Event
revisions = []
class RelationshipFactory(ModelFactory):
class Meta:
model = models.Relationship
source = None
destination = None
class RelationshipAttrFactory(ModelFactory):
class Meta:
model = models.RelationshipAttr
relationship_id = None
attr_name = None
attr_value = None
class PersonFactory(ModelFactory):
class Meta:
model = models.Person
|
Python
| 0.000001 |
@@ -823,35 +823,50 @@
ory(ModelFactory
+, TitledFactory
):%0A
-
%0A class Meta:%0A
@@ -914,23 +914,8 @@
on%0A%0A
- title = None%0A
de
@@ -977,20 +977,22 @@
_type =
-None
+%22Text%22
%0A multi
@@ -1139,19 +1139,16 @@
ttribute
-_id
= None%0A
|
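The net effect of the factory fix: CustomAttributeDefinitionFactory now inherits a random title from TitledFactory and defaults attribute_type to "Text", so tests only have to supply what they care about. A sketch — the "control" definition type is an illustrative value, not taken from this record:

cad = CustomAttributeDefinitionFactory(definition_type="control")
assert cad.title.startswith("title")   # supplied by TitledFactory's LazyAttribute
assert cad.attribute_type == "Text"    # the new default from this diff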
1e7b84155623691fb9fc1cec4efa6386938f3e72
|
Add missing migration (updating validators=)
|
core/migrations/0055_update_username_validators.py
|
core/migrations/0055_update_username_validators.py
|
Python
| 0 |
@@ -0,0 +1,867 @@
+# -*- coding: utf-8 -*-%0A# Generated by Django 1.9.7 on 2016-07-22 22:03%0Afrom __future__ import unicode_literals%0A%0Aimport django.core.validators%0Afrom django.db import migrations, models%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('core', '0054_add_provider__cloud_config_and_timezone'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='atmosphereuser',%0A name='username',%0A field=models.CharField(error_messages=%7B'unique': 'A user with that username already exists.'%7D, help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=30, unique=True, validators=%5Bdjango.core.validators.RegexValidator('%5E%5B%5C%5Cw.@+-%5D+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.')%5D, verbose_name='username'),%0A ),%0A %5D%0A
|
|
48217e5317412a9b5fb8181b6915963783efeaf2
|
Add test for kline result of exact amount
|
tests/test_historical_klines.py
|
tests/test_historical_klines.py
|
Python
| 0 |
@@ -0,0 +1,940 @@
+#!/usr/bin/env python%0A# coding=utf-8%0A%0Afrom binance.client import Client%0Aimport pytest%0Aimport requests_mock%0A%0A%0Aclient = Client('api_key', 'api_secret')%0A%0A%0Adef test_exact_amount():%0A %22%22%22Test Exact amount returned%22%22%22%0A%0A first_res = %5B%5D%0A row = %5B1519892340000,%220.00099400%22,%220.00099810%22,%220.00099400%22,%220.00099810%22,%224806.04000000%22,1519892399999,%224.78553253%22,154,%221785.14000000%22,%221.77837524%22,%220%22%5D%0A%0A for i in range(0, 500):%0A first_res.append(row)%0A%0A second_res = %5B%5D%0A%0A with requests_mock.mock() as m:%0A m.get('https://api.binance.com/api/v1/klines?interval=1m&limit=500&startTime=1519862400000&symbol=BNBBTC', json=first_res)%0A m.get('https://api.binance.com/api/v1/klines?interval=1m&limit=500&startTime=1519892400000&symbol=BNBBTC', json=second_res)%0A client.get_historical_klines(%0A symbol=%22BNBBTC%22,%0A interval=Client.KLINE_INTERVAL_1MINUTE,%0A start_str=%221st March 2018%22%0A )%0A
|
|
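The test works because requests_mock's context manager intercepts every matching call made through the requests library inside the with block, and the empty second page is what tells get_historical_klines to stop paginating. The interception pattern in isolation (endpoint and body here are illustrative):

import requests
import requests_mock

with requests_mock.mock() as m:
    m.get('https://api.binance.com/api/v1/ping', json={})
    assert requests.get('https://api.binance.com/api/v1/ping').json() == {}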
1f3a15b8ae6ffcb96faaf0acab940d9590fe6cb1
|
Add migration
|
fat/migrations/0064_auto_20160809_1559.py
|
fat/migrations/0064_auto_20160809_1559.py
|
Python
| 0.000002 |
@@ -0,0 +1,663 @@
+# -*- coding: utf-8 -*-%0A# Generated by Django 1.9.5 on 2016-08-09 15:59%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations, models%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('fat', '0063_auto_20160809_1545'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='expense',%0A name='status',%0A field=models.CharField(choices=%5B('W', 'Not submitted yet'), ('S', 'Submitted (awaiting processing)'), ('C', 'Administrator checking'), ('P', 'Authoriser checking'), ('A', 'Approved (submitted to finance)'), ('F', 'Finished')%5D, default='P', max_length=1),%0A ),%0A %5D%0A
|
|
5ec3f8dbe9f044d08a80563c05b648590fabdda7
|
add fibonnaci example
|
examples/fib.py
|
examples/fib.py
|
Python
| 0.999895 |
@@ -0,0 +1,562 @@
+# / 0 if i is 0%0A# fib(i) = %7C 1 if i is 1%0A# %5C fib(i - 1) + fib(i - 2) otherwise%0A%0Adef fib(n):%0A %22%22%22 Imperative definition of Fibonacci numbers %22%22%22%0A a, b = 0, 1%0A for i in range(n):%0A a, b = b, a + b%0A return b%0A%0A%0A# This is intuitive but VERY slow%0Adef fib(n):%0A %22%22%22 Functional definition of Fibonacci numbers %22%22%22%0A if n == 0 or n == 1:%0A return n%0A else:%0A return fib(n - 1) + fib(n - 2)%0A%0Afrom toolz import memoize%0A%0A# Oh wait, it's fast again%0Afib = memoize(fib)%0A
|
|
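Memoization rescues the recursive definition because each fib(i) is then computed once instead of an exponential number of times, turning the call tree into O(n) work. The stdlib offers the same trick when toolz is not around:

from functools import lru_cache   # stdlib stand-in for toolz.memoize here

@lru_cache(maxsize=None)
def fib(n):
    return n if n < 2 else fib(n - 1) + fib(n - 2)

print(fib(100))  # 354224848179261915075, returned essentially instantly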
c663f6b6e31832fae682c2c527955b13682b701e
|
Remove learner_testimonials column from course_metadata course run table
|
course_discovery/apps/course_metadata/migrations/0127_remove_courserun_learner_testimonials.py
|
course_discovery/apps/course_metadata/migrations/0127_remove_courserun_learner_testimonials.py
|
Python
| 0.000002 |
@@ -0,0 +1,429 @@
+# -*- coding: utf-8 -*-%0A# Generated by Django 1.11.15 on 2018-11-07 17:16%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('course_metadata', '0126_course_has_ofac_restrictions'),%0A %5D%0A%0A operations = %5B%0A migrations.RemoveField(%0A model_name='courserun',%0A name='learner_testimonials',%0A ),%0A %5D%0A
|
|
8b1bd5995ff4c95335e25e19962724e6d8c399d7
|
Create 0003_auto_20150930_1132.py
|
cities/migrations/0003_auto_20150930_1132.py
|
cities/migrations/0003_auto_20150930_1132.py
|
Python
| 0.000016 |
@@ -0,0 +1,1070 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import models, migrations%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('cities', '0002_auto_20150811_1912'),%0A %5D%0A%0A operations = %5B%0A migrations.AddField(%0A model_name='city',%0A name='name_de',%0A field=models.CharField(db_index=True, verbose_name='ascii name', null=True, max_length=200),%0A ),%0A migrations.AddField(%0A model_name='city',%0A name='name_en',%0A field=models.CharField(db_index=True, verbose_name='ascii name', null=True, max_length=200),%0A ),%0A migrations.AddField(%0A model_name='country',%0A name='name_de',%0A field=models.CharField(db_index=True, verbose_name='ascii name', null=True, max_length=200),%0A ),%0A migrations.AddField(%0A model_name='country',%0A name='name_en',%0A field=models.CharField(db_index=True, verbose_name='ascii name', null=True, max_length=200),%0A ),%0A %5D%0A
|
|
b75e10f3235e9215458071279b67910627a95180
|
Add celery based job runner
|
ceam/framework/celery_tasks.py
|
ceam/framework/celery_tasks.py
|
Python
| 0.000011 |
@@ -0,0 +1,1683 @@
+import os%0Afrom time import time%0Aimport logging%0A%0Aimport pandas as pd%0A%0Afrom celery import Celery%0Afrom billiard import current_process%0A%0A%0Aapp = Celery()%0A%[email protected](autoretry_for=(Exception,), max_retries=2)%0Adef worker(draw_number, component_config, branch_config, logging_directory):%0A worker = current_process().index%0A logging.basicConfig(format='%25(asctime)s - %25(name)s - %25(levelname)s - %25(message)s', filename=os.path.join(logging_directory, str(worker)+'.log'), level=logging.DEBUG)%0A logging.info('Starting job: %7B%7D'.format((draw_number, component_config, branch_config)))%0A%0A run_configuration = component_config%5B'configuration'%5D.get('run_configuration', %7B%7D)%0A results_directory = run_configuration%5B'results_directory'%5D%0A run_configuration%5B'run_id'%5D = str(worker)+'_'+str(time())%0A if branch_config is not None:%0A run_configuration%5B'run_key'%5D = dict(branch_config)%0A run_configuration%5B'run_key'%5D%5B'draw'%5D = draw_number%0A component_config%5B'configuration'%5D%5B'run_configuration'%5D = run_configuration%0A%0A try:%0A from ceam.framework.engine import configure, run%0A from ceam.framework.components import prepare_component_configuration%0A from ceam.framework.util import collapse_nested_dict%0A%0A configure(draw_number=draw_number, simulation_config=branch_config)%0A results = run(prepare_component_configuration(component_config))%0A results = pd.DataFrame(results, index=%5Bdraw_number%5D).to_json()%0A%0A return results%0A except Exception as e:%0A logging.exception('Unhandled exception in worker')%0A raise%0A finally:%0A logging.info('Exiting job: %7B%7D'.format((draw_number, component_config, branch_config)))%0A%0A
|
|
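Nothing in this record wires up a broker, so the bare Celery() app still needs configuration before the task can run. A sketch of how a client would enqueue it once a broker is set — every argument value below is illustrative:

import pandas as pd

config = {'configuration': {'run_configuration': {'results_directory': '/tmp/out'}}}
async_result = worker.delay(
    draw_number=0,
    component_config=config,
    branch_config=None,
    logging_directory='/tmp/logs',
)
df = pd.read_json(async_result.get())  # the task returns a JSON-serialized DataFrame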
164f43f902b89b84b4f0d474f4d3e0a18924110d
|
Add test of randomized select algorithm
|
selection_test.py
|
selection_test.py
|
Python
| 0.000005 |
@@ -0,0 +1,640 @@
+import quicksort.quicksort%0Aimport random_selection.random_selection%0Aimport sys%0Aimport time%0Afrom random import randint%0A%0Adef main(max_len, check):%0A%09for n in %5B2**(n+1) for n in range(max_len)%5D:%0A%09%09arr = %5Brandint(0, 2**max_len) for n in range(n)%5D%0A%0A%09%09median = int((len(arr)+1)/2) - 1%0A%0A%09%09current_time = time.time()%0A%09%09result = random_selection.random_selection.select(arr, median)%0A%09%09end_time = time.time() - current_time%0A%0A%09%09sorted_arr = quicksort.quicksort.sort(arr)%0A%0A%09%09if sorted_arr%5Bmedian%5D == result:%0A%09%09%09print %22Success! In %25f%22 %25 end_time%0A%0A%09%09else:%0A%09%09%09print %22Failed%22%0A%0A%09return%0A%0Aif __name__ == '__main__':%0A%09%09arr_len = int(sys.argv%5B1%5D)%0A%09%09main(arr_len)
|
|
9168807db69372ffb93430991fc4e666fa53a8f5
|
Add missing example file
|
examples/movemean.py
|
examples/movemean.py
|
Python
| 0.000005 |
@@ -0,0 +1,583 @@
+%22%22%22%0AA moving average function using @guvectorize.%0A%22%22%22%0A%0Aimport numpy as np%0A%0Afrom numba import guvectorize%0A%0A@guvectorize(%5B'void(float64%5B:%5D, intp%5B:%5D, float64%5B:%5D)'%5D, '(n),()-%3E(n)')%0Adef move_mean(a, window_arr, out):%0A window_width = window_arr%5B0%5D%0A asum = 0.0%0A count = 0%0A for i in range(window_width):%0A asum += a%5Bi%5D%0A count += 1%0A out%5Bi%5D = asum / count%0A for i in range(window_width, len(a)):%0A asum += a%5Bi%5D - a%5Bi - window_width%5D%0A out%5Bi%5D = asum / count%0A%0Aarr = np.arange(20, dtype=np.float64).reshape(2, 10)%0Aprint(arr)%0Aprint(move_mean(arr, 3))%0A
|
|
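The '(n),()->(n)' signature declares a vector-plus-scalar kernel, so NumPy applies it to each row of the (2, 10) input independently — and the scalar slot broadcasts as well, which permits a different window per row. A quick check continuing the example above, not part of the original file:

# first row of move_mean(arr, 3) works out to
# [0., 0.5, 1., 2., 3., 4., 5., 6., 7., 8.]
print(move_mean(arr, 3))

# per-row window widths via broadcasting over the scalar argument
print(move_mean(arr, np.array([2, 4])))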
1376fa5a9369bcff3fbfdf09a103a2b5c8b802f2
|
add missing os import and fix undefined gen_log
|
zmq/eventloop/ioloop.py
|
zmq/eventloop/ioloop.py
|
# coding: utf-8
"""tornado IOLoop API with zmq compatibility
If you have tornado ≥ 3.0, this is a subclass of tornado's IOLoop,
otherwise we ship a minimal subset of tornado in zmq.eventloop.minitornado.
The minimal shipped version of tornado's IOLoop does not include
support for concurrent futures - this will only be available if you
have tornado ≥ 3.0.
Authors
-------
* MinRK
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013 Brian Granger, Min Ragan-Kelley
#
# This file is part of pyzmq
#
# Distributed under the terms of the New BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, with_statement
import logging
import time
import warnings
from zmq import (
Poller,
POLLIN, POLLOUT, POLLERR,
ZMQError, ETERM,
)
try:
import tornado
tornado_version = tornado.version_info
except (ImportError, AttributeError):
tornado_version = ()
try:
# tornado ≥ 3
from tornado.ioloop import PollIOLoop, PeriodicCallback
except ImportError:
from .minitornado.ioloop import PollIOLoop, PeriodicCallback
class DelayedCallback(PeriodicCallback):
"""Schedules the given callback to be called once.
The callback is called once, after callback_time milliseconds.
`start` must be called after the DelayedCallback is created.
The timeout is calculated from when `start` is called.
"""
def __init__(self, callback, callback_time, io_loop=None):
# PeriodicCallback requires callback_time to be positive
warnings.warn("""DelayedCallback is deprecated.
Use loop.add_timeout instead.""", DeprecationWarning)
callback_time = max(callback_time, 1e-3)
super(DelayedCallback, self).__init__(callback, callback_time, io_loop)
def start(self):
"""Starts the timer."""
self._running = True
self._firstrun = True
self._next_timeout = time.time() + self.callback_time / 1000.0
self.io_loop.add_timeout(self._next_timeout, self._run)
def _run(self):
if not self._running: return
self._running = False
try:
self.callback()
except Exception:
logging.error("Error in delayed callback", exc_info=True)
class ZMQPoller(object):
"""A poller that can be used in the tornado IOLoop.
This simply wraps a regular zmq.Poller, scaling the timeout
by 1000, so that it is in seconds rather than milliseconds.
"""
def __init__(self):
self._poller = Poller()
@staticmethod
def _map_events(events):
"""translate IOLoop.READ/WRITE/ERROR event masks into zmq.POLLIN/OUT/ERR"""
z_events = 0
if events & IOLoop.READ:
z_events |= POLLIN
if events & IOLoop.WRITE:
z_events |= POLLOUT
if events & IOLoop.ERROR:
z_events |= POLLERR
return z_events
@staticmethod
def _remap_events(z_events):
"""translate zmq.POLLIN/OUT/ERR event masks into IOLoop.READ/WRITE/ERROR"""
events = 0
if z_events & POLLIN:
events |= IOLoop.READ
if z_events & POLLOUT:
events |= IOLoop.WRITE
if z_events & POLLERR:
events |= IOLoop.ERROR
return events
def register(self, fd, events):
return self._poller.register(fd, self._map_events(events))
def modify(self, fd, events):
return self._poller.modify(fd, self._map_events(events))
def unregister(self, fd):
return self._poller.unregister(fd)
def poll(self, timeout):
"""poll in seconds rather than milliseconds.
Event masks will be IOLoop.READ/WRITE/ERROR
"""
z_events = self._poller.poll(1000*timeout)
return [ (fd,self._remap_events(evt)) for (fd,evt) in z_events ]
def close(self):
pass
class ZMQIOLoop(PollIOLoop):
"""ZMMQ subclass of tornado's IOLoop"""
def initialize(self, **kwargs):
super(ZMQIOLoop, self).initialize(impl=ZMQPoller(), **kwargs)
@staticmethod
def instance():
"""Returns a global `IOLoop` instance.
Most applications have a single, global `IOLoop` running on the
main thread. Use this method to get this instance from
another thread. To get the current thread's `IOLoop`, use `current()`.
"""
# install ZMQIOLoop as the active IOLoop implementation
# when using tornado 3
if tornado_version >= (3,):
PollIOLoop.configure(ZMQIOLoop)
return PollIOLoop.instance()
def close(self, all_fds=False):
"""override to use *method* to close FDs
instead of os.close on everything, which doesn't work on zmq Sockets.
Should be fixed in a future tornado release.
"""
with self._callback_lock:
self._closing = True
self.remove_handler(self._waker.fileno())
if all_fds:
for fd in self._handlers.keys():
try:
# begin patch
try:
fd.close()
except AttributeError:
os.close(fd)
# end patch
except Exception:
gen_log.debug("error closing fd %s", fd, exc_info=True)
self._waker.close()
self._impl.close()
def start(self):
try:
super(ZMQIOLoop, self).start()
except ZMQError as e:
if e.errno == ETERM:
# quietly return on ETERM
pass
else:
raise e
# public API name
IOLoop = ZMQIOLoop
def install():
"""set the tornado IOLoop instance with the pyzmq IOLoop.
After calling this function, tornado's IOLoop.instance() and pyzmq's
IOLoop.instance() will return the same object.
An assertion error will be raised if tornado's IOLoop has been initialized
prior to calling this function.
"""
from tornado import ioloop
# check if tornado's IOLoop is already initialized to something other
# than the pyzmq IOLoop instance:
assert (not ioloop.IOLoop.initialized()) or \
ioloop.IOLoop.instance() is IOLoop.instance(), "tornado IOLoop already initialized"
if tornado_version >= (3,):
# tornado 3 has an official API for registering new defaults, yay!
ioloop.IOLoop.configure(ZMQIOLoop)
else:
# we have to set the global instance explicitly
ioloop.IOLoop._instance = IOLoop.instance()
|
Python
| 0 |
@@ -837,16 +837,26 @@
tement%0A%0A
+import os%0A
import l
@@ -5515,14 +5515,14 @@
-gen_lo
+loggin
g.de
|
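Both fixes land in the monkeypatched close() path: the os.close(fd) fallback needs the new import os, and the exception handler now logs through the already-imported logging module instead of tornado's gen_log, which this file never imports. For context, the way this class usually becomes the active loop, per its own install() docstring:

from zmq.eventloop import ioloop
ioloop.install()            # must run before tornado instantiates its IOLoop

from tornado.ioloop import IOLoop
loop = IOLoop.instance()    # now returns the ZMQIOLoop instance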
82d34111295fdfa35d0e9815053498e935d415af
|
Add example script to store & read datetime
|
examples/store_datetimes.py
|
examples/store_datetimes.py
|
Python
| 0 |
@@ -0,0 +1,244 @@
+import h5py%0Aimport numpy as np%0A%0Aarr = np.array(%5Bnp.datetime64('2019-09-22T17:38:30')%5D)%0A%0Awith h5py.File('datetimes.h5', 'w') as f:%0A # Create dataset%0A f%5B'data'%5D = arr.astype(h5py.opaque_dtype(arr.dtype))%0A%0A # Read%0A print(f%5B'data'%5D%5B:%5D)%0A
|
|
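Reading the dataset back yields the same opaque dtype it was stored with; a cast restores a datetime64 suitable for date arithmetic. A sketch, assuming the file written above:

import h5py

with h5py.File('datetimes.h5', 'r') as f:
    raw = f['data'][:]                   # carries the opaque on-disk dtype
    times = raw.astype('datetime64[s]')  # cast back before doing arithmetic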
e581eb8af860456b0ff46e99398002b3df0f0677
|
add Julia magic for IPython
|
julia/magic.py
|
julia/magic.py
|
Python
| 0.000406 |
@@ -0,0 +1,1954 @@
+%22%22%22%0A==========================%0A Julia magics for IPython%0A==========================%0A%0A%7BJULIAMAGICS_DOC%7D%0A%0AUsage%0A=====%0A%0A%60%60%25%25julia%60%60%0A%0A%7BJULIA_DOC%7D%0A%22%22%22%0A%0A#-----------------------------------------------------------------------------%0A# Imports%0A#-----------------------------------------------------------------------------%0A%0Afrom __future__ import print_function%0A%0Aimport sys%0A%0Afrom IPython.core.magic import Magics, magics_class, line_cell_magic%0A%0Afrom julia import Julia %0A%0A#-----------------------------------------------------------------------------%0A# Main classes%0A#-----------------------------------------------------------------------------%0A%0A@magics_class%0Aclass JuliaMagics(Magics):%0A %22%22%22A set of magics useful for interactive work with Julia.%0A %22%22%22%0A def __init__(self, shell):%0A %22%22%22%0A Parameters%0A ----------%0A shell : IPython shell%0A%0A %22%22%22%0A%0A super(JuliaMagics, self).__init__(shell)%0A print(%22Initializing Julia interpreter. This may take some time...%22,%0A end='')%0A # Flush, otherwise the Julia startup will keep stdout buffered%0A sys.stdout.flush()%0A self.julia = Julia(init_julia=True)%0A print()%0A %0A @line_cell_magic%0A def julia(self, line, cell=None):%0A %22%22%22%0A Execute code in Julia, and pull some of the results back into the%0A Python namespace.%0A %22%22%22%0A src = str(line if cell is None else cell)%0A return self.julia.eval(src)%0A%0A# Add to the global docstring the class information.%0A__doc__ = __doc__.format(%0A JULIAMAGICS_DOC = ' '*8 + JuliaMagics.__doc__,%0A JULIA_DOC = ' '*8 + JuliaMagics.julia.__doc__,%0A )%0A%0A%0A#-----------------------------------------------------------------------------%0A# IPython registration entry point.%0A#-----------------------------------------------------------------------------%0A%0Adef load_ipython_extension(ip):%0A %22%22%22Load the extension in IPython.%22%22%22%0A ip.register_magics(JuliaMagics)%0A
|
|
12691d47c4dbbaac42d2c9a8fe04e70cb5a94e98
|
add Yaspin.write usage example
|
examples/write_method.py
|
examples/write_method.py
|
Python
| 0 |
@@ -0,0 +1,466 @@
+# -*- coding: utf-8 -*-%0A%0A%22%22%22%0Aexamples.write_method%0A~~~~~~~~~~~~~~~~~~~~~%0A%0ABasic usage of %60%60write%60%60 method.%0A%22%22%22%0A%0Aimport time%0A%0Afrom yaspin import yaspin%0A%0A%0Adef main():%0A with yaspin(text='Downloading images') as sp:%0A # task 1%0A time.sleep(1)%0A sp.write('%3E image 1 download complete')%0A%0A # task 2%0A time.sleep(2)%0A sp.write('%3E image 2 download complete')%0A%0A # finalize%0A sp.ok()%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
324bc6f72deef0349f0da48366ab11b749a231b5
|
Make AzureKeyVaultBackend backwards-compatible (#12626)
|
airflow/contrib/secrets/azure_key_vault.py
|
airflow/contrib/secrets/azure_key_vault.py
|
Python
| 0 |
@@ -0,0 +1,1208 @@
+#%0A# Licensed to the Apache Software Foundation (ASF) under one%0A# or more contributor license agreements. See the NOTICE file%0A# distributed with this work for additional information%0A# regarding copyright ownership. The ASF licenses this file%0A# to you under the Apache License, Version 2.0 (the%0A# %22License%22); you may not use this file except in compliance%0A# with the License. You may obtain a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing,%0A# software distributed under the License is distributed on an%0A# %22AS IS%22 BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY%0A# KIND, either express or implied. See the License for the%0A# specific language governing permissions and limitations%0A# under the License.%0A%0A%22%22%22This module is deprecated. Please use %60airflow.providers.microsoft.azure.secrets.azure_key_vault%60.%22%22%22%0A%0Aimport warnings%0A%0A# pylint: disable=unused-import%0Afrom airflow.providers.microsoft.azure.secrets.azure_key_vault import AzureKeyVaultBackend # noqa%0A%0Awarnings.warn(%0A %22This module is deprecated. Please use %60airflow.providers.microsoft.azure.secrets.azure_key_vault%60.%22,%0A DeprecationWarning,%0A stacklevel=2,%0A)%0A
|
|
165d6795c2e3b173282736127c092ede57ae8f55
|
Create create_recurring_for_failed.py
|
erpnext/patches/v6_27/create_recurring_for_failed.py
|
erpnext/patches/v6_27/create_recurring_for_failed.py
|
Python
| 0.000004 |
@@ -0,0 +1,324 @@
+import frappe%0Afrom erpnext.controllers.recurring_document import manage_recurring_documents%0A%0Adef execute():%0A%0A%09frappe.db.sql(%22%22%22update %60tabSales Invoice%60 %0A%09%09%09%09set is_recurring=1 where (docstatus=1 or docstatus=0) and next_date='2016-06-26' and is_recurring=0%22%22%22)%0A%09%0A%09manage_recurring_documents(%22Sales Invoice%22, %222016-06-26%22)%0A%0A
|
|
93d1d4cc446cd13affaf1b467e39845c5dc437a5
|
Add missing migration
|
events/migrations/0002_auto_20150119_2138.py
|
events/migrations/0002_auto_20150119_2138.py
|
Python
| 0.0002 |
@@ -0,0 +1,1027 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import models, migrations%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('events', '0001_initial'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='offer',%0A name='price',%0A field=models.CharField(max_length=512),%0A preserve_default=True,%0A ),%0A migrations.AlterField(%0A model_name='offer',%0A name='price_en',%0A field=models.CharField(null=True, max_length=512),%0A preserve_default=True,%0A ),%0A migrations.AlterField(%0A model_name='offer',%0A name='price_fi',%0A field=models.CharField(null=True, max_length=512),%0A preserve_default=True,%0A ),%0A migrations.AlterField(%0A model_name='offer',%0A name='price_sv',%0A field=models.CharField(null=True, max_length=512),%0A preserve_default=True,%0A ),%0A %5D%0A
|
|
a8b46224dfda38173ea130d820411aad6a47acfc
|
Add Commander.py
|
src/Commander.py
|
src/Commander.py
|
Python
| 0.000004 |
@@ -0,0 +1,1596 @@
+# Copyright (c) 2013 Molly White%0A#%0A# Permission is hereby granted, free of charge, to any person obtaining a copy%0A# of this software and associated documentation files (the %22Software%22), to deal%0A# in the Software without restriction, including without limitation the rights%0A# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell%0A# copies of the Software, and to permit persons to whom the Software is%0A# furnished to do so, subject to the following conditions:%0A#%0A# The above copyright notice and this permission notice shall be included in%0A# all copies or substantial portions of the Software.%0A#%0A# THE SOFTWARE IS PROVIDED %22AS IS%22, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR%0A# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,%0A# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE%0A# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER%0A# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,%0A# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE%0A# SOFTWARE.%0A%0Aimport argparse%0Afrom Bot import Bot%0Afrom time import strftime%0Aimport logging%0A%0Adef main():%0A print (%22Starting GorillaBot.%5Cn%22)%0A desc = %22This is the command-line utility for setting up and running GorillaBot, %22%0A %22a simple IRC bot.%22%0A parser = argparse.ArgumentParser(description=desc)%0A parser.add_argument(%22-default%22, action=%22store_true%22)%0A logger = logging.getLogger(%22GB%22)%0A logger.info(%22LOG!%22)%0A %0A GorillaBot = Bot()%0A parser.parse_args()%0A %0Aif __name__ == %22__main__%22:%0A main()%0A
|
|
52c9a8ab10934c7acf8bcc404dccd2524199acb7
|
support for qualifying keys with dot('.') in JSON reference
|
src/DictUtils.py
|
src/DictUtils.py
|
import collections
class DictUtils:
@staticmethod
def __retrieveFromDict(t, key):
if None != t:
found = True
if str == type(key):
keys = [key]
else:
keys = key
for k in keys:
if k in t:
t = t[k]
else:
found = False
break
if found:
return t
return None
@staticmethod
def defaultIfNone(theDict, defaultDict, key):
if None == key:
return None
val = DictUtils.__retrieveFromDict(theDict, key)
if None != val:
return val
return DictUtils.__retrieveFromDict(defaultDict, key)
@staticmethod
def convert(data):
if isinstance(data, basestring):
return str(data)
elif isinstance(data, collections.Mapping):
return dict(map(DictUtils.convert, data.iteritems()))
elif isinstance(data, collections.Iterable):
return type(data)(map(DictUtils.convert, data))
else:
return data
|
Python
| 0.000072 |
@@ -195,13 +195,22 @@
s =
-%5B
key
-%5D
+.split('.')
%0A
|
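After this change a string key is split on '.', so one dotted path walks nested dictionaries in both the primary and the default dict; keys without dots behave exactly as before, since split returns the whole key. A usage sketch:

data = {'server': {'host': 'localhost'}}
defaults = {'server': {'timeout': 30}}

DictUtils.defaultIfNone(data, defaults, 'server.host')     # -> 'localhost'
DictUtils.defaultIfNone(data, defaults, 'server.timeout')  # -> 30, via the fallback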
60b13eb1a322336433824680a7ffe344afb6cdc7
|
Fix tests
|
udata/tests/api/test_url_api.py
|
udata/tests/api/test_url_api.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
import httpretty
import requests
from flask import url_for
from udata.tests.api import APITestCase
from udata.utils import faker
from udata.settings import Testing
CROQUEMORT_URL = 'http://check.test'
CHECK_ONE_URL = '{0}/check/one'.format(CROQUEMORT_URL)
METADATA_URL = '{0}/url'.format(CROQUEMORT_URL)
def metadata_factory(url, data=None):
response = {
'etag': '',
'url': url,
'content-length': faker.pyint(),
'content-disposition': '',
'content-md5': faker.md5(),
'content-location': '',
'expires': faker.iso8601(),
'status': 200,
'updated': faker.iso8601(),
'last-modified': faker.iso8601(),
'content-encoding': 'gzip',
'content-type': faker.mime_type()
}
if data:
response.update(data)
return json.dumps(response)
def mock_url_check(url, data=None, status=200):
url_hash = faker.md5()
httpretty.register_uri(httpretty.POST, CHECK_ONE_URL,
body=json.dumps({'url-hash': url_hash}),
content_type='application/json')
check_url = '/'.join((METADATA_URL, url_hash))
httpretty.register_uri(httpretty.GET, check_url,
body=metadata_factory(url, data),
content_type='application/json',
status=status)
def exception_factory(exception):
def callback(request, uri, headers):
raise exception
return callback
class CheckUrlSettings(Testing):
CROQUEMORT = {
'url': CROQUEMORT_URL,
'retry': 2,
'delay': 1,
}
class CheckUrlAPITest(APITestCase):
settings = CheckUrlSettings
@httpretty.activate
def test_returned_metadata(self):
url = faker.uri()
metadata = {
'content-type': 'text/html; charset=utf-8',
'status': 200,
}
mock_url_check(url, metadata)
response = self.get(url_for('api.checkurl'),
qs={'url': url, 'group': ''})
self.assert200(response)
self.assertEqual(response.json['status'], 200)
self.assertEqual(response.json['url'], url)
self.assertEqual(response.json['content-type'],
'text/html; charset=utf-8')
@httpretty.activate
def test_invalid_url(self):
url = faker.uri()
mock_url_check(url, {'status': 503})
response = self.get(url_for('api.checkurl'),
qs={'url': url, 'group': ''})
self.assertStatus(response, 503)
@httpretty.activate
def test_delayed_url(self):
url = faker.uri()
mock_url_check(url, status=404)
response = self.get(url_for('api.checkurl'),
qs={'url': url, 'group': ''})
self.assertStatus(response, 500)
self.assertEqual(
response.json['error'],
'We were unable to retrieve the URL after 2 attempts.')
@httpretty.activate
def test_timeout(self):
url = faker.uri()
exception = requests.Timeout('Request timed out')
httpretty.register_uri(httpretty.POST, CHECK_ONE_URL,
body=exception_factory(exception))
response = self.get(url_for('api.checkurl'),
qs={'url': url, 'group': ''})
self.assertStatus(response, 503)
@httpretty.activate
def test_connection_error(self):
url = faker.uri()
exception = requests.ConnectionError('Unable to connect')
httpretty.register_uri(httpretty.POST, CHECK_ONE_URL,
body=exception_factory(exception))
response = self.get(url_for('api.checkurl'),
qs={'url': url, 'group': ''})
self.assertStatus(response, 503)
@httpretty.activate
def test_json_error_check_one(self):
url = faker.uri()
httpretty.register_uri(httpretty.POST, CHECK_ONE_URL,
body='<strong>not json</strong>',
content_type='test/html')
response = self.get(url_for('api.checkurl'),
qs={'url': url, 'group': ''})
self.assertStatus(response, 503)
@httpretty.activate
def test_json_error_check_url(self):
url = faker.uri()
url_hash = faker.md5()
httpretty.register_uri(httpretty.POST, CHECK_ONE_URL,
body=json.dumps({'url-hash': url_hash}),
content_type='application/json')
check_url = '/'.join((METADATA_URL, url_hash))
httpretty.register_uri(httpretty.GET, check_url,
body='<strong>not json</strong>',
content_type='test/html')
response = self.get(url_for('api.checkurl'),
qs={'url': url, 'group': ''})
self.assertStatus(response, 500)
self.assertIn('error', response.json)
|
Python
| 0.000003 |
@@ -2890,33 +2890,33 @@
tus(response, 50
-0
+3
)%0A self.a
@@ -5014,17 +5014,17 @@
onse, 50
-0
+3
)%0A
|
fe36fd79c1981c489fd1db548c7468acbf98fff5
|
add test for s3 filename unquote
|
app/backend/gwells/tests/test_documents.py
|
app/backend/gwells/tests/test_documents.py
|
Python
| 0.000001 |
@@ -0,0 +1,1118 @@
+from django.test import TestCase%0Afrom gwells.documents import MinioClient%0A%0A%0Aclass DocumentsTestCase(TestCase):%0A%0A def test_document_url_with_space(self):%0A minio_client = MinioClient(disable_private=True)%0A%0A test_document = %7B%0A %22bucket_name%22: %22test_bucket%22,%0A %22object_name%22: %22test key%22%0A %7D%0A%0A test_url = minio_client.create_url(test_document, %22example.com%22, test_document.get(%22bucket_name%22))%0A%0A self.assertEqual(test_url, %22https://example.com/test_bucket/test key%22)%0A%0A def test_document_url_with_plus(self):%0A minio_client = MinioClient(disable_private=True)%0A%0A test_document = %7B%0A %22bucket_name%22: %22test_bucket%22,%0A%0A # if this was a real plus in the filename it should be %252B in the listing.%0A # spaces get encoded into + (so in this test case, this object_name originally had a space).%0A %22object_name%22: %22test+key%22 %0A %7D%0A%0A test_url = minio_client.create_url(test_document, %22example.com%22, test_document.get(%22bucket_name%22))%0A%0A self.assertEqual(test_url, %22https://example.com/test_bucket/test key%22)%0A
|
|
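The second test pins down the convention the client depends on: in an S3/minio listing a space comes back as '+', while a filename that genuinely contains '+' arrives percent-encoded as %2B. The stdlib call that performs exactly this decoding — presumably what create_url applies, though the implementation is not shown here:

from urllib.parse import unquote_plus

unquote_plus('test+key')    # -> 'test key'  ('+' was an encoded space)
unquote_plus('test%2Bkey')  # -> 'test+key'  (a literal plus stays a plus)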
4b06b5ec929af3466bfe9f03892b6c68259a2e3e
|
add gunicorn app
|
gunicorn_app.py
|
gunicorn_app.py
|
Python
| 0 |
@@ -0,0 +1,446 @@
+#!/usr/bin/env python3%0A# -*- coding: utf-8 -*-%0A%0Aimport os%0A%0ADATA_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data'))%0A%0Afrom logbook.compat import redirect_logging%0Aredirect_logging()%0A%0Afrom aip import make%0Afrom aip.log import RedisPub%0A%0Awith RedisPub():%0A app = make(%0A instance_path=DATA_PATH,%0A instance_relative_config=True%0A )%0A%0Afrom werkzeug.contrib.fixers import ProxyFix%0Aapp.wsgi_app = ProxyFix(app.wsgi_app)%0A
|
|
52236b1ad285683d828b248e462a7b984d31e636
|
Add example of connecting OGR to matplotlib through shapely and numpy
|
examples/world.py
|
examples/world.py
|
Python
| 0.000002 |
@@ -0,0 +1,449 @@
+import ogr%0Aimport pylab%0Afrom numpy import asarray%0A%0Afrom shapely.wkb import loads%0A%0Asource = ogr.Open(%22/var/gis/data/world/world_borders.shp%22)%0Aborders = source.GetLayerByName(%22world_borders%22)%0A%0Afig = pylab.figure(1, figsize=(4,2), dpi=300)%0A%0Awhile 1:%0A feature = borders.GetNextFeature()%0A if not feature:%0A break%0A %0A geom = loads(feature.GetGeometryRef().ExportToWkb())%0A a = asarray(geom)%0A pylab.plot(a%5B:,0%5D, a%5B:,1%5D)%0A%0Apylab.show()%0A
|
|
bc871956d492a3bc34e28847de136e1b4ad82035
|
Create codechallenge.py
|
codechallenge.py
|
codechallenge.py
|
Python
| 0.000004 |
@@ -0,0 +1 @@
+%0A
|
|
08a813019c43288051e2ef5cbdfc6daaa0b6a32c
|
fix running rubyspec?
|
fabfile/travis.py
|
fabfile/travis.py
|
import glob
import os
from fabric.api import task, local
from fabric.context_managers import lcd
class Test(object):
def __init__(self, func, deps=[], needs_pypy=True, needs_rubyspec=False):
self.func = func
self.deps = deps
self.needs_pypy = needs_pypy
self.needs_rubyspec = needs_rubyspec
def install_deps(self):
local("pip install {}".format(" ".join(self.deps)))
def download_pypy(self):
local("wget https://bitbucket.org/pypy/pypy/get/default.tar.bz2 -O `pwd`/../pypy.tar.bz2")
local("bunzip2 `pwd`/../pypy.tar.bz2")
local("tar -xf `pwd`/../pypy.tar -C `pwd`/../")
[path_name] = glob.glob("../pypy-pypy*")
path_name = os.path.abspath(path_name)
with open("pypy_marker", "w") as f:
f.write(path_name)
def download_mspec(self):
with lcd(".."):
local("git clone --depth=100 --quiet https://github.com/rubyspec/mspec")
def download_rubyspec(self):
with lcd(".."):
local("git clone --depth=100 --quiet https://github.com/rubyspec/rubyspec")
def run_tests(self):
env = {}
if self.needs_pypy:
with open("pypy_marker") as f:
env["pypy_path"] = f.read()
self.func(env)
@task
def install_requirements():
t = TEST_TYPES[os.environ["TEST_TYPE"]]
if t.deps:
t.install_deps()
if t.needs_pypy:
t.download_pypy()
if t.needs_rubyspec:
t.download_mspec()
t.download_rubyspec()
@task
def run_tests():
t = TEST_TYPES[os.environ["TEST_TYPE"]]
t.run_tests()
def run_own_tests(env):
local("PYTHONPATH=$PYTHONPATH:{pypy_path} py.test".format(**env))
def run_translate_tests(env):
rubyspec_tests = [
"language/and_spec.rb",
"language/not_spec.rb",
"language/order_spec.rb",
"language/unless_spec.rb",
]
local("PYTHONPATH={pypy_path}:$PYTHONPATH python {pypy_path}/pypy/translator/goal/translate.py --batch -Ojit targetrupypy.py".format(**env))
spec_files = " ".join(os.path.join("../rubyspec", p) for p in rubyspec_tests)
local("../mspec/bin/mspec -t topaz-c {spec_files}".format(spec_files=spec_files))
def run_docs_tests(env):
local("sphinx-build -W -b html docs/ docs/_build/")
RPLY_URL = "-e git+https://github.com/alex/rply#egg=rply"
TEST_TYPES = {
"own": Test(run_own_tests, deps=["pytest", RPLY_URL]),
"translate": Test(run_translate_tests, deps=[RPLY_URL], needs_rubyspec=True),
"docs": Test(run_docs_tests, deps=["sphinx"], needs_pypy=False),
}
|
Python
| 0 |
@@ -2168,16 +2168,22 @@
spec -t
+%60pwd%60/
topaz-c
|
2044e3b018595e45cc2969d0675d5006ea02ccf5
|
update to use new struct data of g_project
|
trunk/editor/savefilerooms.py
|
trunk/editor/savefilerooms.py
|
#!/usr/bin/env python
from xml.dom import minidom
from xml.etree import ElementTree
#to use OrderedDict in python < 2.7
try:
from collections import OrderedDict
except ImportError:
from misc.dict import OrderedDict
from structdata.world import g_world
def prettify(content):
"""
Return a pretty-printed XML string for the Element.
"""
rough_string = ElementTree.tostring(content, 'utf-8')
reparsed = minidom.parseString(rough_string)
return reparsed.toprettyxml(indent=" ")
def saveData(top, tag, dictionary):
tag_dict = {}
dict_todo = []
#loop over every element of the dictionary
#any lists found are saved so the function can call itself
#on them later to store their data
for key, value in dictionary.items():
if not isinstance(value, list):
tag_dict[key] = value
else:
dict_todo.append(value)
father_tag = ElementTree.SubElement(top, tag, tag_dict)
for el in dict_todo:
for single_el in el:
saveData(father_tag, single_el.tag_name, single_el.dictionary())
def saveFileRooms(path_file):
"""
function that saves the data structure to a .rooms file;
takes the file path and the structure holding all the data
to be saved
"""
top = ElementTree.Element("world",
g_world.informations.dictionary())
for key_information in g_world.dictionary():
if key_information != "informations":
father = ElementTree.SubElement(top, key_information)
for key in g_world.__dict__[key_information]:
saveData(father, g_world.__dict__[key_information][key].tag_name,
g_world.__dict__[key_information][key].dictionary())
write_file = open(path_file, 'w')
write_file.write(prettify(top))
|
Python
| 0 |
@@ -235,21 +235,23 @@
uctdata.
-world
+project
import
@@ -252,21 +252,23 @@
mport g_
-world
+project
%0A%0Adef pr
@@ -1372,26 +1372,29 @@
g_
-world.informations
+project.data%5B'world'%5D
.dic
@@ -1416,92 +1416,85 @@
for
-key_information in g_world.dictionary():%0A if key_information != %22informations
+data_key, data_value in g_project.data.items():%0A if data_key != %22world
%22:%0A
@@ -1541,31 +1541,24 @@
nt(top,
-key_information
+data_key
)%0A
@@ -1574,192 +1574,110 @@
key
- in g_world.__dict__%5Bkey_information%5D:%0A saveData(father, g_world.__dict__%5Bkey_information%5D%5Bkey%5D.tag_name,%0A g_world.__dict__%5Bkey_information%5D%5Bkey%5D
+, value in data_value:%0A saveData(father, value.tag_name,%0A value
.dic
|
93d91ba059a7037281f6a5e4d6afd5e071668d81
|
Create freebook.py
|
freebook/reddit/freebook.py
|
freebook/reddit/freebook.py
|
Python
| 0 |
@@ -0,0 +1,1839 @@
+# Get free ebooks from Reddit%0A%0Afrom bs4 import BeautifulSoup%0Aimport feedparser%0Aimport requests%0A%0Aurl = %22https://www.reddit.com/r/freebooks.rss%22%0Aheaders = %7B%22User-Agent%22: %22Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:59.0) Gecko/20100101 Firefox/59.0%22%7D%0A%0Aurls = %5B%5D%0Abooks = %5B%5D%0Abook_data_all = %5B%5D%0A%0Ad = feedparser.parse(requests.get(url, headers=headers).text)%0Aprint(len(d.entries))%0A%0Afor e in d.entries:%0A%09for l in BeautifulSoup(e.description, %22html.parser%22).find_all(%22a%22):%0A%09%09if l.string == %22%5Blink%5D%22 and %22reddit%22 not in l%5B%22href%22%5D:%0A%09%09%09print(e.title)%0A%09%09%09print(l%5B%22href%22%5D)%0A%09%09%09urls.append(l%5B%22href%22%5D)%0A%09print()%0A%0Aprint(urls)%0Aprint(%22***GETTING BOOK DATA***%22)%0Afor u in urls:%0A%09if %22amazon%22 in u:%0A%09%09book_data = BeautifulSoup(requests.get(u, headers=headers).text, %22html.parser%22)%0A%09%09%0A%09%09print(u)%0A%0A%09%09title = book_data.find(%22span%22, attrs=%7B%22id%22:%22ebooksProductTitle%22%7D).string%0A%0A%09%09if %22Visit%22 in book_data.find(%22span%22, attrs=%7B%22class%22:%22author notFaded%22%7D).find(%22a%22, attrs=%7B%22class%22:%22a-link-normal%22%7D).string:%0A%09%09%09author = book_data.find(%22span%22, %7B%22class%22:%22a-size-medium%22%7D).text.replace(%22%5Cn%22, %22%22).replace(%22%5Ct%22, %22%22).replace(%22(Author)%22, %22%22).strip()%0A%09%09else:%0A%09%09%09author = book_data.find(%22span%22, attrs=%7B%22class%22:%22author notFaded%22%7D).find(%22a%22, attrs=%7B%22class%22:%22a-link-normal%22%7D).string%0A%0A%09%09try:%0A%09%09%09price = str(book_data.find(%22td%22, attrs=%7B%22class%22:%22a-color-price%22%7D)).replace(%22%5Cn%22, %22%22).replace(%22 %22, %22%22).split(%22%3E%22)%5B1%5D.split(%22%3C%22)%5B0%5D%0A%09%09except TypeError:%0A%09%09%09price = book_data.find(%22td%22, attrs=%7B%22class%22:%22a-color-base a-align-bottom a-text-strike%22%7D).string.strip()%0A%0A%09%09try:%0A%09%09%09book_data_all.append(%5Btitle, author, price, u%5D)%0A%09%09except Exception as e:%0A%09%09%09print(e)%0A%09%09%09continue%0A%0Aprint(book_data_all)%0Aprint(len(book_data_all))%0Afor b in book_data_all:%0A%09if b%5B2%5D == %22$0.00%22:%0A%09%09books.append(b)%0A%09else:%0A%09%09continue%0A%0Aprint(len(books))%0Aprint(str(len(book_data_all) - len(books)) + %22 paid books%22)%0Aprint(books)%0A
|
|
6edadeb278be9b776845a12954871386ead270d4
|
add tests for log rotation
|
plenum/test/test_log_rotation.py
|
plenum/test/test_log_rotation.py
|
Python
| 0 |
@@ -0,0 +1,1796 @@
+import pytest%0Aimport os%0Aimport logging%0Aimport shutil%0Aimport time%0Afrom plenum.common.logging.TimeAndSizeRotatingFileHandler %5C%0A import TimeAndSizeRotatingFileHandler%0A%0A%0Adef cleanFolder(path):%0A if os.path.exists(path):%0A shutil.rmtree(path)%0A os.makedirs(path, exist_ok=True)%0A return path%0A%0A%0Adef test_time_log_rotation():%0A logDirPath = cleanFolder(%22/tmp/plenum/test_time_log_rotation%22)%0A logFile = os.path.join(logDirPath, %22log%22)%0A logger = logging.getLogger('test_time_log_rotation-logger')%0A%0A logger.setLevel(logging.DEBUG)%0A handler = TimeAndSizeRotatingFileHandler(logFile, interval=1, when='s')%0A logger.addHandler(handler)%0A for i in range(3):%0A time.sleep(1)%0A logger.debug(%22line%22)%0A assert len(os.listdir(logDirPath)) == 4 # initial + 3 new%0A%0A%0Adef test_size_log_rotation():%0A logDirPath = cleanFolder(%22/tmp/plenum/test_size_log_rotation%22)%0A logFile = os.path.join(logDirPath, %22log%22)%0A logger = logging.getLogger('test_time_log_rotation-logger')%0A%0A logger.setLevel(logging.DEBUG)%0A handler = TimeAndSizeRotatingFileHandler(logFile, maxBytes=21)%0A logger.addHandler(handler)%0A for i in range(20):%0A logger.debug(%22line%22)%0A%0A assert len(os.listdir(logDirPath)) == 5%0A%0A%0Adef test_time_and_size_log_rotation():%0A logDirPath = cleanFolder(%22/tmp/plenum/test_time_and_size_log_rotation%22)%0A logFile = os.path.join(logDirPath, %22log%22)%0A logger = logging.getLogger('test_time_and_size_log_rotation-logger')%0A%0A logger.setLevel(logging.DEBUG)%0A handler = TimeAndSizeRotatingFileHandler(logFile, maxBytes=21, interval=1, when=%22s%22)%0A logger.addHandler(handler)%0A%0A for i in range(20):%0A logger.debug(%22line%22)%0A%0A for i in range(3):%0A time.sleep(1)%0A logger.debug(%22line%22)%0A%0A assert len(os.listdir(logDirPath)) == 8
|
|
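
One quirk in the tests above: the first two both fetch the logger named 'test_time_log_rotation-logger', and logging.getLogger returns the same object for a given name, so the handler added in the first test is still attached when the second runs. A common guard, sketched with an assumed per-test logger name and explicit teardown:

logger = logging.getLogger('test_size_log_rotation-logger')  # unique name per test
logger.setLevel(logging.DEBUG)
handler = TimeAndSizeRotatingFileHandler(logFile, maxBytes=21)
logger.addHandler(handler)
try:
    for i in range(20):
        logger.debug("line")
finally:
    logger.removeHandler(handler)  # keep the handler from leaking into later tests
    handler.close()
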
fd75ee4a96eddc1e71eb85dd36a2c8f5b13807ca
|
Create RemoveLinkedListElement.py
|
RemoveLinkedListElement.py
|
RemoveLinkedListElement.py
|
Python
| 0.000001 |
@@ -0,0 +1,785 @@
+%22%22%22Remove Linked List Elements%0ARemove all elements from a linked list of integers that have value val.%0A%0AExample%0AGiven: 1 --%3E 2 --%3E 6 --%3E 3 --%3E 4 --%3E 5 --%3E 6, val = 6%0AReturn: 1 --%3E 2 --%3E 3 --%3E 4 --%3E 5 %0A%22%22%22%0Aclass ListNode(object):%0A def __init__(self, x):%0A self.val = x%0A self.next = None%0A%0Aclass Solution(object):%0A def removeElements(self, head, val):%0A %22%22%22%0A :type head: ListNode%0A :type val: int%0A :rtype: ListNode%0A %22%22%22%0A if not head:%0A return None%0A while head and head.val==val:%0A head=head.next%0A %0A pos=head%0A while pos and pos.next:%0A if pos.next.val==val:%0A pos.next=pos.next.next%0A else:%0A pos=pos.next%0A return head%0A%0A
|
|
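
The removeElements method above first skips leading nodes equal to val, then splices matches out of the interior. A quick usage sketch, assuming the ListNode and Solution classes from the record are in scope:

# Build 1 -> 2 -> 6 -> 3 -> 4 -> 5 -> 6, then strip the 6s.
head = None
for v in reversed([1, 2, 6, 3, 4, 5, 6]):
    node = ListNode(v)
    node.next = head
    head = node

head = Solution().removeElements(head, 6)
values = []
while head:
    values.append(head.val)
    head = head.next
print(values)  # [1, 2, 3, 4, 5]
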
fd33fadc260cda2bd2395f027457f990ab05480b
|
Add migration for Registration changed
|
registration/migrations/0008_auto_20160418_2250.py
|
registration/migrations/0008_auto_20160418_2250.py
|
Python
| 0 |
@@ -0,0 +1,739 @@
+# -*- coding: utf-8 -*-%0A# Generated by Django 1.9.4 on 2016-04-18 13:50%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations, models%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('registration', '0007_auto_20160416_1217'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='registration',%0A name='payment_status',%0A field=models.CharField(choices=%5B('ready', 'Ready'), ('paid', 'Paid'), ('deleted', 'Deleted')%5D, default='ready', max_length=10),%0A ),%0A migrations.AlterField(%0A model_name='registration',%0A name='transaction_code',%0A field=models.CharField(blank=True, max_length=36),%0A ),%0A %5D%0A
|
|
3c1be9f8fb362699737b6dd867398e734057c300
|
Add main entry point.
|
rave/__main__.py
|
rave/__main__.py
|
Python
| 0 |
@@ -0,0 +1,1016 @@
+import argparse%0Aimport sys%0Afrom os import path%0A%0A%0Adef parse_arguments():%0A parser = argparse.ArgumentParser(description='A modular and extensible visual novel engine.', prog='rave')%0A parser.add_argument('-b', '--bootstrapper', help='Select bootstrapper to bootstrap the engine with. (default: autoselect)')%0A parser.add_argument('-B', '--game-bootstrapper', metavar='BOOTSTRAPPER', help='Select bootstrapper to bootstrap the game with. (default: autoselect)')%0A parser.add_argument('-d', '--debug', action='store_true', help='Enable debug logging.')%0A parser.add_argument('game', metavar='GAME', nargs='?', help='The game to run. Format dependent on used bootstrapper.')%0A%0A arguments = parser.parse_args()%0A return arguments%0A%0Adef main():%0A args = parse_arguments()%0A%0A if args.debug:%0A from . import log%0A log.Logger.LEVEL %7C= log.DEBUG%0A%0A from . import bootstrap%0A bootstrap.bootstrap_engine(args.bootstrapper)%0A bootstrap.bootstrap_game(args.game_bootstrapper, args.game)%0A%0Amain()%0A
|
|
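
One nit in rave/__main__.py as committed: main() runs unconditionally at the bottom, so importing the module for any other reason launches the engine. The usual guard still fires under python -m rave, since the interpreter names that module '__main__':

if __name__ == '__main__':
    main()
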
592b3dda603dec0765825fc8dc03fb623906cb63
|
Add migration
|
infrastructure/migrations/0018_auto_20210928_1642.py
|
infrastructure/migrations/0018_auto_20210928_1642.py
|
Python
| 0.000002 |
@@ -0,0 +1,579 @@
+# -*- coding: utf-8 -*-%0A# Generated by Django 1.11.29 on 2021-09-28 14:42%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations, models%0Aimport django.db.models.deletion%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('infrastructure', '0017_auto_20210928_1329'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='project',%0A name='latest_implementation_year',%0A field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='infrastructure.FinancialYear'),%0A ),%0A %5D%0A
|
|
ede4704704f5f6b246d70c84a16be9465cfa55e2
|
Triplet with given sum
|
Arrays/triplet_with_given_sum.py
|
Arrays/triplet_with_given_sum.py
|
Python
| 0.999999 |
@@ -0,0 +1,1413 @@
+import unittest%0A%22%22%22%0AGiven an unsorted array of numbers, and a value, find a triplet whose sum is equal to value.%0AInput: 12 3 4 1 6 9, value = 24%0AOutput: 12 3 9%0A%22%22%22%0A%0A%22%22%22%0AApproach:%0A1. Sort the array.%0A2. Scan from left to right.%0A3. Fix current element as potential first element of triplet.%0A4. Find a pair which has sum as value - current element in the remaining sorted portion of array.%0A%22%22%22%0A%0A%0Adef find_triplet_with_given_sum(list_of_numbers, target_sum):%0A list_of_numbers = sorted(list_of_numbers)%0A for i in range(len(list_of_numbers)):%0A for j in range(i, len(list_of_numbers)):%0A low = j%0A high = len(list_of_numbers) - 1%0A actual_sum = list_of_numbers%5Bi%5D + list_of_numbers%5Blow%5D + list_of_numbers%5Bhigh%5D%0A if actual_sum == target_sum:%0A return list_of_numbers%5Bi%5D, list_of_numbers%5Blow%5D, list_of_numbers%5Bhigh%5D%0A elif actual_sum %3C target_sum:%0A low += 1%0A else:%0A high -= 1%0A%0A return None%0A%0A%0Aclass TestTripletSum(unittest.TestCase):%0A%0A def test_triplet_sum(self):%0A list_of_numbers = %5B12, 3, 4, 1, 6, 9%5D%0A triplet = find_triplet_with_given_sum(list_of_numbers, 24)%0A self.assertEqual(len(triplet), 3)%0A self.assertIn(12, triplet)%0A self.assertIn(3, triplet)%0A self.assertIn(9, triplet)%0A self.assertIsNone(find_triplet_with_given_sum(list_of_numbers, -12))%0A
|
|
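
A caveat on the solution stored above: actual_sum is computed once per inner iteration and high always starts at the last index, so the low += 1 / high -= 1 branches never take effect — only triplets that include the largest element are examined, and j starting at i even allows an element to be counted twice. The bundled test happens to be satisfiable anyway. The standard sort-plus-two-pointer pass, as a corrective sketch:

def find_triplet_with_given_sum(list_of_numbers, target_sum):
    nums = sorted(list_of_numbers)
    for i in range(len(nums) - 2):
        low, high = i + 1, len(nums) - 1
        while low < high:
            actual_sum = nums[i] + nums[low] + nums[high]
            if actual_sum == target_sum:
                return nums[i], nums[low], nums[high]
            elif actual_sum < target_sum:
                low += 1   # need a larger sum
            else:
                high -= 1  # need a smaller sum
    return None

print(find_triplet_with_given_sum([12, 3, 4, 1, 6, 9], 24))  # (3, 9, 12)
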
05659cd132a5dfb54b50ec38ff1d405697de251a
|
Add crawler for superpoop
|
comics/crawler/crawlers/superpoop.py
|
comics/crawler/crawlers/superpoop.py
|
Python
| 0.000053 |
@@ -0,0 +1,849 @@
+from comics.crawler.base import BaseComicCrawler%0Afrom comics.crawler.meta import BaseComicMeta%0A%0Aclass ComicMeta(BaseComicMeta):%0A name = 'Superpoop'%0A language = 'en'%0A url = 'http://www.superpoop.com/'%0A start_date = '2008-01-01'%0A history_capable_days = 30%0A schedule = 'Mo,Tu,We,Th'%0A time_zone = -5%0A rights = 'Drew'%0A%0Aclass ComicCrawler(BaseComicCrawler):%0A def _get_url(self):%0A self.parse_feed('http://www.superpoop.com/rss/rss.php')%0A%0A for entry in self.feed.entries:%0A if self.timestamp_to_date(entry.updated_parsed) == self.pub_date:%0A self.title = entry.title%0A pieces = entry.summary.split('%22')%0A for i, piece in enumerate(pieces):%0A if piece.count('src='):%0A self.url = pieces%5Bi + 1%5D%0A return%0A
|
|
6da1f28296a8db0c18c0726dcfdc0067bebd9114
|
add a script to test learned DQN
|
learning_tools/keras-rl/dqn/dqn_tester.py
|
learning_tools/keras-rl/dqn/dqn_tester.py
|
Python
| 0 |
@@ -0,0 +1,1686 @@
+import numpy as np%0Aimport gym%0Aimport os%0Aimport pickle%0Aimport argparse%0Aimport pandas as pd%0A%0Afrom keras.models import Sequential%0Afrom keras.layers import Dense, Activation, Flatten%0Afrom keras.optimizers import Adam%0A%0Afrom rl.agents.dqn import DQNAgent%0Afrom rl.policy import BoltzmannQPolicy, LinearAnnealedPolicy%0Afrom rl.memory import SequentialMemory%0A%0Afrom oscar.env.envs.general_learning_env import GeneralLearningEnv%0A%0ACONFIG_FILE = 'config/learning_complex.json'%0AWEIGHT_FILE = 'ML_homework/results/2018-04-22_16/duel_dqn_learning_complex_weights.h5f'%0A%0A# Get the environment and extract the number of actions.%0Aenv = GeneralLearningEnv(CONFIG_FILE, True, log_file_path=None, publish_stats=False)%0Anp.random.seed(123)%0Aenv.seed(123)%0Anb_actions = env.action_space.n%0A%0Amodel = Sequential()%0Amodel.add(Flatten(input_shape=(1,) + env.observation_space.shape))%0Amodel.add(Dense(16))%0Amodel.add(Activation('relu'))%0Amodel.add(Dense(16))%0Amodel.add(Activation('relu'))%0Amodel.add(Dense(16))%0Amodel.add(Activation('relu'))%0Amodel.add(Dense(nb_actions, activation='linear'))%0Aprint(model.summary())%0A%0Amemory = SequentialMemory(limit=50000, window_length=1)%0Aboltzmann_policy = BoltzmannQPolicy(tau=1.0, clip=(0.0, 500.0))%0A# enable the dueling network%0A# you can specify the dueling_type to one of %7B'avg','max','naive'%7D%0Adqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=10, policy=boltzmann_policy,%0A enable_dueling_network=True, dueling_type='avg', target_model_update=1e-2)%0Adqn.compile(Adam(lr=1e-3), metrics=%5B'mae'%5D)%0A%0Adqn.load_weights(WEIGHT_FILE)%0A%0A# Finally, evaluate our algorithm for 5 episodes.%0Adqn.test(env, nb_episodes=1, visualize=False)%0A%0Aenv.close()%0Adel env%0A
|
|
9f6f6b727458eb331d370443074a58d1efa6d755
|
Add migration for blank true.
|
kolibri/logger/migrations/0003_auto_20170531_1140.py
|
kolibri/logger/migrations/0003_auto_20170531_1140.py
|
Python
| 0.000141 |
@@ -0,0 +1,523 @@
+# -*- coding: utf-8 -*-%0A# Generated by Django 1.9.7 on 2017-05-31 18:40%0Afrom __future__ import unicode_literals%0A%0Aimport kolibri.core.fields%0Afrom django.db import migrations%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('logger', '0002_auto_20170518_1031'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='usersessionlog',%0A name='last_interaction_timestamp',%0A field=kolibri.core.fields.DateTimeTzField(blank=True, null=True),%0A ),%0A %5D%0A
|
|
b04e3787de29d4bee68854e15a7e783cbe3c3bd0
|
Add test for microstructure generator
|
pymks/tests/test_microstructure_generator.py
|
pymks/tests/test_microstructure_generator.py
|
Python
| 0 |
@@ -0,0 +1,1519 @@
+import pytest%0Aimport numpy as np%0Afrom pymks.datasets import make_microstructure%0A%0A%[email protected]%0Adef test_size_and_grain_size_failure():%0A make_microstructure(n_samples=1, size=(7, 7), grain_size=(8, 1))%0A%0A%[email protected]%0Adef test_volume_fraction_failure():%0A make_microstructure(n_samples=1, volume_fraction=(0.3, 0.6))%0A%0A%[email protected]%0Adef test_volume_fraction_with_n_phases_failure():%0A make_microstructure(n_samples=1, size=(7, 7), n_phases=3,%0A volume_fraction=(0.5, 0.5))%0A%0A%[email protected]%0Adef test_percent_variance_exceeds_limit_failure():%0A make_microstructure(n_samples=1, size=(7, 7), n_phases=3,%0A volume_fraction=(0.3, 0.3, 0.4), percent_variance=0.5)%0A%0A%0Adef test_volume_fraction():%0A X = make_microstructure(n_samples=1, n_phases=3,%0A volume_fraction=(0.3, 0.2, 0.5))%0A assert np.allclose(np.sum(X == 1) / float(X.size), 0.2, rtol=1e-4)%0A assert np.allclose(np.sum(X == 2) / float(X.size), 0.5, atol=1e-4)%0A%0A%0Adef test_percent_variance():%0A X = make_microstructure(n_samples=1, n_phases=3,%0A volume_fraction=(0.3, 0.2, 0.5),%0A percent_variance=.2)%0A print np.sum(X == 1) / float(X.size)%0A print np.sum(X == 2) / float(X.size)%0A assert np.allclose(np.sum(X == 1) / float(X.size), 0.09, atol=1e-2)%0A assert np.allclose(np.sum(X == 2) / float(X.size), 0.57, atol=1e-2)%0A%0Aif __name__ == '__main__':%0A test_volume_fraction()%0A test_percent_variance()%0A
|
|
04dcdadf4f8b18405754683af0138ddc8363580e
|
Create followExpression.py
|
maya/python/animation/followExpression.py
|
maya/python/animation/followExpression.py
|
Python
| 0.000001 |
@@ -0,0 +1,1118 @@
+ctrlShape = cmds.createNode('locator')%0ActrlTransform = cmds.listRelatives(ctrlShape,p=True,f=True)%0Aif isinstance(ctrlTransform,list):%0A ctrlTransform = ctrlTransform%5B0%5D%0Ajt = cmds.createNode('joint',n='followJoint')%0A%0AattrName = 'follow'%0Aif not cmds.attributeQuery(attrName,n=ctrlTransform,ex=True):%0A cmds.addAttr(ctrlTransform,ln=attrName,at='double',min=0.0,max=1.0,dv=0.1)%0A cmds.setAttr('%25s.%25s'%25(ctrlTransform,attrName),e=True,k=True)%0A%0Aexp = '%7B%5Cn%5Ct$tx1 = %25s.translateX;%5Cn'%25ctrlTransform%0Aexp += '%5Ct$ty1 = %25s.translateY;%5Cn'%25ctrlTransform%0Aexp += '%5Ct$tz1 = %25s.translateZ;%5Cn'%25ctrlTransform%0Aexp += '%5Ct$tx2 = %25s.translateX;%5Cn'%25jt%0Aexp += '%5Ct$ty2 = %25s.translateY;%5Cn'%25jt%0Aexp += '%5Ct$tz2 = %25s.translateZ;%5Cn'%25jt%0Aexp += '%5Ct%5Cn%5Ct$f = %25s.follow;%5Cn'%25ctrlTransform%0Aexp += '%5Ct$dx = $tx1;%5Cn'%0Aexp += '%5Ct$dy = $ty1;%5Cn'%0Aexp += '%5Ct$dz = $tz1;%5Cn'%0Aexp += '%5Ctif ($f %3E 0.0)%5Cn%5Ct%7B%5Cn%5Ct%5Ct$dx = ($tx1-$tx2)*$f;%5Cn'%0Aexp += '%5Ct%5Ct$dy = ($ty1-$ty2)*$f;%5Cn'%0Aexp += '%5Ct%5Ct$dz = ($tz1-$tz2)*$f;%5Cn'%0Aexp += '%5Ct%7D%5Cn%5Ct%25s.translateX += $dx;%5Cn'%25jt%0Aexp += '%5Ct%25s.translateY += $dy;%5Cn'%25jt%0Aexp += '%5Ct%25s.translateZ += $dz;%5Cn'%25jt%0Aexp += '%7D'%0Acmds.expression(s=exp)%0A
|
|
58c62061c0c02682f96d6793b0570b455887d392
|
Add pytest tools
|
delocate/tests/pytest_tools.py
|
delocate/tests/pytest_tools.py
|
Python
| 0.000001 |
@@ -0,0 +1,514 @@
+import pytest%0A%0A%0Adef assert_true(condition):%0A __tracebackhide__ = True%0A assert condition%0A%0A%0Adef assert_false(condition):%0A __tracebackhide__ = True%0A assert not condition%0A%0A%0Adef assert_raises(expected_exception, *args, **kwargs):%0A __tracebackhide__ = True%0A return pytest.raises(expected_exception, *args, **kwargs)%0A%0A%0Adef assert_equal(first, second):%0A __tracebackhide__ = True%0A assert first == second%0A%0A%0Adef assert_not_equal(first, second):%0A __tracebackhide__ = True%0A assert first != second%0A
|
|
dd93995a119323d9b67dce1f8797eb72788a044a
|
solve 12704
|
UVA/vol-127/12704.py
|
UVA/vol-127/12704.py
|
Python
| 0.999999 |
@@ -0,0 +1,211 @@
+from sys import stdin, stdout%0AI = list(map(int, stdin.read().split()))%0A%0Afor i in range(0, I%5B0%5D):%0A %5Bx, y, r%5D = I%5B3*i + 1: 3*i + 4%5D%0A cd = (x*x + y*y) ** 0.5%0A stdout.write('%7B:.2f%7D %7B:.2f%7D%5Cn'.format(r-cd, r+cd))%0A%0A
|
|
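
The closed form in the loop above is the classic distance-to-circle identity: with the circle centred at the origin and the query point at distance cd = sqrt(x^2 + y^2) from it, the nearest and farthest points on the circle lie along the line through the centre, at r - cd and r + cd. A tiny numeric check of that reading:

def near_far(x, y, r):
    cd = (x * x + y * y) ** 0.5
    return r - cd, r + cd

print('{:.2f} {:.2f}'.format(*near_far(3, 4, 10)))  # 5.00 15.00
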
3ad0f9ee142e3a08e82749f47003870f14029bff
|
Fix urls.py to point to web version of view
|
mysite/urls.py
|
mysite/urls.py
|
from django.conf.urls.defaults import *
import settings
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
(r'^$', 'mysite.search.views.fetch_bugs'),
(r'^search/$', 'mysite.search.views.fetch_bugs'),
(r'^admin/(.*)', admin.site.root),
(r'^static/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.STATIC_DOC_ROOT}),
(r'^people/add_contribution$', 'mysite.profile.views.add_contribution_web'),
(r'^people/$', 'mysite.profile.views.display_person_web'),
(r'^people/get_data_for_email$', 'mysite.profile.views.get_data_for_email'),
(r'^people/change_what_like_working_on$',
'mysite.profile.views.change_what_like_working_on_web'),
(r'^people/add_tag_to_project_exp$',
'mysite.profile.views.add_tag_to_project_exp_web'),
(r'^people/project_exp_tag__remove$',
'mysite.profile.views.project_exp_tag__remove__web'),
(r'^people/make_favorite_project_exp$',
'mysite.profile.views.make_favorite_project_exp_web'),
(r'^people/make_favorite_exp_tag$',
'mysite.profile.views.make_favorite_exp_tag_web'),
(r'^people/add_contrib$',
'mysite.profile.views.display_person_old'),
(r'^people/sf_projects_by_person$',
'mysite.profile.views.sf_projects_by_person_web'),
# Experience scraper
(r'^people/exp_scraper$',
'mysite.profile.views.exp_scraper_display_input_form'),
(r'^people/exp_scrape$',
'mysite.profile.views.exp_scraper_scrape'),
# Get a list of suggestions for the search input, formatted the way that
# the jQuery autocomplete plugin wants it.
(r'^search/get_suggestions$', 'mysite.search.views.request_jquery_autocompletion_suggestions'),
)
# vim: set ai ts=4 sts=4 et sw=4:
|
Python
| 0.000004 |
@@ -1620,16 +1620,20 @@
r_scrape
+_web
'),%0A%0A
|
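
Decoded, the single hunk above simply appends _web to the scrape view's dotted path, bringing it in line with the other *_web handlers in the file; the resulting entry reads:

(r'^people/exp_scrape$',
    'mysite.profile.views.exp_scraper_scrape_web'),
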
8706ec4678bc4740b64265ced63fb12d837e0297
|
Add Basic Histogram Example
|
altair/vegalite/v2/examples/histogram.py
|
altair/vegalite/v2/examples/histogram.py
|
Python
| 0 |
@@ -0,0 +1,435 @@
+%22%22%22%0AHistogram%0A-----------------%0AThis example shows how to make a basic histogram, based on the vega-lite docs%0Ahttps://vega.github.io/vega-lite/examples/histogram.html%0A%22%22%22%0Aimport altair as alt%0A%0Amovies = alt.load_dataset('movies')%0A%0Achart = alt.Chart(movies).mark_bar().encode(%0A x=alt.X(%22IMDB_Rating%22,%0A type='quantitative',%0A bin=alt.BinTransform(%0A maxbins=10,%0A )),%0A y='count(*):Q',%0A)%0A
|
|
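
alt.load_dataset and alt.BinTransform date this example to an early Altair 2 pre-release; against the released Altair 2+ API the same histogram is usually written along these lines (a sketch assuming the separate vega_datasets package is installed):

import altair as alt
from vega_datasets import data

movies = data.movies()

chart = alt.Chart(movies).mark_bar().encode(
    x=alt.X('IMDB_Rating:Q', bin=alt.Bin(maxbins=10)),
    y='count()',
)
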
4f1ddebb0fc185dfe4cd5167c67be8f6cea78273
|
Create listenCmd.py
|
listenCmd.py
|
listenCmd.py
|
Python
| 0.000002 |
@@ -0,0 +1,1525 @@
+#!/usr/bin/python%0A%0A#impoer the necessary modules%0Aimport re # the regexp module%0A%0A# listen command test python file%0A%0A%0A# // THE FCNS //%0A%0A# the fcn that iterate through the recognized command list to find a match with the received pseech command%0Adef listenForCommand( theCommand ):%0A%09#for s in range( len( cmdsList ) ):%0A%09for k in commandsParams.items():%0A%09%09# hold the current 'loop%5Bi%5D' to rung the matching process against it%0A%09%09#matchingCmd = re.search(r cmdsList%5Bi%5D, theCommand )%0A%09%09matchingCmd = re.search(r%22say hello%22, theCommand )%0A%09%09%0A%09%09# check if the match was successfull and end the iteraction / handle it%0A%09%09if matchingCmd:%0A%09%09%09print %22Matching command found:%22%0A%09%09%09print matchingCmd%0A%09%09%09print %22Associated function:%22%0A%09%09%09#print fcnsList%5Bs%5D%0A%09%09%09# end the iteration as we found the command%0A%09%09%09break%0A%09%09%0A%09%09else:%0A%09%09%09# continue to loop until 'cmdsList' has been fully iterated over (..)%0A%0A%0A# the settings ( commands recognized ans associated functions )%0AcmdsList = %5B%22say hello%22, %22repeat after me%22, %22do the cleaning%22, %22do my work%22%5D # IndentationError: expected an indented block%0AfcnsList = %5B'sayHello', 'repeatAfterMe', 'doTheCleaning', 'doMyWork'%5D%0A%0AcommandsParams = %7B%22say hello%22 : %22sayHello%22, %22repeat after me%22 : %22repeatAfterMe%22, %22do the cleaning%22 : %22doTheCleaning%22, %22do my work%22 : %22doMyWork%22%7D # this is a dictionary%0A%0A%0A%0A%0A%0A%0A# // THE PRGM //%0A%0Aprint %22%5Cn PRGORAM BEGIN %5Cn%22%0A%0A# fake received speech on wich we iterate to find a matching command%0AreceivedCmd = %22say hello%22%0A%0A# try to find a match with a fake command%0AlistenForCommand( receivedCmd )%0A
|
|
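
The file above will not parse as committed: the final else: branch of listenForCommand contains only a comment, and the author even pasted the resulting IndentationError message into the source as a note. A minimal working rendering of the dictionary-dispatch idea it is reaching for, sketched in Python 3 with assumed handler names:

import re

COMMANDS = {
    "say hello": "sayHello",
    "repeat after me": "repeatAfterMe",
    "do the cleaning": "doTheCleaning",
    "do my work": "doMyWork",
}

def listen_for_command(heard):
    for phrase, handler_name in COMMANDS.items():
        if re.search(phrase, heard):
            print("Matching command found:", phrase)
            print("Associated function:", handler_name)
            return handler_name
    return None  # no match; caller keeps listening

listen_for_command("say hello")
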
fd1b2885057512d6b91a2b2ed4df183e66093e61
|
Create extended_iter_with_peek.py
|
lld_practice/extended_iter_with_peek.py
|
lld_practice/extended_iter_with_peek.py
|
Python
| 0.000001 |
@@ -0,0 +1,1407 @@
+%0Aclass ExtendedIter:%0A %22%22%22An extended iterator that wraps around an existing iterators.%0A It provides extra methods:%0A - %60has_next()%60: checks if we can still yield items.%0A - %60peek()%60: returns the next element of our iterator, but doesn't pass by it.%0A If there's nothing more to return, raises %60StopIteration%60 error.%0A %22%22%22%0A %0A def __init__(self, i):%0A self._myiter = iter(i)%0A self._next_element = None%0A self._has_next = 0%0A self._prime()%0A%0A%0A def has_next(self):%0A %22%22%22Returns true if we can call next() without raising a%0A StopException.%22%22%22%0A return self._has_next%0A%0A%0A def peek(self):%0A %22%22%22Nonexhaustively returns the next element in our iterator.%22%22%22%0A assert self.has_next()%0A return self._next_element%0A%0A%0A def next(self):%0A %22%22%22Returns the next element in our iterator.%22%22%22%0A if not self._has_next:%0A raise StopIteration%0A result = self._next_element%0A self._prime()%0A return result%0A%0A%0A def _prime(self):%0A %22%22%22Private function to initialize the states of%0A self._next_element and self._has_next. We poke our%0A self._myiter to see if it's still alive and kicking.%22%22%22%0A try:%0A self._next_element = self._myiter.next()%0A self._has_next = 1%0A except StopIteration:%0A self.next_element = None%0A self._has_next = 0%0A
|
|
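
Two slips worth flagging in the class above: _prime calls self._myiter.next(), the Python 2 spelling (Python 3 wants next(self._myiter)), and the except branch assigns self.next_element without the leading underscore, so the cached element is never actually cleared. A Python 3 rendering of the same wrapper with both corrected, as a sketch:

class ExtendedIter:
    """Iterator wrapper adding has_next() and peek() by caching one element ahead."""

    def __init__(self, it):
        self._myiter = iter(it)
        self._prime()

    def has_next(self):
        return self._has_next

    def peek(self):
        assert self._has_next
        return self._next_element

    def next(self):
        if not self._has_next:
            raise StopIteration
        result = self._next_element
        self._prime()
        return result

    def _prime(self):
        # Poke the wrapped iterator and cache its next element, if any.
        try:
            self._next_element = next(self._myiter)
            self._has_next = True
        except StopIteration:
            self._next_element = None
            self._has_next = False
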
7ce7ce4bd899e6c386de669d11a2fc5593157c91
|
move processing in a mixin
|
pipeline/storage.py
|
pipeline/storage.py
|
import os
try:
from staticfiles import finders
from staticfiles.storage import CachedStaticFilesStorage, StaticFilesStorage
except ImportError:
from django.contrib.staticfiles import finders
from django.contrib.staticfiles.storage import CachedStaticFilesStorage, StaticFilesStorage
from django.core.exceptions import ImproperlyConfigured
from django.core.files.storage import get_storage_class
from django.utils.functional import LazyObject
from pipeline.conf import settings
class PipelineStorage(StaticFilesStorage):
def get_available_name(self, name):
if self.exists(name):
self.delete(name)
return name
def post_process(self, paths, dry_run=False, **options):
from pipeline.packager import Packager
if dry_run:
return []
packager = Packager(storage=self)
for package_name in packager.packages['css']:
package = packager.package_for('css', package_name)
output_file = packager.pack_stylesheets(package)
paths[output_file] = (self, output_file)
for package_name in packager.packages['js']:
package = packager.package_for('js', package_name)
output_file = packager.pack_javascripts(package)
paths[output_file] = (self, output_file)
super_class = super(PipelineStorage, self)
if hasattr(super_class, 'post_process'):
return super_class.post_process(paths, dry_run, **options)
return [
(path, path, True)
for path in paths
]
class PipelineCachedStorage(PipelineStorage, CachedStaticFilesStorage):
def post_process(self, paths, dry_run=False, **options):
from pipeline.packager import Packager
packager = Packager(storage=self)
if dry_run:
for asset_type in ['css', 'js']:
for package_name in packager.packages[asset_type]:
package = packager.package_for('js', package_name)
paths[package.output_filename] = (self, package.output_filename)
self.cache.delete_many([self.cache_key(path) for path in paths])
return []
return super(PipelineCachedStorage, self).post_process(paths, dry_run, **options)
class BaseFinderStorage(PipelineStorage):
finders = None
def __init__(self, finders=None, *args, **kwargs):
if finders is not None:
self.finders = finders
if self.finders is None:
raise ImproperlyConfigured("The storage %r doesn't have a finders class assigned." % self.__class__)
super(BaseFinderStorage, self).__init__(*args, **kwargs)
def path(self, name):
path = self.finders.find(name)
if not path:
path = super(BaseFinderStorage, self).path(name)
return path
def exists(self, name):
exists = self.finders.find(name) != None
if not exists:
exists = super(BaseFinderStorage, self).exists(name)
return exists
def listdir(self, path):
for finder in finders.get_finders():
for storage in finder.storages.values():
try:
return storage.listdir(path)
except OSError:
pass
def _save(self, name, content):
for finder in finders.get_finders():
for path, storage in finder.list([]):
if os.path.dirname(name) in path:
return storage._save(name, content)
class PipelineFinderStorage(BaseFinderStorage):
finders = finders
class DefaultStorage(LazyObject):
def _setup(self):
self._wrapped = get_storage_class(settings.PIPELINE_STORAGE)()
default_storage = DefaultStorage()
|
Python
| 0 |
@@ -87,34 +87,26 @@
t Cached
-StaticFilesStorage
+FilesMixin
, Static
@@ -246,34 +246,26 @@
t Cached
-StaticFilesStorage
+FilesMixin
, Static
@@ -493,308 +493,173 @@
line
-Storage(StaticFilesStorage):%0A def get_available_name(self, name):%0A if self.exists(name):%0A self.delete(name)%0A return name%0A%0A def post_process(self, paths, dry_run=False, **options):%0A from pipeline.packager import Packager%0A if dry_run:%0A return %5B%5D%0A
+Mixin(object):%0A def post_process(self, paths, dry_run=False, **options):%0A if dry_run:%0A return %5B%5D%0A%0A from pipeline.packager import Packager
%0A
@@ -1196,23 +1196,21 @@
Pipeline
-Storage
+Mixin
, self)%0A
@@ -1423,188 +1423,77 @@
%5D%0A%0A
-%0Aclass PipelineCachedStorage(PipelineStorage, CachedStaticFilesStorage):%0A def post_process(self, paths, dry_run=False, **options):%0A from pipeline.packager import Packager
+ def get_available_name(self, name):%0A if self.exists(name):
%0A
@@ -1501,467 +1501,175 @@
-%0A
- packager = Packager(storage=self)%0A if dry_run:%0A for asset_type in %5B'css', 'js'%5D:%0A for package_name in packager.packages%5Basset_type%5D:%0A package = packager.package_for('js', package_name)%0A paths%5Bpackage.output_filename%5D = (self, package.output_filename)%0A self.cache.delete_many(%5Bself.cache_key(path) for path in paths%5D)%0A return %5B%5D%0A return super(
+self.delete(name)%0A return name%0A%0A%0Aclass PipelineStorage(PipelineMixin, StaticFilesStorage):%0A pass%0A%0A%0Aclass PipelineCachedStorage(CachedFilesMixin,
Pipeline
Cach
@@ -1668,68 +1668,26 @@
line
-Cached
Storage
-, self).post_process(paths, dry_run, **options)
+):%0A pass
%0A%0A%0Ac
|
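
Applied to the source above, the hunks boil down to one structural move: post_process and get_available_name migrate into a new PipelineMixin, and the concrete storages become thin compositions (a reconstruction from the diff, with the packaging body elided):

class PipelineMixin(object):
    def post_process(self, paths, dry_run=False, **options):
        if dry_run:
            return []
        from pipeline.packager import Packager
        # ... packaging loop unchanged from the original post_process ...

    def get_available_name(self, name):
        if self.exists(name):
            self.delete(name)
        return name


class PipelineStorage(PipelineMixin, StaticFilesStorage):
    pass


class PipelineCachedStorage(CachedFilesMixin, PipelineStorage):
    pass
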
77e980157f51af421eceb7c7b7a84945d8d33a91
|
Convert caffemodel of FCN8s to chainer model
|
scripts/caffe_to_chainermodel.py
|
scripts/caffe_to_chainermodel.py
|
Python
| 0.999999 |
@@ -0,0 +1,1226 @@
+#!/usr/bin/env python%0A%0Afrom __future__ import print_function%0Aimport argparse%0Aimport os.path as osp%0A%0Aimport caffe%0Aimport chainer.functions as F%0Aimport chainer.serializers as S%0A%0Aimport fcn%0Afrom fcn.models import FCN8s%0A%0A%0Adata_dir = fcn.get_data_dir()%0Acaffemodel = osp.join(data_dir, 'voc-fcn8s/fcn8s-heavy-pascal.caffemodel')%0Acaffe_prototxt = osp.join(data_dir, 'voc-fcn8s/deploy.prototxt')%0Achainermodel = osp.join(data_dir, 'fcn8s.chainermodel')%0A%0Anet = caffe.Net(caffe_prototxt, caffemodel, caffe.TEST)%0A%0A# TODO(pfnet): chainer CaffeFunction not support some layers%0A# from chainer.functions.caffe import CaffeFunction%0A# func = CaffeFunction(caffemodel)%0A%0Amodel = FCN8s()%0Afor name, param in net.params.iteritems():%0A layer = getattr(model, name)%0A%0A has_bias = True%0A if len(param) == 1:%0A has_bias = False%0A%0A print('%7B0%7D:'.format(name))%0A # weight%0A print(' - W:', param%5B0%5D.data.shape, layer.W.data.shape)%0A assert param%5B0%5D.data.shape == layer.W.data.shape%0A layer.W.data = param%5B0%5D.data%0A # bias%0A if has_bias:%0A print(' - b:', param%5B1%5D.data.shape, layer.b.data.shape)%0A assert param%5B1%5D.data.shape == layer.b.data.shape%0A layer.b.data = param%5B1%5D.data%0A%0AS.save_hdf5(chainermodel, model)%0A
|
|
a8423d5759a951b7f8d765203e3a02a6d3211f35
|
add body task generator
|
neurolabi/python/flyem/BodyTaskManager.py
|
neurolabi/python/flyem/BodyTaskManager.py
|
Python
| 0.000008 |
@@ -0,0 +1,2513 @@
+'''%0ACreated on Sep 18, 2013%0A%0A@author: zhaot%0A'''%0Aimport os;%0A%0Aclass ExtractBodyTaskManager:%0A '''%0A classdocs%0A '''%0A%0A def __init__(self):%0A '''%0A Constructor%0A '''%0A self.commandPath = '';%0A self.minSize = 0;%0A self.maxSize = -1;%0A self.overwriteLevel = 1%0A self.zOffset = 0%0A self.bodyMapDir = ''%0A self.output = ''%0A self.bodysizeFile = ''%0A self.jobNumber = 5%0A %0A def setCommandPath(self, path):%0A self.commandPath = path;%0A %0A def setRange(self, bodySizeRange):%0A self.minSize = bodySizeRange%5B0%5D;%0A self.maxSize = bodySizeRange%5B1%5D;%0A %0A def setOverwriteLevel(self, level):%0A self.overwriteLevel = level;%0A %0A def setZOffset(self, offset):%0A self.zOffset = offset;%0A %0A def setJobNumber(self, n):%0A self.jobNumber = n;%0A%0A def setOutput(self, output):%0A self.output = output;%0A %0A def setBodyMapDir(self, inputBodyMap):%0A self.bodyMapDir = inputBodyMap%0A %0A def setBodySizeFile(self, filePath):%0A self.bodysizeFile = filePath%0A %0A def useCluster(self, using):%0A self.usingCluster = using;%0A %0A def getFullCommand(self):%0A command = self.commandPath + ' ' + self.bodyMapDir + ' -o ' + self.output + %5C%0A ' --sobj ' + ' --minsize ' + str(self.minSize);%0A if self.maxSize %3E= self.minSize:%0A command += ' --maxsize ' + str(self.maxSize)%0A command += ' --overwrite_level ' + str(self.overwriteLevel);%0A if self.bodysizeFile:%0A command += ' --bodysize_file ' + self.bodysizeFile%0A command += ' --z_offset ' + str(self.zOffset)%0A %0A return command;%0A %0A def generateScript(self, outputDir):%0A script = self.output%0A scriptFile = open(script, 'w')%0A if scriptFile:%0A scriptFile.write(self.getFullCommand())%0A scriptFile.close()%0A%0Aif __name__ == '__main__':%0A from os.path import expanduser%0A home = expanduser(%22~%22)%0A %0A taskManager = ExtractBodyTaskManager()%0A taskManager.setBodyMapDir('../body_maps')%0A taskManager.setOutput('.')%0A taskManager.setRange(%5B100000, -1%5D)%0A taskManager.setOverwriteLevel(1)%0A taskManager.setBodySizeFile('bodysize.txt')%0A taskManager.setZOffset(1490)%0A%0A taskManager.setCommandPath(home + '/Work/neutube/neurolabi/cpp/'%0A 'extract_body-build-Qt_4_8_1_gcc-Debug/extract_body');%0A print taskManager.getFullCommand();%0A
|