column         type           min        max
------         ----           ---        ---
commit         stringlengths  40         40
old_file       stringlengths  5          117
new_file       stringlengths  5          117
old_contents   stringlengths  0          1.93k
new_contents   stringlengths  19         3.3k
subject        stringlengths  17         320
message        stringlengths  18         3.28k
lang           stringclasses  1 value
license        stringclasses  13 values
repos          stringlengths  7          42.4k
completion     stringlengths  152        6.66k
prompt         stringlengths  21         3.65k
ec7791663ed866d240edbaf5e0dd766e9418e1ff
cla_backend/apps/status/tests/smoketests.py
cla_backend/apps/status/tests/smoketests.py
old_contents:

import unittest

from celery import Celery
from django.conf import settings
from django.db import connection


class SmokeTests(unittest.TestCase):
    def setUp(self):
        pass

    def test_can_access_db(self):
        cursor = connection.cursor()
        cursor.execute('SELECT 1')
        row = cursor.fetchone()
        self.assertEqual(1, row[0])

    def test_can_access_celery(self):
        if not getattr(settings, 'CELERY_ALWAYS_EAGER', False):
            conn = Celery('cla_backend').connection()
            conn.connect()
            conn.release()
new_contents:

import unittest

from celery import Celery
from django.conf import settings
from django.db import connection


class SmokeTests(unittest.TestCase):
    def setUp(self):
        pass

    def test_can_access_db(self):
        "access the database"
        cursor = connection.cursor()
        cursor.execute('SELECT 1')
        row = cursor.fetchone()
        self.assertEqual(1, row[0])

    def test_can_access_celery(self):
        "connect to SQS"
        if not getattr(settings, 'CELERY_ALWAYS_EAGER', False):
            conn = Celery('cla_backend').connection()
            conn.connect()
            conn.release()
Add docstrings so that hubot can say what went wrong
Add docstrings so that hubot can say what went wrong
Python
mit
ministryofjustice/cla_backend,ministryofjustice/cla_backend,ministryofjustice/cla_backend,ministryofjustice/cla_backend
<INSERT> "access the database" <INSERT_END> <INSERT> "connect to SQS" <INSERT_END> <|endoftext|> import unittest from celery import Celery from django.conf import settings from django.db import connection class SmokeTests(unittest.TestCase): def setUp(self): pass def test_can_access_db(self): "access the database" cursor = connection.cursor() cursor.execute('SELECT 1') row = cursor.fetchone() self.assertEqual(1, row[0]) def test_can_access_celery(self): "connect to SQS" if not getattr(settings, 'CELERY_ALWAYS_EAGER', False): conn = Celery('cla_backend').connection() conn.connect() conn.release()
Add docstrings so that hubot can say what went wrong import unittest from celery import Celery from django.conf import settings from django.db import connection class SmokeTests(unittest.TestCase): def setUp(self): pass def test_can_access_db(self): cursor = connection.cursor() cursor.execute('SELECT 1') row = cursor.fetchone() self.assertEqual(1, row[0]) def test_can_access_celery(self): if not getattr(settings, 'CELERY_ALWAYS_EAGER', False): conn = Celery('cla_backend').connection() conn.connect() conn.release()
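Each completion field above encodes the diff as inline edit tags (<INSERT> ... <INSERT_END>, <REPLACE_OLD> ... <REPLACE_NEW> ... <REPLACE_END>, <DELETE> ... <DELETE_END>), terminated by <|endoftext|> and followed by the full post-edit file contents. Below is a minimal sketch of parsing that format, assuming only the tag set shown in these rows; the helper name and the exact whitespace handling around tags are illustrative assumptions, not part of the dataset specification:

    import re

    # Hypothetical helper (not part of the dataset): split a `completion`
    # string, in the tag format shown in the rows of this file, into its
    # edit operations and the trailing full updated file.
    TAG_RE = re.compile(
        r"<INSERT>(.*?)<INSERT_END>"
        r"|<REPLACE_OLD>(.*?)<REPLACE_NEW>(.*?)<REPLACE_END>"
        r"|<DELETE>(.*?)<DELETE_END>",
        re.DOTALL,
    )

    def parse_completion(completion):
        # Everything after <|endoftext|> is the full post-edit file.
        ops_part, _, full_file = completion.partition("<|endoftext|>")
        ops = []
        for m in TAG_RE.finditer(ops_part):
            insert, old, new, delete = m.groups()
            if insert is not None:
                ops.append(("insert", insert.strip()))
            elif delete is not None:
                ops.append(("delete", delete.strip()))
            else:
                ops.append(("replace", old.strip(), new.strip()))
        return ops, full_file.strip()

Run on the first row's completion, this sketch would yield two insert operations (the two added docstrings) plus the full updated smoketest file.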
75131bdf806c56970f3160de3e6d476d9ecbc3a7
python/deleteNodeInALinkedList.py
python/deleteNodeInALinkedList.py
new_contents:

# https://leetcode.com/problems/delete-node-in-a-linked-list/

# Definition for singly-linked list.
# class ListNode(object):
#     def __init__(self, x):
#         self.val = x
#         self.next = None

class Solution(object):
    def deleteNode(self, node):
        """
        :type node: ListNode
        :rtype: void Do not return anything, modify node in-place instead.
        """
        while node.next != None:
            node.val = node.next.val
            if node.next.next is None:
                node.next = None
            else:
                node = node.next
Add problem delete node in a linked list
Add problem delete node in a linked list
Python
mit
guozengxin/myleetcode,guozengxin/myleetcode
<INSERT> # https://leetcode.com/problems/delete-node-in-a-linked-list/ # Definition for singly-linked list. # class ListNode(object): # <INSERT_END> <INSERT> def __init__(self, x): # self.val = x # self.next = None class Solution(object): def deleteNode(self, node): """ :type node: ListNode :rtype: void Do not return anything, modify node in-place instead. """ while node.next != None: node.val = node.next.val if node.next.next is None: node.next = None else: node = node.next <INSERT_END> <|endoftext|> # https://leetcode.com/problems/delete-node-in-a-linked-list/ # Definition for singly-linked list. # class ListNode(object): # def __init__(self, x): # self.val = x # self.next = None class Solution(object): def deleteNode(self, node): """ :type node: ListNode :rtype: void Do not return anything, modify node in-place instead. """ while node.next != None: node.val = node.next.val if node.next.next is None: node.next = None else: node = node.next
Add problem delete node in a linked list
48857638694ceca08c64d7b9c6825e2178c53279
pylearn2/utils/doc.py
pylearn2/utils/doc.py
""" Documentation-related helper classes/functions """ class soft_wraps: """ A Python decorator which concatenates two functions' docstrings: one function is defined at initialization and the other one is defined when soft_wraps is called. This helps reduce the ammount of documentation to write: one can use this decorator on child classes' functions when their implementation is similar to the one of the parent class. Conversely, if a function defined in a child class departs from its parent's implementation, one can simply explain the differences in a 'Notes' section without re-writing the whole docstring. Examples -------- >>> class Parent(object): ... def f(x): ... ''' ... Adds 1 to x ... ... Parameters ... ---------- ... x : int ... Variable to increment by 1 ... ... Returns ... ------- ... rval : int ... x incremented by 1 ... ''' ... rval = x + 1 ... return rval ... >>> class Child(Parent): ... @soft_wraps(Parent.f) ... def f(x): ... ''' ... Notes ... ----- ... Also prints the incremented value ... ''' ... rval = x + 1 ... print rval ... return rval ... >>> c = Child() >>> print c.f.__doc__ Adds 1 to x Parameters ---------- x : int Variable to increment by 1 Returns ------- rval : int x incremented by 1 Notes ----- Also prints the incremented value """ def __init__(self, f, append=False): """ Parameters ---------- f : function Function whose docstring will be concatenated with the decorated function's docstring prepend : bool, optional If True, appends f's docstring to the decorated function's docstring instead of prepending it. Defaults to False. """ self.f = f self.append = append def __call__(self, f): """ Prepend self.f's docstring to f's docstring (or append it if `self.append == True`). Parameters ---------- f : function Function to decorate Returns ------- f : function Function f passed as argument with self.f's docstring {pre,ap}pended to it """ if self.append: f.__doc__ += + self.f.__doc__ else: f.__doc__ = self.f.__doc__ + f.__doc__ return f
Add function decorator to improve functools.wraps
Add function decorator to improve functools.wraps
Python
bsd-3-clause
goodfeli/pylearn2,JesseLivezey/pylearn2,TNick/pylearn2,fulmicoton/pylearn2,pkainz/pylearn2,Refefer/pylearn2,woozzu/pylearn2,kastnerkyle/pylearn2,CIFASIS/pylearn2,mclaughlin6464/pylearn2,hyqneuron/pylearn2-maxsom,aalmah/pylearn2,bartvm/pylearn2,JesseLivezey/pylearn2,nouiz/pylearn2,lamblin/pylearn2,CIFASIS/pylearn2,junbochen/pylearn2,ddboline/pylearn2,junbochen/pylearn2,ddboline/pylearn2,alexjc/pylearn2,w1kke/pylearn2,abergeron/pylearn2,mkraemer67/pylearn2,jamessergeant/pylearn2,fyffyt/pylearn2,fishcorn/pylearn2,fyffyt/pylearn2,matrogers/pylearn2,matrogers/pylearn2,aalmah/pylearn2,lunyang/pylearn2,skearnes/pylearn2,mkraemer67/pylearn2,theoryno3/pylearn2,kose-y/pylearn2,se4u/pylearn2,aalmah/pylearn2,daemonmaker/pylearn2,jeremyfix/pylearn2,hyqneuron/pylearn2-maxsom,lancezlin/pylearn2,Refefer/pylearn2,ddboline/pylearn2,lancezlin/pylearn2,kose-y/pylearn2,JesseLivezey/plankton,hantek/pylearn2,goodfeli/pylearn2,woozzu/pylearn2,ashhher3/pylearn2,bartvm/pylearn2,shiquanwang/pylearn2,TNick/pylearn2,hantek/pylearn2,lancezlin/pylearn2,TNick/pylearn2,daemonmaker/pylearn2,pkainz/pylearn2,fyffyt/pylearn2,alexjc/pylearn2,lunyang/pylearn2,ddboline/pylearn2,hantek/pylearn2,woozzu/pylearn2,pombredanne/pylearn2,TNick/pylearn2,jeremyfix/pylearn2,hyqneuron/pylearn2-maxsom,fishcorn/pylearn2,lisa-lab/pylearn2,kastnerkyle/pylearn2,mkraemer67/pylearn2,abergeron/pylearn2,alexjc/pylearn2,pombredanne/pylearn2,hyqneuron/pylearn2-maxsom,daemonmaker/pylearn2,KennethPierce/pylearnk,cosmoharrigan/pylearn2,aalmah/pylearn2,shiquanwang/pylearn2,JesseLivezey/plankton,KennethPierce/pylearnk,jamessergeant/pylearn2,caidongyun/pylearn2,fishcorn/pylearn2,mkraemer67/pylearn2,jamessergeant/pylearn2,CIFASIS/pylearn2,chrish42/pylearn,lunyang/pylearn2,fulmicoton/pylearn2,bartvm/pylearn2,mclaughlin6464/pylearn2,lunyang/pylearn2,se4u/pylearn2,fyffyt/pylearn2,Refefer/pylearn2,matrogers/pylearn2,mclaughlin6464/pylearn2,se4u/pylearn2,nouiz/pylearn2,jamessergeant/pylearn2,JesseLivezey/pylearn2,chrish42/pylearn,KennethPierce/pylearnk,cosmoharrigan/pylearn2,sandeepkbhat/pylearn2,theoryno3/pylearn2,w1kke/pylearn2,goodfeli/pylearn2,JesseLivezey/pylearn2,Refefer/pylearn2,caidongyun/pylearn2,msingh172/pylearn2,kastnerkyle/pylearn2,skearnes/pylearn2,KennethPierce/pylearnk,abergeron/pylearn2,kastnerkyle/pylearn2,skearnes/pylearn2,jeremyfix/pylearn2,pombredanne/pylearn2,ashhher3/pylearn2,lisa-lab/pylearn2,lamblin/pylearn2,junbochen/pylearn2,ashhher3/pylearn2,lamblin/pylearn2,CIFASIS/pylearn2,lisa-lab/pylearn2,pkainz/pylearn2,pombredanne/pylearn2,fishcorn/pylearn2,se4u/pylearn2,pkainz/pylearn2,junbochen/pylearn2,cosmoharrigan/pylearn2,goodfeli/pylearn2,theoryno3/pylearn2,ashhher3/pylearn2,alexjc/pylearn2,jeremyfix/pylearn2,JesseLivezey/plankton,chrish42/pylearn,msingh172/pylearn2,mclaughlin6464/pylearn2,sandeepkbhat/pylearn2,shiquanwang/pylearn2,w1kke/pylearn2,caidongyun/pylearn2,skearnes/pylearn2,matrogers/pylearn2,fulmicoton/pylearn2,shiquanwang/pylearn2,chrish42/pylearn,lisa-lab/pylearn2,kose-y/pylearn2,hantek/pylearn2,kose-y/pylearn2,msingh172/pylearn2,sandeepkbhat/pylearn2,w1kke/pylearn2,msingh172/pylearn2,lancezlin/pylearn2,woozzu/pylearn2,daemonmaker/pylearn2,JesseLivezey/plankton,fulmicoton/pylearn2,caidongyun/pylearn2,bartvm/pylearn2,nouiz/pylearn2,cosmoharrigan/pylearn2,sandeepkbhat/pylearn2,nouiz/pylearn2,lamblin/pylearn2,theoryno3/pylearn2,abergeron/pylearn2
<REPLACE_OLD> <REPLACE_NEW> """ Documentation-related helper classes/functions """ class soft_wraps: """ A Python decorator which concatenates two functions' docstrings: one function is defined at initialization and the other one is defined when soft_wraps is called. This helps reduce the ammount of documentation to write: one can use this decorator on child classes' functions when their implementation is similar to the one of the parent class. Conversely, if a function defined in a child class departs from its parent's implementation, one can simply explain the differences in a 'Notes' section without re-writing the whole docstring. Examples -------- >>> class Parent(object): ... def f(x): ... ''' ... Adds 1 to x ... ... Parameters ... ---------- ... x : int ... Variable to increment by 1 ... ... Returns ... ------- ... rval : int ... x incremented by 1 ... ''' ... rval = x + 1 ... return rval ... >>> class Child(Parent): ... @soft_wraps(Parent.f) ... def f(x): ... ''' ... Notes ... ----- ... Also prints the incremented value ... ''' ... rval = x + 1 ... print rval ... return rval ... >>> c = Child() >>> print c.f.__doc__ Adds 1 to x Parameters ---------- x : int Variable to increment by 1 Returns ------- rval : int x incremented by 1 Notes ----- Also prints the incremented value """ def __init__(self, f, append=False): """ Parameters ---------- f : function Function whose docstring will be concatenated with the decorated function's docstring prepend : bool, optional If True, appends f's docstring to the decorated function's docstring instead of prepending it. Defaults to False. """ self.f = f self.append = append def __call__(self, f): """ Prepend self.f's docstring to f's docstring (or append it if `self.append == True`). Parameters ---------- f : function Function to decorate Returns ------- f : function Function f passed as argument with self.f's docstring {pre,ap}pended to it """ if self.append: f.__doc__ += + self.f.__doc__ else: f.__doc__ = self.f.__doc__ + f.__doc__ return f <REPLACE_END> <|endoftext|> """ Documentation-related helper classes/functions """ class soft_wraps: """ A Python decorator which concatenates two functions' docstrings: one function is defined at initialization and the other one is defined when soft_wraps is called. This helps reduce the ammount of documentation to write: one can use this decorator on child classes' functions when their implementation is similar to the one of the parent class. Conversely, if a function defined in a child class departs from its parent's implementation, one can simply explain the differences in a 'Notes' section without re-writing the whole docstring. Examples -------- >>> class Parent(object): ... def f(x): ... ''' ... Adds 1 to x ... ... Parameters ... ---------- ... x : int ... Variable to increment by 1 ... ... Returns ... ------- ... rval : int ... x incremented by 1 ... ''' ... rval = x + 1 ... return rval ... >>> class Child(Parent): ... @soft_wraps(Parent.f) ... def f(x): ... ''' ... Notes ... ----- ... Also prints the incremented value ... ''' ... rval = x + 1 ... print rval ... return rval ... 
>>> c = Child() >>> print c.f.__doc__ Adds 1 to x Parameters ---------- x : int Variable to increment by 1 Returns ------- rval : int x incremented by 1 Notes ----- Also prints the incremented value """ def __init__(self, f, append=False): """ Parameters ---------- f : function Function whose docstring will be concatenated with the decorated function's docstring prepend : bool, optional If True, appends f's docstring to the decorated function's docstring instead of prepending it. Defaults to False. """ self.f = f self.append = append def __call__(self, f): """ Prepend self.f's docstring to f's docstring (or append it if `self.append == True`). Parameters ---------- f : function Function to decorate Returns ------- f : function Function f passed as argument with self.f's docstring {pre,ap}pended to it """ if self.append: f.__doc__ += + self.f.__doc__ else: f.__doc__ = self.f.__doc__ + f.__doc__ return f
Add function decorator to improve functools.wraps
403f23ae486c14066e0a93c7deca91c5fbc15b87
plugins/brian.py
plugins/brian.py
"""Displays a randomly generated witticism from Brian Chu himself.""" import json import random __match__ = r"!brian" with open('plugins/brian_corpus/cache.json', 'r') as infile: cache = json.load(infile) with open('plugins/brian_corpus/phrases.json', 'r') as infile: phrases = json.load(infile) def generate_phrase(phrases, cache): seed_phrase = [] while len(seed_phrase) < 3: seed_phrase = random.choice(phrases).split() w1, w2 = seed_phrase[:2] chosen = [w1, w2] while "{}|{}".format(w1, w2) in cache: choice = random.choice(cache["{}|{}".format(w1, w2)]) w1, w2 = w2, choice chosen.append(choice) return ' '.join(chosen) def on_message(bot, channel, user, message): return '> {} ~brian'.format(generate_phrase(phrases, cache))
"""Displays a randomly generated witticism from Brian Chu himself.""" import json import random __match__ = r"!brian" attribution = [ "salad master", "esquire", "the one and only", "startup enthusiast", "boba king", "not-dictator", "normal citizen", "ping-pong expert" ] with open('plugins/brian_corpus/phrases.json', 'r') as infile: phrases = json.load(infile) with open('plugins/brian_corpus/cache.json', 'r') as infile: cache = json.load(infile) def generate_phrase(phrases, cache, max_length=40): seed_phrase = [] while len(seed_phrase) < 2: seed_phrase = random.choice(phrases).split() w1, = seed_phrase[:1] chosen = [w1] while w1 in cache and len(chosen)<max_length: w1 = random.choice(cache[w1]) chosen.append(w1) return ' '.join(chosen) def on_message(bot, channel, user, message): return '> {} ~ Brian Chu, {}'.format(generate_phrase(phrases, cache), random.choice(attribution))
Use bigrams in Markov chain generator
Use bigrams in Markov chain generator
Python
mit
kvchen/keffbot,kvchen/keffbot-py
<REPLACE_OLD> r"!brian" with <REPLACE_NEW> r"!brian" attribution = [ "salad master", "esquire", "the one and only", "startup enthusiast", "boba king", "not-dictator", "normal citizen", "ping-pong expert" ] with open('plugins/brian_corpus/phrases.json', 'r') as infile: phrases = json.load(infile) with <REPLACE_END> <DELETE> json.load(infile) with open('plugins/brian_corpus/phrases.json', 'r') as infile: phrases = <DELETE_END> <REPLACE_OLD> cache): <REPLACE_NEW> cache, max_length=40): <REPLACE_END> <REPLACE_OLD> 3: <REPLACE_NEW> 2: <REPLACE_END> <DELETE> w2 <DELETE_END> <REPLACE_OLD> seed_phrase[:2] <REPLACE_NEW> seed_phrase[:1] <REPLACE_END> <REPLACE_OLD> [w1, w2] <REPLACE_NEW> [w1] <REPLACE_END> <REPLACE_OLD> "{}|{}".format(w1, w2) <REPLACE_NEW> w1 <REPLACE_END> <REPLACE_OLD> cache: <REPLACE_NEW> cache and len(chosen)<max_length: <REPLACE_END> <REPLACE_OLD> choice <REPLACE_NEW> w1 <REPLACE_END> <REPLACE_OLD> random.choice(cache["{}|{}".format(w1, w2)]) <REPLACE_NEW> random.choice(cache[w1]) <REPLACE_END> <REPLACE_OLD> w1, w2 = w2, choice chosen.append(choice) <REPLACE_NEW> chosen.append(w1) <REPLACE_END> <REPLACE_OLD> ~brian'.format(generate_phrase(phrases, cache)) <REPLACE_NEW> ~ Brian Chu, {}'.format(generate_phrase(phrases, cache), random.choice(attribution)) <REPLACE_END> <|endoftext|> """Displays a randomly generated witticism from Brian Chu himself.""" import json import random __match__ = r"!brian" attribution = [ "salad master", "esquire", "the one and only", "startup enthusiast", "boba king", "not-dictator", "normal citizen", "ping-pong expert" ] with open('plugins/brian_corpus/phrases.json', 'r') as infile: phrases = json.load(infile) with open('plugins/brian_corpus/cache.json', 'r') as infile: cache = json.load(infile) def generate_phrase(phrases, cache, max_length=40): seed_phrase = [] while len(seed_phrase) < 2: seed_phrase = random.choice(phrases).split() w1, = seed_phrase[:1] chosen = [w1] while w1 in cache and len(chosen)<max_length: w1 = random.choice(cache[w1]) chosen.append(w1) return ' '.join(chosen) def on_message(bot, channel, user, message): return '> {} ~ Brian Chu, {}'.format(generate_phrase(phrases, cache), random.choice(attribution))
Use bigrams in Markov chain generator """Displays a randomly generated witticism from Brian Chu himself.""" import json import random __match__ = r"!brian" with open('plugins/brian_corpus/cache.json', 'r') as infile: cache = json.load(infile) with open('plugins/brian_corpus/phrases.json', 'r') as infile: phrases = json.load(infile) def generate_phrase(phrases, cache): seed_phrase = [] while len(seed_phrase) < 3: seed_phrase = random.choice(phrases).split() w1, w2 = seed_phrase[:2] chosen = [w1, w2] while "{}|{}".format(w1, w2) in cache: choice = random.choice(cache["{}|{}".format(w1, w2)]) w1, w2 = w2, choice chosen.append(choice) return ' '.join(chosen) def on_message(bot, channel, user, message): return '> {} ~brian'.format(generate_phrase(phrases, cache))
c395da80c02b6c39514fcc46a7b951c71ae2c12b
usingnamespace/api/views/v1/root.py
usingnamespace/api/views/v1/root.py
new_contents:

from pyramid.view import view_config

from ....views.finalisecontext import FinaliseContext

class APIV1(FinaliseContext):
    @view_config(context='...traversal.v1.Root', route_name='api',
                 renderer='json')
    def main(self):
        sites = []

        for site in self.context.sites:
            sites.append(
                {
                    'id': site.id,
                    'title': site.title,
                    'tagline': site.tagline,
                }
            )

        return {
            'sites': sites,
        }
Add view for API v1 Root
Add view for API v1 Root This sends back a JSON response containing sites with IDs as well as title/tagline.
Python
isc
usingnamespace/usingnamespace
<REPLACE_OLD> <REPLACE_NEW> from pyramid.view import view_config from ....views.finalisecontext import FinaliseContext class APIV1(FinaliseContext): @view_config(context='...traversal.v1.Root', route_name='api', renderer='json') def main(self): sites = [] for site in self.context.sites: sites.append( { 'id': site.id, 'title': site.title, 'tagline': site.tagline, } ) return { 'sites': sites, } <REPLACE_END> <|endoftext|> from pyramid.view import view_config from ....views.finalisecontext import FinaliseContext class APIV1(FinaliseContext): @view_config(context='...traversal.v1.Root', route_name='api', renderer='json') def main(self): sites = [] for site in self.context.sites: sites.append( { 'id': site.id, 'title': site.title, 'tagline': site.tagline, } ) return { 'sites': sites, }
Add view for API v1 Root This sends back a JSON response containing sites with IDs as well as title/tagline.
4ed64168541993e2ea85e7ea47139550d1daa206
backdrop/write/config/development_environment_sample.py
backdrop/write/config/development_environment_sample.py
old_contents:

# Copy this file to development_environment.py
# and replace OAuth credentials with your dev credentials

TOKENS = {
    '_foo_bucket': '_foo_bucket-bearer-token',
    'bucket': 'bucket-bearer-token',
    'foo': 'foo-bearer-token',
    'foo_bucket': 'foo_bucket-bearer-token',
    'licensing': 'licensing-bearer-token',
    'licensing_journey': 'licensing_journey-bearer-token'
}

PERMISSIONS = {}

OAUTH_CLIENT_ID = \
    "1759c91cdc926eebe5d5c9fce53a58170ad17ba30a22b4b451c377a339a98844"
OAUTH_CLIENT_SECRET = \
    "8f205218c0a378e33dccae5a557b4cac766f343a7dbfcb50de2286f03db4273a"
OAUTH_BASE_URL = "http://signon.dev.gov.uk"
new_contents:

# Copy this file to development_environment.py
# and replace OAuth credentials with your dev credentials

TOKENS = {
    '_foo_bucket': '_foo_bucket-bearer-token',
    'bucket': 'bucket-bearer-token',
    'foo': 'foo-bearer-token',
    'foo_bucket': 'foo_bucket-bearer-token',
    'licensing': 'licensing-bearer-token',
    'licensing_journey': 'licensing_journey-bearer-token',
    'licensing_live_data': 'licensing_live_data_bearer_token',
    'licence_finder_live_data': 'licence_finder_live_data_bearer_token'
}

PERMISSIONS = {}

OAUTH_CLIENT_ID = \
    "1759c91cdc926eebe5d5c9fce53a58170ad17ba30a22b4b451c377a339a98844"
OAUTH_CLIENT_SECRET = \
    "8f205218c0a378e33dccae5a557b4cac766f343a7dbfcb50de2286f03db4273a"
OAUTH_BASE_URL = "http://signon.dev.gov.uk"
Add buckets for collecting pingdom data
Add buckets for collecting pingdom data
Python
mit
alphagov/backdrop,alphagov/backdrop,alphagov/backdrop
<REPLACE_OLD> 'licensing_journey-bearer-token' } PERMISSIONS <REPLACE_NEW> 'licensing_journey-bearer-token', 'licensing_live_data': 'licensing_live_data_bearer_token', 'licence_finder_live_data': 'licence_finder_live_data_bearer_token' } PERMISSIONS <REPLACE_END> <|endoftext|> # Copy this file to development_environment.py # and replace OAuth credentials your dev credentials TOKENS = { '_foo_bucket': '_foo_bucket-bearer-token', 'bucket': 'bucket-bearer-token', 'foo': 'foo-bearer-token', 'foo_bucket': 'foo_bucket-bearer-token', 'licensing': 'licensing-bearer-token', 'licensing_journey': 'licensing_journey-bearer-token', 'licensing_live_data': 'licensing_live_data_bearer_token', 'licence_finder_live_data': 'licence_finder_live_data_bearer_token' } PERMISSIONS = {} OAUTH_CLIENT_ID = \ "1759c91cdc926eebe5d5c9fce53a58170ad17ba30a22b4b451c377a339a98844" OAUTH_CLIENT_SECRET = \ "8f205218c0a378e33dccae5a557b4cac766f343a7dbfcb50de2286f03db4273a" OAUTH_BASE_URL = "http://signon.dev.gov.uk"
Add buckets for collecting pingdom data # Copy this file to development_environment.py # and replace OAuth credentials your dev credentials TOKENS = { '_foo_bucket': '_foo_bucket-bearer-token', 'bucket': 'bucket-bearer-token', 'foo': 'foo-bearer-token', 'foo_bucket': 'foo_bucket-bearer-token', 'licensing': 'licensing-bearer-token', 'licensing_journey': 'licensing_journey-bearer-token' } PERMISSIONS = {} OAUTH_CLIENT_ID = \ "1759c91cdc926eebe5d5c9fce53a58170ad17ba30a22b4b451c377a339a98844" OAUTH_CLIENT_SECRET = \ "8f205218c0a378e33dccae5a557b4cac766f343a7dbfcb50de2286f03db4273a" OAUTH_BASE_URL = "http://signon.dev.gov.uk"
9f64d5e2f9447233df8d3b841c519196c3213e05
pyflation/analysis/tests/test_deltaprel.py
pyflation/analysis/tests/test_deltaprel.py
old_contents:

'''
test_deltaprel - Test functions for deltaprel module

Author: Ian Huston
For license and copyright information see LICENSE.txt which was distributed
with this file.
'''
import numpy as np
from numpy.testing import assert_, assert_raises
from pyflation.analysis import deltaprel
import nose

class TestSoundSpeeds():

    def setup(self):
        self.Vphi = np.arange(24).reshape((4,3,2))
        self.phidot = self.Vphi
        self.H = np.arange(8).reshape((4,1,2))

    def test_shape(self):
        """Test whether the soundspeeds are shaped correctly."""
        arr = deltaprel.soundspeeds(self.Vphi, self.phidot, self.H)
        assert_(arr.shape == self.Vphi.shape)

    def test_calc(self):
        """Test results of calculation."""
        arr = deltaprel.soundspeeds(self.Vphi, self.phidot, self.H)

    def test_wrongshape(self):
        """Test that wrong shapes raise exception."""
        self.H = np.arange(8).reshape((4,2))
        assert_raises(ValueError, deltaprel.soundspeeds, self.Vphi,
                      self.phidot, self.H)
new_contents:

'''
test_deltaprel - Test functions for deltaprel module

Author: Ian Huston
For license and copyright information see LICENSE.txt which was distributed
with this file.
'''
import numpy as np
from numpy.testing import assert_, assert_raises
from pyflation.analysis import deltaprel
import nose

class TestSoundSpeeds():

    def setup(self):
        self.Vphi = np.arange(24).reshape((4,3,2))
        self.phidot = self.Vphi
        self.H = np.arange(8).reshape((4,1,2))

    def test_shape(self):
        """Test whether the soundspeeds are shaped correctly."""
        arr = deltaprel.soundspeeds(self.Vphi, self.phidot, self.H)
        assert_(arr.shape == self.Vphi.shape)

    def test_scalar(self):
        """Test results of 1x1x1 calculation."""
        arr = deltaprel.soundspeeds(3, 2, 0.5)
        assert_(arr == 3)

    def test_wrongshape(self):
        """Test that wrong shapes raise exception."""
        self.H = np.arange(8).reshape((4,2))
        assert_raises(ValueError, deltaprel.soundspeeds, self.Vphi,
                      self.phidot, self.H)
Add test for scalar values.
Add test for scalar values.
Python
bsd-3-clause
ihuston/pyflation,ihuston/pyflation
<REPLACE_OLD> test_calc(self): <REPLACE_NEW> test_scalar(self): <REPLACE_END> <INSERT> 1x1x1 <INSERT_END> <REPLACE_OLD> deltaprel.soundspeeds(self.Vphi, self.phidot, self.H) <REPLACE_NEW> deltaprel.soundspeeds(3, 2, 0.5) assert_(arr == 3) <REPLACE_END> <|endoftext|> ''' test_deltaprel - Test functions for deltaprel module Author: Ian Huston For license and copyright information see LICENSE.txt which was distributed with this file. ''' import numpy as np from numpy.testing import assert_, assert_raises from pyflation.analysis import deltaprel import nose class TestSoundSpeeds(): def setup(self): self.Vphi = np.arange(24).reshape((4,3,2)) self.phidot = self.Vphi self.H = np.arange(8).reshape((4,1,2)) def test_shape(self): """Test whether the soundspeeds are shaped correctly.""" arr = deltaprel.soundspeeds(self.Vphi, self.phidot, self.H) assert_(arr.shape == self.Vphi.shape) def test_scalar(self): """Test results of 1x1x1 calculation.""" arr = deltaprel.soundspeeds(3, 2, 0.5) assert_(arr == 3) def test_wrongshape(self): """Test that wrong shapes raise exception.""" self.H = np.arange(8).reshape((4,2)) assert_raises(ValueError, deltaprel.soundspeeds, self.Vphi, self.phidot, self.H)
Add test for scalar values. ''' test_deltaprel - Test functions for deltaprel module Author: Ian Huston For license and copyright information see LICENSE.txt which was distributed with this file. ''' import numpy as np from numpy.testing import assert_, assert_raises from pyflation.analysis import deltaprel import nose class TestSoundSpeeds(): def setup(self): self.Vphi = np.arange(24).reshape((4,3,2)) self.phidot = self.Vphi self.H = np.arange(8).reshape((4,1,2)) def test_shape(self): """Test whether the soundspeeds are shaped correctly.""" arr = deltaprel.soundspeeds(self.Vphi, self.phidot, self.H) assert_(arr.shape == self.Vphi.shape) def test_calc(self): """Test results of calculation.""" arr = deltaprel.soundspeeds(self.Vphi, self.phidot, self.H) def test_wrongshape(self): """Test that wrong shapes raise exception.""" self.H = np.arange(8).reshape((4,2)) assert_raises(ValueError, deltaprel.soundspeeds, self.Vphi, self.phidot, self.H)
55c183ad234ec53e2c7ba82e9e19793564373200
comics/comics/dieselsweetiesweb.py
comics/comics/dieselsweetiesweb.py
old_contents:

from comics.aggregator.crawler import CrawlerBase, CrawlerImage
from comics.meta.base import MetaBase

class Meta(MetaBase):
    name = 'Diesel Sweeties (web)'
    language = 'en'
    url = 'http://www.dieselsweeties.com/'
    start_date = '2000-01-01'
    rights = 'Richard Stevens'

class Crawler(CrawlerBase):
    history_capable_date = '2000-01-01'
    schedule = 'Mo,Tu,We,Th,Fr'
    time_zone = -5

    def crawl(self, pub_date):
        feed = self.parse_feed('http://www.dieselsweeties.com/ds-unifeed.xml')
        for entry in feed.for_date(pub_date):
            if not entry.summary:
                continue
            url = entry.summary.src('img[src*="/strips/"]')
            title = entry.title
            text = entry.summary.alt('img[src*="/strips/"]')
            return CrawlerImage(url, title, text)
new_contents:

from comics.aggregator.crawler import CrawlerBase, CrawlerImage
from comics.meta.base import MetaBase

class Meta(MetaBase):
    name = 'Diesel Sweeties (web)'
    language = 'en'
    url = 'http://www.dieselsweeties.com/'
    start_date = '2000-01-01'
    rights = 'Richard Stevens'

class Crawler(CrawlerBase):
    history_capable_date = '2000-01-01'
    schedule = 'Mo,Tu,We,Th,Fr'
    time_zone = -5

    def crawl(self, pub_date):
        feed = self.parse_feed('http://www.dieselsweeties.com/ds-unifeed.xml')
        for entry in feed.for_date(pub_date):
            if not hasattr(entry, 'summary'):
                continue
            url = entry.summary.src('img[src*="/strips/"]')
            title = entry.title
            text = entry.summary.alt('img[src*="/strips/"]')
            return CrawlerImage(url, title, text)
Check if field exists, not if it's empty
Check if field exists, not if it's empty
Python
agpl-3.0
jodal/comics,jodal/comics,datagutten/comics,datagutten/comics,datagutten/comics,jodal/comics,jodal/comics,datagutten/comics
<REPLACE_OLD> entry.summary: <REPLACE_NEW> hasattr(entry, 'summary'): <REPLACE_END> <|endoftext|> from comics.aggregator.crawler import CrawlerBase, CrawlerImage from comics.meta.base import MetaBase class Meta(MetaBase): name = 'Diesel Sweeties (web)' language = 'en' url = 'http://www.dieselsweeties.com/' start_date = '2000-01-01' rights = 'Richard Stevens' class Crawler(CrawlerBase): history_capable_date = '2000-01-01' schedule = 'Mo,Tu,We,Th,Fr' time_zone = -5 def crawl(self, pub_date): feed = self.parse_feed('http://www.dieselsweeties.com/ds-unifeed.xml') for entry in feed.for_date(pub_date): if not hasattr(entry, 'summary'): continue url = entry.summary.src('img[src*="/strips/"]') title = entry.title text = entry.summary.alt('img[src*="/strips/"]') return CrawlerImage(url, title, text)
Check if field exists, not if it's empty from comics.aggregator.crawler import CrawlerBase, CrawlerImage from comics.meta.base import MetaBase class Meta(MetaBase): name = 'Diesel Sweeties (web)' language = 'en' url = 'http://www.dieselsweeties.com/' start_date = '2000-01-01' rights = 'Richard Stevens' class Crawler(CrawlerBase): history_capable_date = '2000-01-01' schedule = 'Mo,Tu,We,Th,Fr' time_zone = -5 def crawl(self, pub_date): feed = self.parse_feed('http://www.dieselsweeties.com/ds-unifeed.xml') for entry in feed.for_date(pub_date): if not entry.summary: continue url = entry.summary.src('img[src*="/strips/"]') title = entry.title text = entry.summary.alt('img[src*="/strips/"]') return CrawlerImage(url, title, text)
65d5f4f3947b115421f273b7edb22420035c3ca3
obfsproxy/common/modexp.py
obfsproxy/common/modexp.py
new_contents:

import gmpy

def powMod( x, y, mod ):
    """
    Efficiently calculate and return `x' to the power of `y' mod `mod'.

    Before the modular exponentiation, the three numbers are converted to
    GMPY's bignum representation which speeds up exponentiation.
    """

    x = gmpy.mpz(x)
    y = gmpy.mpz(y)
    mod = gmpy.mpz(mod)

    return pow(x, y, mod)
Add function for fast modular exponentiation.
Add function for fast modular exponentiation. The function uses GMPY's bignum arithmetic which speeds up the calculation.
Python
bsd-3-clause
qdzheng/obfsproxy,infinity0/obfsproxy,catinred2/obfsproxy,NullHypothesis/obfsproxy,isislovecruft/obfsproxy,Yawning/obfsproxy,masterkorp/obfsproxy,Yawning/obfsproxy-wfpadtools,sunsong/obfsproxy,david415/obfsproxy
<INSERT> import gmpy def powMod( x, y, mod ): <INSERT_END> <INSERT> """ Efficiently calculate and return `x' to the power of `y' mod `mod'. Before the modular exponentiation, the three numbers are converted to GMPY's bignum representation which speeds up exponentiation. """ x = gmpy.mpz(x) y = gmpy.mpz(y) mod = gmpy.mpz(mod) return pow(x, y, mod) <INSERT_END> <|endoftext|> import gmpy def powMod( x, y, mod ): """ Efficiently calculate and return `x' to the power of `y' mod `mod'. Before the modular exponentiation, the three numbers are converted to GMPY's bignum representation which speeds up exponentiation. """ x = gmpy.mpz(x) y = gmpy.mpz(y) mod = gmpy.mpz(mod) return pow(x, y, mod)
Add function for fast modular exponentiation. The function uses GMPY's bignum arithmetic which speeds up the calculation.
603c36aec2a4704bb4cf41c224194a5f83f9babe
sale_payment_method_automatic_workflow/__openerp__.py
sale_payment_method_automatic_workflow/__openerp__.py
old_contents:

# -*- coding: utf-8 -*-
##############################################################################
#
#    Author: Guewen Baconnier
#    Copyright 2015 Camptocamp SA
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

{'name': 'Sale Payment Method - Automatic Reconcile',
 'version': '1.0',
 'author': ['Camptocamp', 'Akretion'],
 'license': 'AGPL-3',
 'category': 'Generic Modules/Others',
 'depends': ['sale_payment_method',
             'sale_automatic_workflow'],
 'website': 'http://www.camptocamp.com',
 'data': [],
 'test': [],
 'installable': True,
 'auto_install': False,
 }
new_contents:

# -*- coding: utf-8 -*-
##############################################################################
#
#    Author: Guewen Baconnier
#    Copyright 2015 Camptocamp SA
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

{'name': 'Sale Payment Method - Automatic Reconcile',
 'version': '1.0',
 'author': ['Camptocamp', 'Akretion'],
 'license': 'AGPL-3',
 'category': 'Generic Modules/Others',
 'depends': ['sale_payment_method',
             'sale_automatic_workflow'],
 'website': 'http://www.camptocamp.com',
 'data': [],
 'test': [],
 'installable': True,
 'auto_install': True,
 }
Set the module as auto_install
Set the module as auto_install So it installs when both sale_payment_method and sale_automatic_workflow are installed. This module acts as the glue between them
Python
agpl-3.0
BT-ojossen/e-commerce,Antiun/e-commerce,raycarnes/e-commerce,jt-xx/e-commerce,gurneyalex/e-commerce,BT-ojossen/e-commerce,Endika/e-commerce,damdam-s/e-commerce,vauxoo-dev/e-commerce,charbeljc/e-commerce,brain-tec/e-commerce,BT-jmichaud/e-commerce,Endika/e-commerce,brain-tec/e-commerce,JayVora-SerpentCS/e-commerce,fevxie/e-commerce,JayVora-SerpentCS/e-commerce,jt-xx/e-commerce,Antiun/e-commerce,raycarnes/e-commerce,cloud9UG/e-commerce,vauxoo-dev/e-commerce,BT-fgarbely/e-commerce
<REPLACE_OLD> False, <REPLACE_NEW> True, <REPLACE_END> <|endoftext|> # -*- coding: utf-8 -*- ############################################################################## # # Author: Guewen Baconnier # Copyright 2015 Camptocamp SA # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## {'name': 'Sale Payment Method - Automatic Reconcile', 'version': '1.0', 'author': ['Camptocamp', 'Akretion'], 'license': 'AGPL-3', 'category': 'Generic Modules/Others', 'depends': ['sale_payment_method', 'sale_automatic_workflow'], 'website': 'http://www.camptocamp.com', 'data': [], 'test': [], 'installable': True, 'auto_install': True, }
Set the module as auto_install So it installs when both sale_payment_method and sale_automatic_workflow are installed. This module acts as the glue between them # -*- coding: utf-8 -*- ############################################################################## # # Author: Guewen Baconnier # Copyright 2015 Camptocamp SA # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## {'name': 'Sale Payment Method - Automatic Reconcile', 'version': '1.0', 'author': ['Camptocamp', 'Akretion'], 'license': 'AGPL-3', 'category': 'Generic Modules/Others', 'depends': ['sale_payment_method', 'sale_automatic_workflow'], 'website': 'http://www.camptocamp.com', 'data': [], 'test': [], 'installable': True, 'auto_install': False, }
5e53f1e86fc7c4f1c7b42479684ac393c997ce52
client/test/test-unrealcv.py
client/test/test-unrealcv.py
old_contents:

# TODO: Test robustness, test speed
import unittest, time, sys
from common_conf import *
from test_server import EchoServer, MessageServer
import argparse
import threading

from test_server import TestMessageServer
from test_client import TestClientWithDummyServer
from test_commands import TestCommands
from test_realistic_rendering import TestRealisticRendering

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--travis', action='store_true')  # Only run tests available to travis CI
    args = parser.parse_args()

    suites = []
    load = unittest.TestLoader().loadTestsFromTestCase

    s = load(TestMessageServer); suites.append(s)
    s = load(TestClientWithDummyServer); suites.append(s)
    if not args.travis:
        s = load(TestCommands); suites.append(s)
        s = load(TestRealisticRendering); suites.append(s)

    suite_obj = unittest.TestSuite(suites)
    unittest.TextTestRunner(verbosity = 2).run(suite_obj)
new_contents:

# TODO: Test robustness, test speed
import unittest, time, sys
from common_conf import *
from test_server import EchoServer, MessageServer
import argparse
import threading

from test_server import TestMessageServer
from test_client import TestClientWithDummyServer
from test_commands import TestCommands
from test_realistic_rendering import TestRealisticRendering

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--travis', action='store_true')  # Only run tests available to travis CI
    args = parser.parse_args()

    suites = []
    load = unittest.TestLoader().loadTestsFromTestCase

    s = load(TestMessageServer); suites.append(s)
    s = load(TestClientWithDummyServer); suites.append(s)
    if not args.travis:
        s = load(TestCommands); suites.append(s)
        s = load(TestRealisticRendering); suites.append(s)

    suite_obj = unittest.TestSuite(suites)
    ret = not unittest.TextTestRunner(verbosity = 2).run(suite_obj).wasSuccessful()
    sys.exit(ret)
Fix exit code of unittest.
Fix exit code of unittest.
Python
mit
qiuwch/unrealcv,qiuwch/unrealcv,unrealcv/unrealcv,qiuwch/unrealcv,qiuwch/unrealcv,qiuwch/unrealcv,qiuwch/unrealcv,unrealcv/unrealcv,unrealcv/unrealcv,unrealcv/unrealcv,unrealcv/unrealcv
<INSERT> ret = not <INSERT_END> <REPLACE_OLD> 2).run(suite_obj) <REPLACE_NEW> 2).run(suite_obj).wasSucessful() sys.exit(ret) <REPLACE_END> <|endoftext|> # TODO: Test robustness, test speed import unittest, time, sys from common_conf import * from test_server import EchoServer, MessageServer import argparse import threading from test_server import TestMessageServer from test_client import TestClientWithDummyServer from test_commands import TestCommands from test_realistic_rendering import TestRealisticRendering if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--travis', action='store_true') # Only run test availabe to travis CI args = parser.parse_args() suites = [] load = unittest.TestLoader().loadTestsFromTestCase s = load(TestMessageServer); suites.append(s) s = load(TestClientWithDummyServer); suites.append(s) if not args.travis: s = load(TestCommands); suites.append(s) s = load(TestRealisticRendering); suites.append(s) suite_obj = unittest.TestSuite(suites) ret = not unittest.TextTestRunner(verbosity = 2).run(suite_obj).wasSucessful() sys.exit(ret)
Fix exit code of unittest. # TODO: Test robustness, test speed import unittest, time, sys from common_conf import * from test_server import EchoServer, MessageServer import argparse import threading from test_server import TestMessageServer from test_client import TestClientWithDummyServer from test_commands import TestCommands from test_realistic_rendering import TestRealisticRendering if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--travis', action='store_true') # Only run test availabe to travis CI args = parser.parse_args() suites = [] load = unittest.TestLoader().loadTestsFromTestCase s = load(TestMessageServer); suites.append(s) s = load(TestClientWithDummyServer); suites.append(s) if not args.travis: s = load(TestCommands); suites.append(s) s = load(TestRealisticRendering); suites.append(s) suite_obj = unittest.TestSuite(suites) unittest.TextTestRunner(verbosity = 2).run(suite_obj)
706dbcc5208cdebe616e387b280aa7411d4bdc42
setup.py
setup.py
old_contents:

# coding: utf-8

from setuptools import setup, find_packages

setup(
    name = 'thumbor_aws',
    version = "1",
    description = 'Thumbor AWS extensions',
    author = 'William King',
    author_email = '[email protected]',
    zip_safe = False,
    include_package_data = True,
    packages=find_packages(),
    install_requires=['py-dateutil','thumbor','boto']
)
new_contents:

# coding: utf-8

from setuptools import setup, find_packages

setup(
    name = 'thumbor_aws',
    version = "1",
    description = 'Thumbor AWS extensions',
    author = 'William King',
    author_email = '[email protected]',
    zip_safe = False,
    include_package_data = True,
    packages=find_packages(),
    install_requires=['python-dateutil','thumbor','boto']
)
Use python-dateutil instead of py-dateutil
Use python-dateutil instead of py-dateutil
Python
mit
voxmedia/aws,bob3000/thumbor_aws,pgr0ss/aws,andrew-a-dev/aws,aoqfonseca/aws,tsauzeau/aws,thumbor-community/aws,guilhermef/aws,ScrunchEnterprises/thumbor_aws,abaldwin1/tc_aws
<REPLACE_OLD> True, packages=find_packages(), install_requires=['py-dateutil','thumbor','boto'] ) <REPLACE_NEW> True, packages=find_packages(), install_requires=['python-dateutil','thumbor','boto'] ) <REPLACE_END> <|endoftext|> # coding: utf-8 from setuptools import setup, find_packages setup( name = 'thumbor_aws', version = "1", description = 'Thumbor AWS extensions', author = 'William King', author_email = '[email protected]', zip_safe = False, include_package_data = True, packages=find_packages(), install_requires=['python-dateutil','thumbor','boto'] )
Use python-dateutil instead of py-dateutil # coding: utf-8 from setuptools import setup, find_packages setup( name = 'thumbor_aws', version = "1", description = 'Thumbor AWS extensions', author = 'William King', author_email = '[email protected]', zip_safe = False, include_package_data = True, packages=find_packages(), install_requires=['py-dateutil','thumbor','boto'] )
6f148fb1bb047b4977c8fcd1d898c231bed3fc9d
indra/tests/test_dart_client.py
indra/tests/test_dart_client.py
new_contents:

import json

from indra.literature.dart_client import jsonify_query_data

def test_timestamp():
    # Should ignore "after"
    assert jsonify_query_data(timestamp={'on': '2020-01-01',
                                         'after': '2020-01-02'}) == \
        json.dumps({"timestamp": {"on": "2020-01-01"}})
    assert jsonify_query_data(timestamp={'after': '2020-01-01',
                                         'before': '2020-01-05'}) == \
        json.dumps(
            {'timestamp': {'after': '2020-01-01', 'before': '2020-01-05'}})

def test_lists():
    # Check lists, ignore the lists that have non-str objects
    assert jsonify_query_data(readers=['hume', 123456],
                              versions=['123', '456']) == \
        json.dumps({'versions': ['123', '456']})
Add two tests for dart client
Add two tests for dart client
Python
bsd-2-clause
sorgerlab/belpy,sorgerlab/indra,sorgerlab/belpy,bgyori/indra,johnbachman/belpy,sorgerlab/indra,bgyori/indra,sorgerlab/indra,bgyori/indra,sorgerlab/belpy,johnbachman/belpy,johnbachman/indra,johnbachman/indra,johnbachman/belpy,johnbachman/indra
<REPLACE_OLD> <REPLACE_NEW> import json from indra.literature.dart_client import jsonify_query_data def test_timestamp(): # Should ignore "after" assert jsonify_query_data(timestamp={'on': '2020-01-01', 'after': '2020-01-02'}) == \ json.dumps({"timestamp": {"on": "2020-01-01"}}) assert jsonify_query_data(timestamp={'after': '2020-01-01', 'before': '2020-01-05'}) == \ json.dumps( {'timestamp': {'after': '2020-01-01', 'before': '2020-01-05'}}) def test_lists(): # Check lists, ignore the lists that have non-str objects assert jsonify_query_data(readers=['hume', 123456], versions=['123', '456']) ==\ json.dumps({'versions': ['123', '456']}) <REPLACE_END> <|endoftext|> import json from indra.literature.dart_client import jsonify_query_data def test_timestamp(): # Should ignore "after" assert jsonify_query_data(timestamp={'on': '2020-01-01', 'after': '2020-01-02'}) == \ json.dumps({"timestamp": {"on": "2020-01-01"}}) assert jsonify_query_data(timestamp={'after': '2020-01-01', 'before': '2020-01-05'}) == \ json.dumps( {'timestamp': {'after': '2020-01-01', 'before': '2020-01-05'}}) def test_lists(): # Check lists, ignore the lists that have non-str objects assert jsonify_query_data(readers=['hume', 123456], versions=['123', '456']) ==\ json.dumps({'versions': ['123', '456']})
Add two tests for dart client
e2fa4b150546be4b4f0ae59f18ef6ba2b6180d1a
accounts/serializers.py
accounts/serializers.py
"""Serializers for account models""" # pylint: disable=too-few-public-methods from rest_framework import serializers from accounts.models import User class UserSerializer(serializers.ModelSerializer): """Serializer for Users""" class Meta: """Model and field definitions""" model = User fields = ( 'id', 'username', 'email', 'website', 'avatar', 'steamid', 'is_staff', )
"""Serializers for account models""" # pylint: disable=too-few-public-methods from rest_framework import serializers from accounts.models import User class UserSerializer(serializers.ModelSerializer): """Serializer for Users""" class Meta: """Model and field definitions""" model = User fields = ( "id", "username", "email", "website", "avatar_url", "steamid", "is_staff", )
Change avatar to avatar_url in the user API
Change avatar to avatar_url in the user API
Python
agpl-3.0
lutris/website,lutris/website,lutris/website,lutris/website
<REPLACE_OLD> definitions""" <REPLACE_NEW> definitions""" <REPLACE_END> <REPLACE_OLD> 'id', <REPLACE_NEW> "id", <REPLACE_END> <REPLACE_OLD> 'username', <REPLACE_NEW> "username", <REPLACE_END> <REPLACE_OLD> 'email', <REPLACE_NEW> "email", <REPLACE_END> <REPLACE_OLD> 'website', <REPLACE_NEW> "website", <REPLACE_END> <REPLACE_OLD> 'avatar', <REPLACE_NEW> "avatar_url", <REPLACE_END> <REPLACE_OLD> 'steamid', <REPLACE_NEW> "steamid", <REPLACE_END> <REPLACE_OLD> 'is_staff', <REPLACE_NEW> "is_staff", <REPLACE_END> <|endoftext|> """Serializers for account models""" # pylint: disable=too-few-public-methods from rest_framework import serializers from accounts.models import User class UserSerializer(serializers.ModelSerializer): """Serializer for Users""" class Meta: """Model and field definitions""" model = User fields = ( "id", "username", "email", "website", "avatar_url", "steamid", "is_staff", )
Change avatar to avatar_url in the user API """Serializers for account models""" # pylint: disable=too-few-public-methods from rest_framework import serializers from accounts.models import User class UserSerializer(serializers.ModelSerializer): """Serializer for Users""" class Meta: """Model and field definitions""" model = User fields = ( 'id', 'username', 'email', 'website', 'avatar', 'steamid', 'is_staff', )
d791b593dbf3d6505bf9eac8766aaf0b7f22c721
launch_instance.py
launch_instance.py
old_contents:

# License under the MIT License - see LICENSE

import boto.ec2
import os
import time

def launch(key_name=None, region='us-west-2', image_id='ami-5189a661',
           instance_type='t2.micro', security_groups='launch-wizard-1',
           user_data=None, initial_check=True):
    '''
    '''

    if not isinstance(security_groups, list):
        security_groups = [security_groups]

    ec2 = boto.ec2.connect_to_region(region)

    reserve = ec2.run_instances(image_id, key_name=key_name,
                                instance_type=instance_type,
                                security_groups=security_groups,
                                user_data=user_data)

    inst = reserve.instances[0]

    while inst.state == u'pending':
        time.sleep(10)
        inst.update()

    if initial_check:
        # Wait for the status checks first
        status = ec2.get_all_instance_status(instance_ids=[inst.id])[0]

        check_stat = "Status:initializing"

        while str(status.system_status) == check_stat and str(status.instance_status) == check_stat:
            time.sleep(10)
            status = ec2.get_all_instance_status(instance_ids=[inst.id])[0]

    return inst

# ec2.get_instance_attribute('i-336b69f6', 'instanceType')
new_contents:

# License under the MIT License - see LICENSE

import boto.ec2
import os
import time

def launch(key_name=None, region='us-west-2', image_id='ami-5189a661',
           instance_type='t2.micro', security_groups='launch-wizard-1',
           user_data=None, initial_check=False):
    '''
    '''

    if not isinstance(security_groups, list):
        security_groups = [security_groups]

    ec2 = boto.ec2.connect_to_region(region)

    reserve = ec2.run_instances(image_id, key_name=key_name,
                                instance_type=instance_type,
                                security_groups=security_groups,
                                user_data=user_data)

    inst = reserve.instances[0]

    while inst.state == u'pending':
        time.sleep(10)
        inst.update()

    if initial_check:
        # Wait for the status checks first
        status = ec2.get_all_instance_status(instance_ids=[inst.id])[0]

        check_stat = "Status:initializing"

        while str(status.system_status) == check_stat and str(status.instance_status) == check_stat:
            time.sleep(10)
            status = ec2.get_all_instance_status(instance_ids=[inst.id])[0]

    return inst

# ec2.get_instance_attribute('i-336b69f6', 'instanceType')
Disable the extra check by default
Disable the extra check by default
Python
mit
Astroua/aws_controller,Astroua/aws_controller
<REPLACE_OLD> initial_check=True): <REPLACE_NEW> initial_check=False): <REPLACE_END> <|endoftext|> # License under the MIT License - see LICENSE import boto.ec2 import os import time def launch(key_name=None, region='us-west-2', image_id='ami-5189a661', instance_type='t2.micro', security_groups='launch-wizard-1', user_data=None, initial_check=False): ''' ''' if not isinstance(security_groups, list): security_groups = [security_groups] ec2 = boto.ec2.connect_to_region(region) reserve = ec2.run_instances(image_id, key_name=key_name, instance_type=instance_type, security_groups=security_groups, user_data=user_data) inst = reserve.instances[0] while inst.state == u'pending': time.sleep(10) inst.update() if initial_check: # Wait for the status checks first status = ec2.get_all_instance_status(instance_ids=[inst.id])[0] check_stat = "Status:initializing" while str(status.system_status) == check_stat and str(status.instance_status) == check_stat: time.sleep(10) status = ec2.get_all_instance_status(instance_ids=[inst.id])[0] return inst # ec2.get_instance_attribute('i-336b69f6', 'instanceType')
Disable the extra check by default # License under the MIT License - see LICENSE import boto.ec2 import os import time def launch(key_name=None, region='us-west-2', image_id='ami-5189a661', instance_type='t2.micro', security_groups='launch-wizard-1', user_data=None, initial_check=True): ''' ''' if not isinstance(security_groups, list): security_groups = [security_groups] ec2 = boto.ec2.connect_to_region(region) reserve = ec2.run_instances(image_id, key_name=key_name, instance_type=instance_type, security_groups=security_groups, user_data=user_data) inst = reserve.instances[0] while inst.state == u'pending': time.sleep(10) inst.update() if initial_check: # Wait for the status checks first status = ec2.get_all_instance_status(instance_ids=[inst.id])[0] check_stat = "Status:initializing" while str(status.system_status) == check_stat and str(status.instance_status) == check_stat: time.sleep(10) status = ec2.get_all_instance_status(instance_ids=[inst.id])[0] return inst # ec2.get_instance_attribute('i-336b69f6', 'instanceType')
d08b0a11176225302b01657e9d396066923ae7ce
polyfilter.py
polyfilter.py
new_contents:

#!/usr/bin/python

# Polyfilter - filters polygon files for very small polygons

import sys

f = open(sys.argv[1])

def PolygonArea(corners):
    n = len(corners)  # of corners
    area = 0.0
    for i in range(n):
        j = (i + 1) % n
        area += corners[i][0] * corners[j][1]
        area -= corners[j][0] * corners[i][1]
    area = abs(area) / 2.0
    return area

allAreas = []
maxArea = 0

while True:
    line = f.readline().strip()
    if line == "":
        break
    points = line.split(" ")
    corners = []
    for p in points:
        ip = map(float, p.split(","))
        corners.append(ip)
    area = PolygonArea(corners)
    if (area > 500):
        print " ".join(points)
Add filter program to remove small polygons
Add filter program to remove small polygons
Python
mit
jmacarthur/ld34,jmacarthur/ld34,jmacarthur/ld34
<INSERT> #!/usr/bin/python # Polyfilter - filters polygon files for very small polygons import sys f = open(sys.argv[1]) def PolygonArea(corners): <INSERT_END> <INSERT> n = len(corners) # of corners area = 0.0 for i in range(n): j = (i + 1) % n area += corners[i][0] * corners[j][1] area -= corners[j][0] * corners[i][1] area = abs(area) / 2.0 return area allAreas = [] maxArea = 0 while True: line = f.readline().strip() if line=="": break points = line.split(" ") corners = [] for p in points: ip = map(float,p.split(",")) corners.append(ip) area = PolygonArea(corners) if (area > 500): print " ".join(points) <INSERT_END> <|endoftext|> #!/usr/bin/python # Polyfilter - filters polygon files for very small polygons import sys f = open(sys.argv[1]) def PolygonArea(corners): n = len(corners) # of corners area = 0.0 for i in range(n): j = (i + 1) % n area += corners[i][0] * corners[j][1] area -= corners[j][0] * corners[i][1] area = abs(area) / 2.0 return area allAreas = [] maxArea = 0 while True: line = f.readline().strip() if line=="": break points = line.split(" ") corners = [] for p in points: ip = map(float,p.split(",")) corners.append(ip) area = PolygonArea(corners) if (area > 500): print " ".join(points)
Add filter program to remove small polygons
5631276591cf2c4e3c83920da32857e47286d9c9
wanikani/django.py
wanikani/django.py
old_contents:

from __future__ import absolute_import

import os
import logging

from django.http import HttpResponse
from django.views.generic.base import View
from icalendar import Calendar, Event

from wanikani.core import WaniKani, Radical, Kanji

CONFIG_PATH = os.path.join(os.path.expanduser('~'), '.wanikani')

with open(CONFIG_PATH) as fp:
    API_KEY = fp.read()

logger = logging.getLogger(__name__)

class WaniKaniView(View):
    def get(self, request, *args, **kwargs):
        client = WaniKani(API_KEY)

        level = client.profile()['level']
        queue = client.query(level, items=[Radical, Kanji],
                             include=[u'apprentice'])

        cal = Calendar()
        cal.add('prodid', '-//My calendar product//mxm.dk//')
        cal.add('version', '2.0')

        for ts in sorted(queue):
            if not len(queue[ts]):
                continue

            counts = {
                Radical: 0,
                Kanji: 0,
            }
            for obj in queue[ts]:
                counts[obj.__class__] += 1

            event = Event()
            event.add('summary', 'R: {0} K: {1}'.format(
                counts[Radical], counts[Kanji]
            ))
            event.add('dtstart', ts)
            event.add('dtend', ts)
            event['uid'] = str(ts)

            cal.add_component(event)

        return HttpResponse(
            content=cal.to_ical(),
            content_type='text/plain; charset=utf-8'
        )
from __future__ import absolute_import from django.http import HttpResponse from django.views.generic.base import View from icalendar import Calendar, Event from wanikani.core import WaniKani, Radical, Kanji class WaniKaniView(View): def get(self, request, **kwargs): client = WaniKani(kwargs['api_key']) level = client.profile()['level'] queue = client.query(level, items=[Radical, Kanji], include=[u'apprentice']) cal = Calendar() cal.add('prodid', '-//Wanikani Blockers//github.com/kfdm/wanikani//') cal.add('version', '2.0') for ts in sorted(queue): if not len(queue[ts]): continue counts = { Radical: 0, Kanji: 0, } for obj in queue[ts]: counts[obj.__class__] += 1 event = Event() event.add('summary', 'R: {0} K: {1}'.format( counts[Radical], counts[Kanji] )) event.add('dtstart', ts) event.add('dtend', ts) event['uid'] = str(ts) cal.add_component(event) return HttpResponse( content=cal.to_ical(), content_type='text/plain; charset=utf-8' )
Switch to getting the API key from the URL instead of a config file.
Switch to getting the API key from the URL instead of a config file. Allows other people to get their anki calendar if they want.
Python
mit
kfdm/wanikani,kfdm/wanikani
<REPLACE_OLD> from <REPLACE_NEW> from <REPLACE_END> <REPLACE_OLD> absolute_import import os import logging from <REPLACE_NEW> absolute_import from <REPLACE_END> <REPLACE_OLD> Kanji CONFIG_PATH = os.path.join(os.path.expanduser('~'), '.wanikani') with open(CONFIG_PATH) as fp: API_KEY = fp.read() logger = logging.getLogger(__name__) class <REPLACE_NEW> Kanji class <REPLACE_END> <DELETE> *args, <DELETE_END> <REPLACE_OLD> WaniKani(API_KEY) <REPLACE_NEW> WaniKani(kwargs['api_key']) <REPLACE_END> <REPLACE_OLD> '-//My calendar product//mxm.dk//') <REPLACE_NEW> '-//Wanikani Blockers//github.com/kfdm/wanikani//') <REPLACE_END> <|endoftext|> from __future__ import absolute_import from django.http import HttpResponse from django.views.generic.base import View from icalendar import Calendar, Event from wanikani.core import WaniKani, Radical, Kanji class WaniKaniView(View): def get(self, request, **kwargs): client = WaniKani(kwargs['api_key']) level = client.profile()['level'] queue = client.query(level, items=[Radical, Kanji], include=[u'apprentice']) cal = Calendar() cal.add('prodid', '-//Wanikani Blockers//github.com/kfdm/wanikani//') cal.add('version', '2.0') for ts in sorted(queue): if not len(queue[ts]): continue counts = { Radical: 0, Kanji: 0, } for obj in queue[ts]: counts[obj.__class__] += 1 event = Event() event.add('summary', 'R: {0} K: {1}'.format( counts[Radical], counts[Kanji] )) event.add('dtstart', ts) event.add('dtend', ts) event['uid'] = str(ts) cal.add_component(event) return HttpResponse( content=cal.to_ical(), content_type='text/plain; charset=utf-8' )
Switch to getting the API key from the URL instead of a config file. Allows other people to get their anki calendar if they want. from __future__ import absolute_import import os import logging from django.http import HttpResponse from django.views.generic.base import View from icalendar import Calendar, Event from wanikani.core import WaniKani, Radical, Kanji CONFIG_PATH = os.path.join(os.path.expanduser('~'), '.wanikani') with open(CONFIG_PATH) as fp: API_KEY = fp.read() logger = logging.getLogger(__name__) class WaniKaniView(View): def get(self, request, *args, **kwargs): client = WaniKani(API_KEY) level = client.profile()['level'] queue = client.query(level, items=[Radical, Kanji], include=[u'apprentice']) cal = Calendar() cal.add('prodid', '-//My calendar product//mxm.dk//') cal.add('version', '2.0') for ts in sorted(queue): if not len(queue[ts]): continue counts = { Radical: 0, Kanji: 0, } for obj in queue[ts]: counts[obj.__class__] += 1 event = Event() event.add('summary', 'R: {0} K: {1}'.format( counts[Radical], counts[Kanji] )) event.add('dtstart', ts) event.add('dtend', ts) event['uid'] = str(ts) cal.add_component(event) return HttpResponse( content=cal.to_ical(), content_type='text/plain; charset=utf-8' )
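A sketch of how the api_key kwarg could be supplied through a Django URLconf; the route pattern and .ics suffix are assumptions, not taken from the record:

# Hypothetical urls.py entry routing the key into WaniKaniView.
from django.conf.urls import url
from wanikani.django import WaniKaniView

urlpatterns = [
    url(r'^(?P<api_key>[0-9a-f-]+)\.ics$', WaniKaniView.as_view()),
]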
849a4e5daf2eb845213ea76179d7a8143148f39a
lib/mixins.py
lib/mixins.py
class Countable(object): @classmethod def count(cls, options={}): return int(cls.get("count", **options)) class Metafields(object): def metafields(self): return Metafield.find(resource=self.__class__.plural, resource_id=self.id) def add_metafield(self, metafield): if self.is_new(): raise ValueError("You can only add metafields to a resource that has been saved") metafield._prefix_options = dict(resource=self.__class__.plural, resource_id=self.id) metafield.save() return metafield class Events(object): def events(self): return Event.find(resource=self.__class__.plural, resource_id=self.id)
class Countable(object): @classmethod def count(cls, _options=None, **kwargs): if _options is None: _options = kwargs return int(cls.get("count", **_options)) class Metafields(object): def metafields(self): return Metafield.find(resource=self.__class__.plural, resource_id=self.id) def add_metafield(self, metafield): if self.is_new(): raise ValueError("You can only add metafields to a resource that has been saved") metafield._prefix_options = dict(resource=self.__class__.plural, resource_id=self.id) metafield.save() return metafield class Events(object): def events(self): return Event.find(resource=self.__class__.plural, resource_id=self.id)
Allow count method to be used the same way as find.
Allow count method to be used the same way as find.
Python
mit
varesa/shopify_python_api,metric-collective/shopify_python_api,gavinballard/shopify_python_api,asiviero/shopify_python_api,ifnull/shopify_python_api,Shopify/shopify_python_api,SmileyJames/shopify_python_api
<REPLACE_OLD> options={}): <REPLACE_NEW> _options=None, **kwargs): if _options is None: _options = kwargs <REPLACE_END> <REPLACE_OLD> **options)) class <REPLACE_NEW> **_options)) class <REPLACE_END> <|endoftext|> class Countable(object): @classmethod def count(cls, _options=None, **kwargs): if _options is None: _options = kwargs return int(cls.get("count", **_options)) class Metafields(object): def metafields(self): return Metafield.find(resource=self.__class__.plural, resource_id=self.id) def add_metafield(self, metafield): if self.is_new(): raise ValueError("You can only add metafields to a resource that has been saved") metafield._prefix_options = dict(resource=self.__class__.plural, resource_id=self.id) metafield.save() return metafield class Events(object): def events(self): return Event.find(resource=self.__class__.plural, resource_id=self.id)
Allow count method to be used the same way as find. class Countable(object): @classmethod def count(cls, options={}): return int(cls.get("count", **options)) class Metafields(object): def metafields(self): return Metafield.find(resource=self.__class__.plural, resource_id=self.id) def add_metafield(self, metafield): if self.is_new(): raise ValueError("You can only add metafields to a resource that has been saved") metafield._prefix_options = dict(resource=self.__class__.plural, resource_id=self.id) metafield.save() return metafield class Events(object): def events(self): return Event.find(resource=self.__class__.plural, resource_id=self.id)
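The reworked Countable.count accepts either calling convention; a brief sketch where Product and the status filter are made up for illustration:

# Both forms now reach cls.get("count", ...) with the same options.
Product.count({'status': 'active'})  # original dict style still works
Product.count(status='active')       # new keyword style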
4c2c80e0004a758787beb555fbbe789cce5e82fc
nova/tests/test_vmwareapi_vm_util.py
nova/tests/test_vmwareapi_vm_util.py
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # # Copyright 2013 Canonical Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova import exception from nova import test from nova.virt.vmwareapi import fake from nova.virt.vmwareapi import vm_util class fake_session(object): def __init__(self, ret=None): self.ret = ret def _call_method(self, *args): return self.ret class VMwareVMUtilTestCase(test.TestCase): def setUp(self): super(VMwareVMUtilTestCase, self).setUp() def tearDown(self): super(VMwareVMUtilTestCase, self).tearDown() def test_get_datastore_ref_and_name(self): result = vm_util.get_datastore_ref_and_name( fake_session([fake.Datastore()])) self.assertEquals(result[1], "fake-ds") self.assertEquals(result[2], 1024 * 1024 * 1024) self.assertEquals(result[3], 1024 * 1024 * 500) def test_get_datastore_ref_and_name_without_datastore(self): self.assertRaises(exception.DatastoreNotFound, vm_util.get_datastore_ref_and_name, fake_session(), host="fake-host") self.assertRaises(exception.DatastoreNotFound, vm_util.get_datastore_ref_and_name, fake_session(), cluster="fake-cluster")
Fix variable referenced before assignment in vmwareapi code.
Fix variable referenced before assignment in vmwareapi code. Add unit tests for VMwareapi vm_util. Fix bug #1177689 Change-Id: If16109ee626c197227affba122c2e4986d92d2df
Python
apache-2.0
n0ano/gantt,n0ano/gantt
<REPLACE_OLD> <REPLACE_NEW> # vim: tabstop=4 shiftwidth=4 softtabstop=4 # # Copyright 2013 Canonical Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova import exception from nova import test from nova.virt.vmwareapi import fake from nova.virt.vmwareapi import vm_util class fake_session(object): def __init__(self, ret=None): self.ret = ret def _call_method(self, *args): return self.ret class VMwareVMUtilTestCase(test.TestCase): def setUp(self): super(VMwareVMUtilTestCase, self).setUp() def tearDown(self): super(VMwareVMUtilTestCase, self).tearDown() def test_get_datastore_ref_and_name(self): result = vm_util.get_datastore_ref_and_name( fake_session([fake.Datastore()])) self.assertEquals(result[1], "fake-ds") self.assertEquals(result[2], 1024 * 1024 * 1024) self.assertEquals(result[3], 1024 * 1024 * 500) def test_get_datastore_ref_and_name_without_datastore(self): self.assertRaises(exception.DatastoreNotFound, vm_util.get_datastore_ref_and_name, fake_session(), host="fake-host") self.assertRaises(exception.DatastoreNotFound, vm_util.get_datastore_ref_and_name, fake_session(), cluster="fake-cluster") <REPLACE_END> <|endoftext|> # vim: tabstop=4 shiftwidth=4 softtabstop=4 # # Copyright 2013 Canonical Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova import exception from nova import test from nova.virt.vmwareapi import fake from nova.virt.vmwareapi import vm_util class fake_session(object): def __init__(self, ret=None): self.ret = ret def _call_method(self, *args): return self.ret class VMwareVMUtilTestCase(test.TestCase): def setUp(self): super(VMwareVMUtilTestCase, self).setUp() def tearDown(self): super(VMwareVMUtilTestCase, self).tearDown() def test_get_datastore_ref_and_name(self): result = vm_util.get_datastore_ref_and_name( fake_session([fake.Datastore()])) self.assertEquals(result[1], "fake-ds") self.assertEquals(result[2], 1024 * 1024 * 1024) self.assertEquals(result[3], 1024 * 1024 * 500) def test_get_datastore_ref_and_name_without_datastore(self): self.assertRaises(exception.DatastoreNotFound, vm_util.get_datastore_ref_and_name, fake_session(), host="fake-host") self.assertRaises(exception.DatastoreNotFound, vm_util.get_datastore_ref_and_name, fake_session(), cluster="fake-cluster")
Fix variable referenced before assignment in vmwareapi code. Add unit tests for VMwareapi vm_util. Fix bug #1177689 Change-Id: If16109ee626c197227affba122c2e4986d92d2df
ad4ecad2e5785ddeb7bbd595e59dc12345c4b256
xbob/blitz/__init__.py
xbob/blitz/__init__.py
#!/usr/bin/env python # vim: set fileencoding=utf-8 : # Andre Anjos <[email protected]> # Fri 20 Sep 14:45:01 2013 """Blitz++ Array bindings for Python""" import pkg_resources from ._library import array, as_blitz from . import version from .version import module as __version__ from .version import api as __api_version__ def get_include(): """Returns the directory containing the C/C++ API include directives""" return pkg_resources.resource_filename(__name__, 'include') def get_config(): """Returns a string containing the configuration information. """ from .version import externals packages = pkg_resources.require(__name__) this = packages[0] deps = packages[1:] retval = "%s: %s (%s)\n" % (this.key, this.version, this.location) retval += " - c/c++ dependencies:\n" for k in sorted(externals): retval += " - %s: %s\n" % (k, externals[k]) retval += " - python dependencies:\n" for d in deps: retval += " - %s: %s (%s)\n" % (d.key, d.version, d.location) return retval.strip() # gets sphinx autodoc done right - don't remove it __all__ = [_ for _ in dir() if not _.startswith('_')]
#!/usr/bin/env python # vim: set fileencoding=utf-8 : # Andre Anjos <[email protected]> # Fri 20 Sep 14:45:01 2013 """Blitz++ Array bindings for Python""" import pkg_resources from ._library import array, as_blitz from . import version from .version import module as __version__ from .version import api as __api_version__ def get_include(): """Returns the directory containing the C/C++ API include directives""" return pkg_resources.resource_filename(__name__, 'include') def get_config(): """Returns a string containing the configuration information. """ from .version import externals packages = pkg_resources.require(__name__) this = packages[0] deps = packages[1:] retval = "%s: %s [api=0x%04x] (%s)\n" % (this.key, this.version, version.api, this.location) retval += " - c/c++ dependencies:\n" for k in sorted(externals): retval += " - %s: %s\n" % (k, externals[k]) retval += " - python dependencies:\n" for d in deps: retval += " - %s: %s (%s)\n" % (d.key, d.version, d.location) return retval.strip() # gets sphinx autodoc done right - don't remove it __all__ = [_ for _ in dir() if not _.startswith('_')]
Add API number in get_config()
Add API number in get_config()
Python
bsd-3-clause
tiagofrepereira2012/bob.blitz,tiagofrepereira2012/bob.blitz,tiagofrepereira2012/bob.blitz
<INSERT> [api=0x%04x] <INSERT_END> <REPLACE_OLD> this.version, <REPLACE_NEW> this.version, version.api, <REPLACE_END> <|endoftext|> #!/usr/bin/env python # vim: set fileencoding=utf-8 : # Andre Anjos <[email protected]> # Fri 20 Sep 14:45:01 2013 """Blitz++ Array bindings for Python""" import pkg_resources from ._library import array, as_blitz from . import version from .version import module as __version__ from .version import api as __api_version__ def get_include(): """Returns the directory containing the C/C++ API include directives""" return pkg_resources.resource_filename(__name__, 'include') def get_config(): """Returns a string containing the configuration information. """ from .version import externals packages = pkg_resources.require(__name__) this = packages[0] deps = packages[1:] retval = "%s: %s [api=0x%04x] (%s)\n" % (this.key, this.version, version.api, this.location) retval += " - c/c++ dependencies:\n" for k in sorted(externals): retval += " - %s: %s\n" % (k, externals[k]) retval += " - python dependencies:\n" for d in deps: retval += " - %s: %s (%s)\n" % (d.key, d.version, d.location) return retval.strip() # gets sphinx autodoc done right - don't remove it __all__ = [_ for _ in dir() if not _.startswith('_')]
Add API number in get_config() #!/usr/bin/env python # vim: set fileencoding=utf-8 : # Andre Anjos <[email protected]> # Fri 20 Sep 14:45:01 2013 """Blitz++ Array bindings for Python""" import pkg_resources from ._library import array, as_blitz from . import version from .version import module as __version__ from .version import api as __api_version__ def get_include(): """Returns the directory containing the C/C++ API include directives""" return pkg_resources.resource_filename(__name__, 'include') def get_config(): """Returns a string containing the configuration information. """ from .version import externals packages = pkg_resources.require(__name__) this = packages[0] deps = packages[1:] retval = "%s: %s (%s)\n" % (this.key, this.version, this.location) retval += " - c/c++ dependencies:\n" for k in sorted(externals): retval += " - %s: %s\n" % (k, externals[k]) retval += " - python dependencies:\n" for d in deps: retval += " - %s: %s (%s)\n" % (d.key, d.version, d.location) return retval.strip() # gets sphinx autodoc done right - don't remove it __all__ = [_ for _ in dir() if not _.startswith('_')]
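The new [api=0x%04x] placeholder renders version.api as four zero-padded hex digits; for example, with a made-up api number:

# 0x0201 is a hypothetical api value, shown only to illustrate the format.
'[api=0x%04x]' % 0x0201  # -> '[api=0x0201]'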
fd50ce4b22b4f3d948a64ed400340c0fc744de49
src/waldur_core/core/migrations/0008_changeemailrequest_uuid.py
src/waldur_core/core/migrations/0008_changeemailrequest_uuid.py
from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('core', '0007_changeemailrequest'), ] operations = [ migrations.AddField( model_name='changeemailrequest', name='uuid', field=models.UUIDField(), ), ]
from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('core', '0007_changeemailrequest'), ] operations = [ migrations.AddField( model_name='changeemailrequest', name='uuid', field=models.UUIDField(null=True), ), ]
Allow null values in UUID field.
Allow null values in UUID field.
Python
mit
opennode/nodeconductor-assembly-waldur,opennode/waldur-mastermind,opennode/nodeconductor-assembly-waldur,opennode/waldur-mastermind,opennode/waldur-mastermind,opennode/nodeconductor-assembly-waldur,opennode/waldur-mastermind
<REPLACE_OLD> model_name='changeemailrequest', name='uuid', field=models.UUIDField(), <REPLACE_NEW> model_name='changeemailrequest', name='uuid', field=models.UUIDField(null=True), <REPLACE_END> <|endoftext|> from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('core', '0007_changeemailrequest'), ] operations = [ migrations.AddField( model_name='changeemailrequest', name='uuid', field=models.UUIDField(null=True), ), ]
Allow null values in UUID field. from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('core', '0007_changeemailrequest'), ] operations = [ migrations.AddField( model_name='changeemailrequest', name='uuid', field=models.UUIDField(), ), ]
7bf86f0ef0572e86370726ff25479d051b3fbd3e
scripts/check_dataset_integrity.py
scripts/check_dataset_integrity.py
import os from collections import defaultdict import click import dtoolcore @click.command() @click.argument('dataset_path') def main(dataset_path): uri = "disk:{}".format(dataset_path) proto_dataset = dtoolcore.ProtoDataSet.from_uri(uri) overlays = defaultdict(dict) for handle in proto_dataset._storage_broker.iter_item_handles(): identifier = dtoolcore.utils.generate_identifier(handle) item_metadata = proto_dataset._storage_broker.get_item_metadata(handle) for k, v in item_metadata.items(): overlays[k][identifier] = v print overlays.keys() # for handle in proto_dataset._storage_broker.iter_item_handles(): # print(handle) for overlay in overlays: print len(overlays[overlay]) if __name__ == '__main__': main()
Add script to check dataset integrity
Add script to check dataset integrity
Python
mit
JIC-Image-Analysis/senescence-in-field,JIC-Image-Analysis/senescence-in-field,JIC-Image-Analysis/senescence-in-field
<INSERT> import os from collections import defaultdict import click import dtoolcore @click.command() @click.argument('dataset_path') def main(dataset_path): <INSERT_END> <INSERT> uri = "disk:{}".format(dataset_path) proto_dataset = dtoolcore.ProtoDataSet.from_uri(uri) overlays = defaultdict(dict) for handle in proto_dataset._storage_broker.iter_item_handles(): identifier = dtoolcore.utils.generate_identifier(handle) item_metadata = proto_dataset._storage_broker.get_item_metadata(handle) for k, v in item_metadata.items(): overlays[k][identifier] = v print overlays.keys() # for handle in proto_dataset._storage_broker.iter_item_handles(): # print(handle) for overlay in overlays: print len(overlays[overlay]) if __name__ == '__main__': main() <INSERT_END> <|endoftext|> import os from collections import defaultdict import click import dtoolcore @click.command() @click.argument('dataset_path') def main(dataset_path): uri = "disk:{}".format(dataset_path) proto_dataset = dtoolcore.ProtoDataSet.from_uri(uri) overlays = defaultdict(dict) for handle in proto_dataset._storage_broker.iter_item_handles(): identifier = dtoolcore.utils.generate_identifier(handle) item_metadata = proto_dataset._storage_broker.get_item_metadata(handle) for k, v in item_metadata.items(): overlays[k][identifier] = v print overlays.keys() # for handle in proto_dataset._storage_broker.iter_item_handles(): # print(handle) for overlay in overlays: print len(overlays[overlay]) if __name__ == '__main__': main()
Add script to check dataset integrity
f381214b4d05fb0c809888ea6362a4125ae3b779
test/Configure/VariantDir2.py
test/Configure/VariantDir2.py
#!/usr/bin/env python # # __COPYRIGHT__ # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__" """ Verify that Configure contexts work with SConstruct/SConscript structure """ import os import TestSCons test = TestSCons.TestSCons() test.write('SConstruct', """\ SConscript('SConscript', build_dir='build', src='.') """) test.write('SConscript', """\ env = Environment() config = env.Configure(conf_dir='sconf', log_file='config.log') config.TryRun("int main() {}", ".c") config.Finish() """) test.run() test.pass_test()
Add test case for configure failure.
Add test case for configure failure. TryRun fails to find the executable when VariantDir is set up from SConscript/SConstruct. git-svn-id: 7892167f69f80ee5d3024affce49f20c74bcb41d@4363 fdb21ef1-2011-0410-befe-b5e4ea1792b1
Python
mit
azverkan/scons,azverkan/scons,azverkan/scons,azverkan/scons,azverkan/scons
<REPLACE_OLD> <REPLACE_NEW> #!/usr/bin/env python # # __COPYRIGHT__ # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__" """ Verify that Configure contexts work with SConstruct/SConscript structure """ import os import TestSCons test = TestSCons.TestSCons() test.write('SConstruct', """\ SConscript('SConscript', build_dir='build', src='.') """) test.write('SConscript', """\ env = Environment() config = env.Configure(conf_dir='sconf', log_file='config.log') config.TryRun("int main() {}", ".c") config.Finish() """) test.run() test.pass_test() <REPLACE_END> <|endoftext|> #!/usr/bin/env python # # __COPYRIGHT__ # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__" """ Verify that Configure contexts work with SConstruct/SConscript structure """ import os import TestSCons test = TestSCons.TestSCons() test.write('SConstruct', """\ SConscript('SConscript', build_dir='build', src='.') """) test.write('SConscript', """\ env = Environment() config = env.Configure(conf_dir='sconf', log_file='config.log') config.TryRun("int main() {}", ".c") config.Finish() """) test.run() test.pass_test()
Add test case for configure failure. TryRun fails to find the executable when VariantDir is set up from SConscript/SConstruct. git-svn-id: 7892167f69f80ee5d3024affce49f20c74bcb41d@4363 fdb21ef1-2011-0410-befe-b5e4ea1792b1
7dced29bcf8b2b5f5220f5dbfeaf631d9d5fc409
examples/backtest.py
examples/backtest.py
import time import logging from pythonjsonlogger import jsonlogger from flumine import FlumineBacktest, clients from strategies.lowestlayer import LowestLayer logger = logging.getLogger() custom_format = "%(asctime) %(levelname) %(message)" log_handler = logging.StreamHandler() formatter = jsonlogger.JsonFormatter(custom_format) formatter.converter = time.gmtime log_handler.setFormatter(formatter) logger.addHandler(log_handler) logger.setLevel(logging.INFO) client = clients.BacktestClient() framework = FlumineBacktest(client=client) markets = ["tests/resources/PRO-1.170258213"] strategy = LowestLayer( market_filter={"markets": markets}, max_order_exposure=1000, max_selection_exposure=105, context={"stake": 2}, ) framework.add_strategy(strategy) framework.run() for market in framework.markets: print("Profit: {0:.2f}".format(sum([o.simulated.profit for o in market.blotter]))) for order in market.blotter: print( order.selection_id, order.responses.date_time_placed, order.status, order.order_type.price, order.average_price_matched, order.size_matched, order.simulated.profit, )
import time import logging from pythonjsonlogger import jsonlogger from flumine import FlumineBacktest, clients from strategies.lowestlayer import LowestLayer logger = logging.getLogger() custom_format = "%(asctime) %(levelname) %(message)" log_handler = logging.StreamHandler() formatter = jsonlogger.JsonFormatter(custom_format) formatter.converter = time.gmtime log_handler.setFormatter(formatter) logger.addHandler(log_handler) logger.setLevel(logging.INFO) # Set to logging.CRITICAL to speed up backtest client = clients.BacktestClient() framework = FlumineBacktest(client=client) markets = ["tests/resources/PRO-1.170258213"] strategy = LowestLayer( market_filter={"markets": markets}, max_order_exposure=1000, max_selection_exposure=105, context={"stake": 2}, ) framework.add_strategy(strategy) framework.run() for market in framework.markets: print("Profit: {0:.2f}".format(sum([o.simulated.profit for o in market.blotter]))) for order in market.blotter: print( order.selection_id, order.responses.date_time_placed, order.status, order.order_type.price, order.average_price_matched, order.size_matched, order.simulated.profit, )
Comment to say using logging.CRITICAL is faster
Comment to say using logging.CRITICAL is faster
Python
mit
liampauling/flumine
<REPLACE_OLD> time.gmtime log_handler.setFormatter(formatter) logger.addHandler(log_handler) logger.setLevel(logging.INFO) client <REPLACE_NEW> time.gmtime log_handler.setFormatter(formatter) logger.addHandler(log_handler) logger.setLevel(logging.INFO) # Set to logging.CRITICAL to speed up backtest client <REPLACE_END> <|endoftext|> import time import logging from pythonjsonlogger import jsonlogger from flumine import FlumineBacktest, clients from strategies.lowestlayer import LowestLayer logger = logging.getLogger() custom_format = "%(asctime) %(levelname) %(message)" log_handler = logging.StreamHandler() formatter = jsonlogger.JsonFormatter(custom_format) formatter.converter = time.gmtime log_handler.setFormatter(formatter) logger.addHandler(log_handler) logger.setLevel(logging.INFO) # Set to logging.CRITICAL to speed up backtest client = clients.BacktestClient() framework = FlumineBacktest(client=client) markets = ["tests/resources/PRO-1.170258213"] strategy = LowestLayer( market_filter={"markets": markets}, max_order_exposure=1000, max_selection_exposure=105, context={"stake": 2}, ) framework.add_strategy(strategy) framework.run() for market in framework.markets: print("Profit: {0:.2f}".format(sum([o.simulated.profit for o in market.blotter]))) for order in market.blotter: print( order.selection_id, order.responses.date_time_placed, order.status, order.order_type.price, order.average_price_matched, order.size_matched, order.simulated.profit, )
Comment to say using logging.CRITICAL is faster import time import logging from pythonjsonlogger import jsonlogger from flumine import FlumineBacktest, clients from strategies.lowestlayer import LowestLayer logger = logging.getLogger() custom_format = "%(asctime) %(levelname) %(message)" log_handler = logging.StreamHandler() formatter = jsonlogger.JsonFormatter(custom_format) formatter.converter = time.gmtime log_handler.setFormatter(formatter) logger.addHandler(log_handler) logger.setLevel(logging.INFO) client = clients.BacktestClient() framework = FlumineBacktest(client=client) markets = ["tests/resources/PRO-1.170258213"] strategy = LowestLayer( market_filter={"markets": markets}, max_order_exposure=1000, max_selection_exposure=105, context={"stake": 2}, ) framework.add_strategy(strategy) framework.run() for market in framework.markets: print("Profit: {0:.2f}".format(sum([o.simulated.profit for o in market.blotter]))) for order in market.blotter: print( order.selection_id, order.responses.date_time_placed, order.status, order.order_type.price, order.average_price_matched, order.size_matched, order.simulated.profit, )
024b862bdd4ae3bf4c3058ef32b6016b280a4cf6
tests/web/test_request.py
tests/web/test_request.py
import unittest from performance.web import Request, RequestTypeError, RequestTimeError class RequestTestCase(unittest.TestCase): def setUp(self): self.url = 'http://www.google.com' def test_constants(self): self.assertEqual('get', Request.GET) self.assertEqual('post', Request.POST) def test_init(self): request = Request(url=self.url) self.assertEqual(Request.GET, request.type) self.assertEqual(self.url, request.url) request = Request(url=self.url, type=Request.POST) self.assertEqual(Request.POST, request.type) def test_do(self): request = Request(url=self.url, type=Request.GET) request.do() self.assertTrue(hasattr(request, 'status_code')) request.type = Request.POST request.do() self.assertTrue(hasattr(request, 'status_code')) def test_invalid_type(self): type = 'foo_bar' request = Request(url=self.url, type=type) with self.assertRaises(RequestTypeError) as error: request.do() self.assertEqual('Invalid request type "%s"' % type, error.exception.__str__()) def test_response_time(self): request = Request(url=self.url, type=Request.GET) request.do() self.assertEqual(request.finished - request.started, request.get_response_time()) def test_time_error(self): request = Request(url=self.url, type=Request.GET) with self.assertRaises(RequestTimeError): request.get_response_time()
import unittest from performance.web import Request, RequestTypeError, RequestTimeError class RequestTestCase(unittest.TestCase): def setUp(self): self.host = 'http://www.google.com' def test_constants(self): self.assertEqual('get', Request.GET) self.assertEqual('post', Request.POST) def test_init(self): request = Request(url=self.host) self.assertEqual(Request.GET, request.type) self.assertEqual(self.host, request.url) request = Request(url=self.host, type=Request.POST) self.assertEqual(Request.POST, request.type) def test_do(self): request = Request(url=self.host, type=Request.GET) request.do() def test_invalid_type(self): type = 'foo_bar' request = Request(url=self.host, type=type) with self.assertRaises(RequestTypeError) as error: request.do() self.assertEqual('Invalid request type "%s"' % type, error.exception.__str__())
Remove tests for response_time, update variable names
Remove tests for response_time, update variable names
Python
mit
BakeCode/performance-testing,BakeCode/performance-testing
<REPLACE_OLD> self.url <REPLACE_NEW> self.host <REPLACE_END> <REPLACE_OLD> Request(url=self.url) <REPLACE_NEW> Request(url=self.host) <REPLACE_END> <REPLACE_OLD> self.assertEqual(self.url, <REPLACE_NEW> self.assertEqual(self.host, <REPLACE_END> <REPLACE_OLD> Request(url=self.url, <REPLACE_NEW> Request(url=self.host, <REPLACE_END> <REPLACE_OLD> Request(url=self.url, <REPLACE_NEW> Request(url=self.host, <REPLACE_END> <REPLACE_OLD> request.do() self.assertTrue(hasattr(request, 'status_code')) request.type = Request.POST request.do() self.assertTrue(hasattr(request, 'status_code')) <REPLACE_NEW> request.do() <REPLACE_END> <REPLACE_OLD> Request(url=self.url, <REPLACE_NEW> Request(url=self.host, <REPLACE_END> <REPLACE_OLD> error.exception.__str__()) def test_response_time(self): request = Request(url=self.url, type=Request.GET) request.do() self.assertEqual(request.finished - request.started, request.get_response_time()) def test_time_error(self): request = Request(url=self.url, type=Request.GET) with self.assertRaises(RequestTimeError): request.get_response_time() <REPLACE_NEW> error.exception.__str__()) <REPLACE_END> <|endoftext|> import unittest from performance.web import Request, RequestTypeError, RequestTimeError class RequestTestCase(unittest.TestCase): def setUp(self): self.host = 'http://www.google.com' def test_constants(self): self.assertEqual('get', Request.GET) self.assertEqual('post', Request.POST) def test_init(self): request = Request(url=self.host) self.assertEqual(Request.GET, request.type) self.assertEqual(self.host, request.url) request = Request(url=self.host, type=Request.POST) self.assertEqual(Request.POST, request.type) def test_do(self): request = Request(url=self.host, type=Request.GET) request.do() def test_invalid_type(self): type = 'foo_bar' request = Request(url=self.host, type=type) with self.assertRaises(RequestTypeError) as error: request.do() self.assertEqual('Invalid request type "%s"' % type, error.exception.__str__())
Remove tests for response_time, update variable names import unittest from performance.web import Request, RequestTypeError, RequestTimeError class RequestTestCase(unittest.TestCase): def setUp(self): self.url = 'http://www.google.com' def test_constants(self): self.assertEqual('get', Request.GET) self.assertEqual('post', Request.POST) def test_init(self): request = Request(url=self.url) self.assertEqual(Request.GET, request.type) self.assertEqual(self.url, request.url) request = Request(url=self.url, type=Request.POST) self.assertEqual(Request.POST, request.type) def test_do(self): request = Request(url=self.url, type=Request.GET) request.do() self.assertTrue(hasattr(request, 'status_code')) request.type = Request.POST request.do() self.assertTrue(hasattr(request, 'status_code')) def test_invalid_type(self): type = 'foo_bar' request = Request(url=self.url, type=type) with self.assertRaises(RequestTypeError) as error: request.do() self.assertEqual('Invalid request type "%s"' % type, error.exception.__str__()) def test_response_time(self): request = Request(url=self.url, type=Request.GET) request.do() self.assertEqual(request.finished - request.started, request.get_response_time()) def test_time_error(self): request = Request(url=self.url, type=Request.GET) with self.assertRaises(RequestTimeError): request.get_response_time()
5f416dadecf21accbaccd69740c5398967ec4a7c
doubles/nose.py
doubles/nose.py
from __future__ import absolute_import import sys from nose.plugins.base import Plugin from doubles.lifecycle import setup, verify, teardown, current_space from doubles.exceptions import MockExpectationError class NoseIntegration(Plugin): name = 'doubles' def beforeTest(self, test): setup() def afterTest(self, test): if current_space(): teardown() def prepareTestCase(self, test): def wrapped(result): test.test.run() if result.failures or result.errors: return try: if current_space(): verify() except MockExpectationError: result.addFailure(test.test, sys.exc_info()) return wrapped
from __future__ import absolute_import import sys from nose.plugins.base import Plugin from doubles.lifecycle import setup, verify, teardown, current_space from doubles.exceptions import MockExpectationError class NoseIntegration(Plugin): name = 'doubles' def beforeTest(self, test): setup() def afterTest(self, test): if current_space(): teardown() def prepareTestCase(self, test): def wrapped(result): test.test.run() try: if current_space(): verify() except MockExpectationError: result.addFailure(test.test, sys.exc_info()) return wrapped
Remove verification skipping in Nose plugin for now.
Remove verification skipping in Nose plugin for now.
Python
mit
uber/doubles
<REPLACE_OLD> test.test.run() if result.failures or result.errors: return <REPLACE_NEW> test.test.run() <REPLACE_END> <REPLACE_OLD> sys.exc_info()) <REPLACE_NEW> sys.exc_info()) <REPLACE_END> <|endoftext|> from __future__ import absolute_import import sys from nose.plugins.base import Plugin from doubles.lifecycle import setup, verify, teardown, current_space from doubles.exceptions import MockExpectationError class NoseIntegration(Plugin): name = 'doubles' def beforeTest(self, test): setup() def afterTest(self, test): if current_space(): teardown() def prepareTestCase(self, test): def wrapped(result): test.test.run() try: if current_space(): verify() except MockExpectationError: result.addFailure(test.test, sys.exc_info()) return wrapped
Remove verification skipping in Nose plugin for now. from __future__ import absolute_import import sys from nose.plugins.base import Plugin from doubles.lifecycle import setup, verify, teardown, current_space from doubles.exceptions import MockExpectationError class NoseIntegration(Plugin): name = 'doubles' def beforeTest(self, test): setup() def afterTest(self, test): if current_space(): teardown() def prepareTestCase(self, test): def wrapped(result): test.test.run() if result.failures or result.errors: return try: if current_space(): verify() except MockExpectationError: result.addFailure(test.test, sys.exc_info()) return wrapped
5c61d7f125078cb6b3bd0c5700ae9219baab0078
webapp/tests/test_dashboard.py
webapp/tests/test_dashboard.py
from django.core.urlresolvers import reverse from django.test import TestCase class DashboardTest(TestCase): def test_dashboard(self): url = reverse('graphite.dashboard.views.dashboard') response = self.client.get(url) self.assertEqual(response.status_code, 200)
from django.core.urlresolvers import reverse from django.test import TestCase class DashboardTest(TestCase): def test_dashboard(self): url = reverse('dashboard') response = self.client.get(url) self.assertEqual(response.status_code, 200)
Update reverse call to use named URL
Update reverse call to use named URL
Python
apache-2.0
redice/graphite-web,dbn/graphite-web,Skyscanner/graphite-web,penpen/graphite-web,lyft/graphite-web,esnet/graphite-web,bpaquet/graphite-web,atnak/graphite-web,section-io/graphite-web,kkdk5535/graphite-web,cosm0s/graphite-web,cgvarela/graphite-web,gwaldo/graphite-web,redice/graphite-web,edwardmlyte/graphite-web,atnak/graphite-web,criteo-forks/graphite-web,bmhatfield/graphite-web,obfuscurity/graphite-web,AICIDNN/graphite-web,edwardmlyte/graphite-web,gwaldo/graphite-web,penpen/graphite-web,cbowman0/graphite-web,Skyscanner/graphite-web,JeanFred/graphite-web,cgvarela/graphite-web,bruce-lyft/graphite-web,criteo-forks/graphite-web,phreakocious/graphite-web,lyft/graphite-web,mcoolive/graphite-web,piotr1212/graphite-web,esnet/graphite-web,zBMNForks/graphite-web,axibase/graphite-web,blacked/graphite-web,AICIDNN/graphite-web,kkdk5535/graphite-web,atnak/graphite-web,ZelunZhang/graphite-web,graphite-server/graphite-web,cgvarela/graphite-web,graphite-server/graphite-web,zBMNForks/graphite-web,synedge/graphite-web,pu239ppy/graphite-web,axibase/graphite-web,gwaldo/graphite-web,jssjr/graphite-web,esnet/graphite-web,Skyscanner/graphite-web,bmhatfield/graphite-web,DanCech/graphite-web,lfckop/graphite-web,JeanFred/graphite-web,edwardmlyte/graphite-web,brutasse/graphite-web,phreakocious/graphite-web,lyft/graphite-web,mcoolive/graphite-web,pu239ppy/graphite-web,bmhatfield/graphite-web,edwardmlyte/graphite-web,ZelunZhang/graphite-web,DanCech/graphite-web,section-io/graphite-web,penpen/graphite-web,cbowman0/graphite-web,Invoca/graphite-web,johnseekins/graphite-web,blacked/graphite-web,bpaquet/graphite-web,cbowman0/graphite-web,kkdk5535/graphite-web,lyft/graphite-web,criteo-forks/graphite-web,JeanFred/graphite-web,redice/graphite-web,graphite-server/graphite-web,dbn/graphite-web,pu239ppy/graphite-web,piotr1212/graphite-web,brutasse/graphite-web,phreakocious/graphite-web,mcoolive/graphite-web,lfckop/graphite-web,penpen/graphite-web,DanCech/graphite-web,johnseekins/graphite-web,blacked/graphite-web,bpaquet/graphite-web,cbowman0/graphite-web,cgvarela/graphite-web,markolson/graphite-web,bpaquet/graphite-web,graphite-project/graphite-web,bruce-lyft/graphite-web,Invoca/graphite-web,Squarespace/graphite-web,lyft/graphite-web,synedge/graphite-web,bbc/graphite-web,piotr1212/graphite-web,pu239ppy/graphite-web,johnseekins/graphite-web,JeanFred/graphite-web,DanCech/graphite-web,obfuscurity/graphite-web,lyft/graphite-web,cosm0s/graphite-web,Invoca/graphite-web,deniszh/graphite-web,krux/graphite-web,bbc/graphite-web,goir/graphite-web,Squarespace/graphite-web,DanCech/graphite-web,Aloomaio/graphite-web,bpaquet/graphite-web,johnseekins/graphite-web,disqus/graphite-web,penpen/graphite-web,section-io/graphite-web,cgvarela/graphite-web,JeanFred/graphite-web,piotr1212/graphite-web,goir/graphite-web,cbowman0/graphite-web,drax68/graphite-web,criteo-forks/graphite-web,synedge/graphite-web,synedge/graphite-web,nkhuyu/graphite-web,JeanFred/graphite-web,phreakocious/graphite-web,Aloomaio/graphite-web,bbc/graphite-web,phreakocious/graphite-web,brutasse/graphite-web,graphite-server/graphite-web,Squarespace/graphite-web,gwaldo/graphite-web,cosm0s/graphite-web,deniszh/graphite-web,pu239ppy/graphite-web,blacked/graphite-web,Invoca/graphite-web,krux/graphite-web,AICIDNN/graphite-web,redice/graphite-web,piotr1212/graphite-web,cbowman0/graphite-web,section-io/graphite-web,obfuscurity/graphite-web,redice/graphite-web,section-io/graphite-web,deniszh/graphite-web,krux/graphite-web,nkhuyu/graphite-web,gwaldo/graphite-web,nkhuyu/graphite-web,graphite-server/graphite-web,deniszh/graphite-web,goir/graphite-web,Aloomaio/graphite-web,drax68/graphite-web,markolson/graphite-web,krux/graphite-web,obfuscurity/graphite-web,dbn/graphite-web,bruce-lyft/graphite-web,markolson/graphite-web,bmhatfield/graphite-web,brutasse/graphite-web,atnak/graphite-web,Skyscanner/graphite-web,krux/graphite-web,blacked/graphite-web,kkdk5535/graphite-web,jssjr/graphite-web,goir/graphite-web,lfckop/graphite-web,bmhatfield/graphite-web,cosm0s/graphite-web,Squarespace/graphite-web,disqus/graphite-web,ZelunZhang/graphite-web,Aloomaio/graphite-web,axibase/graphite-web,disqus/graphite-web,bbc/graphite-web,ZelunZhang/graphite-web,redice/graphite-web,goir/graphite-web,bruce-lyft/graphite-web,ZelunZhang/graphite-web,AICIDNN/graphite-web,axibase/graphite-web,mcoolive/graphite-web,disqus/graphite-web,johnseekins/graphite-web,drax68/graphite-web,drax68/graphite-web,lyft/graphite-web,graphite-project/graphite-web,lfckop/graphite-web,bmhatfield/graphite-web,blacked/graphite-web,axibase/graphite-web,axibase/graphite-web,krux/graphite-web,disqus/graphite-web,drax68/graphite-web,bbc/graphite-web,criteo-forks/graphite-web,pu239ppy/graphite-web,obfuscurity/graphite-web,nkhuyu/graphite-web,drax68/graphite-web,jssjr/graphite-web,obfuscurity/graphite-web,kkdk5535/graphite-web,jssjr/graphite-web,bruce-lyft/graphite-web,Aloomaio/graphite-web,kkdk5535/graphite-web,zBMNForks/graphite-web,DanCech/graphite-web,dbn/graphite-web,Squarespace/graphite-web,AICIDNN/graphite-web,zBMNForks/graphite-web,section-io/graphite-web,mcoolive/graphite-web,edwardmlyte/graphite-web,synedge/graphite-web,graphite-project/graphite-web,atnak/graphite-web,nkhuyu/graphite-web,brutasse/graphite-web,jssjr/graphite-web,disqus/graphite-web,johnseekins/graphite-web,synedge/graphite-web,Skyscanner/graphite-web,Skyscanner/graphite-web,lfckop/graphite-web,zBMNForks/graphite-web,deniszh/graphite-web,nkhuyu/graphite-web,penpen/graphite-web,dbn/graphite-web,blacked/graphite-web,brutasse/graphite-web,deniszh/graphite-web,graphite-server/graphite-web,bpaquet/graphite-web,gwaldo/graphite-web,goir/graphite-web,phreakocious/graphite-web,piotr1212/graphite-web,dbn/graphite-web
<REPLACE_OLD> reverse('graphite.dashboard.views.dashboard') <REPLACE_NEW> reverse('dashboard') <REPLACE_END> <|endoftext|> from django.core.urlresolvers import reverse from django.test import TestCase class DashboardTest(TestCase): def test_dashboard(self): url = reverse('dashboard') response = self.client.get(url) self.assertEqual(response.status_code, 200)
Update reverse call to use named URL from django.core.urlresolvers import reverse from django.test import TestCase class DashboardTest(TestCase): def test_dashboard(self): url = reverse('graphite.dashboard.views.dashboard') response = self.client.get(url) self.assertEqual(response.status_code, 200)
517e8f16dbc24af3371a287e69c4d1361c1744f6
python_scripts/azure_sense.py
python_scripts/azure_sense.py
#!/usr/bin/env python """ sends temperature, humidity and pressure gathered from Sense Hat on Raspberry Pi2 to Azure Table Storage only python works with Azure , not python3, sudo pip install azure-storage invoke (no sudo required): python azure_sense.py """ import time from sense_hat import SenseHat from datetime import datetime from azure.storage.table import TableService __author__ = "Anatoly Mironov @mirontoli" sense = SenseHat() table_service = TableService(account_name='tolle', account_key='ho2zakf/8rmDckS3pGOTPWwIwCzNwVJxd5hDb3R15wms2fZJG/aX53PDsTWBYsuTPwF7802IKk2QcrJ5FO7i6w==') table_name = 'climateData' table_service.create_table(table_name, False) while True: date = datetime.now() iso_date = date.isoformat() temp = "{0:.2f}".format(sense.temp) humidity = "{0:.2f}".format(sense.humidity) pressure = "{0:.2f}".format(sense.pressure) entry = {'PartitionKey': 'climate', 'RowKey': iso_date, 'Temperature': temp, 'Humidity':humidity, 'Pressure':pressure} table_service.insert_entity(table_name, entry) time.sleep(60) # wait one minute
Add script for sending sense info to azure table storage
Add script for sending sense info to azure table storage
Python
mit
mirontoli/tolle-rasp,mirontoli/tolle-rasp,mirontoli/tolle-rasp,mirontoli/tolle-rasp,mirontoli/tolle-rasp
<INSERT> #!/usr/bin/env python """ sends temperature, humidity and pressure gathered from Sense Hat on Raspberry Pi2 to Azure Table Storage only python works with Azure , not python3, sudo pip install azure-storage invoke (no sudo required): python azure_sense.py """ import time from sense_hat import SenseHat from datetime import datetime from azure.storage.table import TableService __author__ = "Anatoly Mironov @mirontoli" sense = SenseHat() table_service = TableService(account_name='tolle', account_key='ho2zakf/8rmDckS3pGOTPWwIwCzNwVJxd5hDb3R15wms2fZJG/aX53PDsTWBYsuTPwF7802IKk2QcrJ5FO7i6w==') table_name = 'climateData' table_service.create_table(table_name, False) while True: <INSERT_END> <INSERT> date = datetime.now() iso_date = date.isoformat() temp = "{0:.2f}".format(sense.temp) humidity = "{0:.2f}".format(sense.humidity) pressure = "{0:.2f}".format(sense.pressure) entry = {'PartitionKey': 'climate', 'RowKey': iso_date, 'Temperature': temp, 'Humidity':humidity, 'Pressure':pressure} table_service.insert_entity(table_name, entry) time.sleep(60) # wait one minute <INSERT_END> <|endoftext|> #!/usr/bin/env python """ sends temperature, humidity and pressure gathered from Sense Hat on Raspberry Pi2 to Azure Table Storage only python works with Azure , not python3, sudo pip install azure-storage invoke (no sudo required): python azure_sense.py """ import time from sense_hat import SenseHat from datetime import datetime from azure.storage.table import TableService __author__ = "Anatoly Mironov @mirontoli" sense = SenseHat() table_service = TableService(account_name='tolle', account_key='ho2zakf/8rmDckS3pGOTPWwIwCzNwVJxd5hDb3R15wms2fZJG/aX53PDsTWBYsuTPwF7802IKk2QcrJ5FO7i6w==') table_name = 'climateData' table_service.create_table(table_name, False) while True: date = datetime.now() iso_date = date.isoformat() temp = "{0:.2f}".format(sense.temp) humidity = "{0:.2f}".format(sense.humidity) pressure = "{0:.2f}".format(sense.pressure) entry = {'PartitionKey': 'climate', 'RowKey': iso_date, 'Temperature': temp, 'Humidity':humidity, 'Pressure':pressure} table_service.insert_entity(table_name, entry) time.sleep(60) # wait one minute
Add script for sending sense info to azure table storage
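The record embeds the storage account key inline; a hedged alternative sketch reads it from the environment instead (the variable name is an assumption):

# AZURE_TABLE_KEY is a hypothetical environment variable name.
import os
from azure.storage.table import TableService
table_service = TableService(account_name='tolle',
                             account_key=os.environ['AZURE_TABLE_KEY'])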
f2ab04ec2eb870e661223fd397d7c5a23935a233
src/apps/employees/schema/types.py
src/apps/employees/schema/types.py
import graphene from graphene_django.types import DjangoObjectType, ObjectType from graphene_django_extras import ( DjangoFilterPaginateListField, LimitOffsetGraphqlPagination ) from apps.employees import models class EmployeeType(DjangoObjectType): class Meta: model = models.Employee filter_fields = { 'first_name': ['icontains', 'istartswith'], 'last_name': ['icontains', 'istartswith'], 'position': ['exact'], 'id': ['exact'] } interfaces = (graphene.relay.Node,) class PositionType(DjangoObjectType): """ Position graphQL type. Implemented total_employees and employees objects. """ employees = DjangoFilterPaginateListField( EmployeeType, pagination=LimitOffsetGraphqlPagination() ) total_employees = graphene.Int() def resolve_total_employees(self, info): return self.employees.count() def resolve_employees(self, info): return self.employees.all() class Meta: model = models.Position filter_fields = { 'name': ['exact', 'icontains', 'istartswith'], 'id': ['exact'] } interfaces = (graphene.relay.Node,) class SpecializationType(DjangoObjectType): class Meta: model = models.Specialization filter_fields = { 'name': ['exact', 'icontains', 'istartswith'], 'id': ['exact'], } interfaces = (graphene.relay.Node,)
import graphene from graphene_django.types import DjangoObjectType, ObjectType from graphene_django_extras import ( DjangoFilterPaginateListField, LimitOffsetGraphqlPagination ) from apps.employees import models class EmployeeType(DjangoObjectType): class Meta: model = models.Employee filter_fields = { 'first_name': ['icontains', 'istartswith'], 'last_name': ['icontains', 'istartswith'], 'position': ['exact'], 'id': ['exact'] } class PositionType(DjangoObjectType): """ Position graphQL type. Implemented total_employees and employees objects. """ employees = DjangoFilterPaginateListField( EmployeeType, pagination=LimitOffsetGraphqlPagination() ) total_employees = graphene.Int() def resolve_total_employees(self, info): return self.employees.count() def resolve_employees(self, info): return self.employees.all() class Meta: model = models.Position filter_fields = { 'name': ['exact', 'icontains', 'istartswith'], 'id': ['exact'] } class SpecializationType(DjangoObjectType): class Meta: model = models.Specialization filter_fields = { 'name': ['exact', 'icontains', 'istartswith'], 'id': ['exact'], }
Remove Node interfaces (use origin id for objects)
Remove Node interfaces (use origin id for objects)
Python
mit
wis-software/office-manager
<REPLACE_OLD> } interfaces = (graphene.relay.Node,) class <REPLACE_NEW> } class <REPLACE_END> <REPLACE_OLD> } interfaces = (graphene.relay.Node,) class <REPLACE_NEW> } class <REPLACE_END> <DELETE> interfaces = (graphene.relay.Node,) <DELETE_END> <|endoftext|> import graphene from graphene_django.types import DjangoObjectType, ObjectType from graphene_django_extras import ( DjangoFilterPaginateListField, LimitOffsetGraphqlPagination ) from apps.employees import models class EmployeeType(DjangoObjectType): class Meta: model = models.Employee filter_fields = { 'first_name': ['icontains', 'istartswith'], 'last_name': ['icontains', 'istartswith'], 'position': ['exact'], 'id': ['exact'] } class PositionType(DjangoObjectType): """ Position graphQL type. Implemented total_employees and employees objects. """ employees = DjangoFilterPaginateListField( EmployeeType, pagination=LimitOffsetGraphqlPagination() ) total_employees = graphene.Int() def resolve_total_employees(self, info): return self.employees.count() def resolve_employees(self, info): return self.employees.all() class Meta: model = models.Position filter_fields = { 'name': ['exact', 'icontains', 'istartswith'], 'id': ['exact'] } class SpecializationType(DjangoObjectType): class Meta: model = models.Specialization filter_fields = { 'name': ['exact', 'icontains', 'istartswith'], 'id': ['exact'], }
Remove Node interfaces (use origin id for objects) import graphene from graphene_django.types import DjangoObjectType, ObjectType from graphene_django_extras import ( DjangoFilterPaginateListField, LimitOffsetGraphqlPagination ) from apps.employees import models class EmployeeType(DjangoObjectType): class Meta: model = models.Employee filter_fields = { 'first_name': ['icontains', 'istartswith'], 'last_name': ['icontains', 'istartswith'], 'position': ['exact'], 'id': ['exact'] } interfaces = (graphene.relay.Node,) class PositionType(DjangoObjectType): """ Position graphQL type. Implemented total_employees and employees objects. """ employees = DjangoFilterPaginateListField( EmployeeType, pagination=LimitOffsetGraphqlPagination() ) total_employees = graphene.Int() def resolve_total_employees(self, info): return self.employees.count() def resolve_employees(self, info): return self.employees.all() class Meta: model = models.Position filter_fields = { 'name': ['exact', 'icontains', 'istartswith'], 'id': ['exact'] } interfaces = (graphene.relay.Node,) class SpecializationType(DjangoObjectType): class Meta: model = models.Specialization filter_fields = { 'name': ['exact', 'icontains', 'istartswith'], 'id': ['exact'], } interfaces = (graphene.relay.Node,)
a0342631d6888f4748af9011839020ee0843a721
crypto_enigma/_version.py
crypto_enigma/_version.py
#!/usr/bin/env python # encoding: utf8 from __future__ import (absolute_import, print_function, division, unicode_literals) # See - http://www.python.org/dev/peps/pep-0440/ # See - http://semver.org __author__ = 'Roy Levien' __copyright__ = '(c) 2014-2015 Roy Levien' __release__ = '0.2.1' # N(.N)* __pre_release__ = 'b3' # aN | bN | cN | __suffix__ = '.dev1' # .devN | | .postN __version__ = __release__ + __pre_release__ + __suffix__
#!/usr/bin/env python # encoding: utf8 from __future__ import (absolute_import, print_function, division, unicode_literals) # See - http://www.python.org/dev/peps/pep-0440/ # See - http://semver.org __author__ = 'Roy Levien' __copyright__ = '(c) 2014-2015 Roy Levien' __release__ = '0.2.1' # N(.N)* __pre_release__ = 'b3' # aN | bN | cN | __suffix__ = '.dev2' # .devN | | .postN __version__ = __release__ + __pre_release__ + __suffix__
Update test version after test release
Update test version after test release
Python
bsd-3-clause
orome/crypto-enigma-py
<REPLACE_OLD> '.dev1' <REPLACE_NEW> '.dev2' <REPLACE_END> <|endoftext|> #!/usr/bin/env python # encoding: utf8 from __future__ import (absolute_import, print_function, division, unicode_literals) # See - http://www.python.org/dev/peps/pep-0440/ # See - http://semver.org __author__ = 'Roy Levien' __copyright__ = '(c) 2014-2015 Roy Levien' __release__ = '0.2.1' # N(.N)* __pre_release__ = 'b3' # aN | bN | cN | __suffix__ = '.dev2' # .devN | | .postN __version__ = __release__ + __pre_release__ + __suffix__
Update test version after test release #!/usr/bin/env python # encoding: utf8 from __future__ import (absolute_import, print_function, division, unicode_literals) # See - http://www.python.org/dev/peps/pep-0440/ # See - http://semver.org __author__ = 'Roy Levien' __copyright__ = '(c) 2014-2015 Roy Levien' __release__ = '0.2.1' # N(.N)* __pre_release__ = 'b3' # aN | bN | cN | __suffix__ = '.dev1' # .devN | | .postN __version__ = __release__ + __pre_release__ + __suffix__
fd99ef86dfca50dbd36b2c1a022cf30a0720dbea
scrapy/squeues.py
scrapy/squeues.py
""" Scheduler queues """ import marshal from six.moves import cPickle as pickle from queuelib import queue def _serializable_queue(queue_class, serialize, deserialize): class SerializableQueue(queue_class): def push(self, obj): s = serialize(obj) super(SerializableQueue, self).push(s) def pop(self): s = super(SerializableQueue, self).pop() if s: return deserialize(s) return SerializableQueue def _pickle_serialize(obj): try: return pickle.dumps(obj, protocol=2) except pickle.PicklingError as e: raise ValueError(str(e)) PickleFifoDiskQueue = _serializable_queue(queue.FifoDiskQueue, \ _pickle_serialize, pickle.loads) PickleLifoDiskQueue = _serializable_queue(queue.LifoDiskQueue, \ _pickle_serialize, pickle.loads) MarshalFifoDiskQueue = _serializable_queue(queue.FifoDiskQueue, \ marshal.dumps, marshal.loads) MarshalLifoDiskQueue = _serializable_queue(queue.LifoDiskQueue, \ marshal.dumps, marshal.loads) FifoMemoryQueue = queue.FifoMemoryQueue LifoMemoryQueue = queue.LifoMemoryQueue
""" Scheduler queues """ import marshal from six.moves import cPickle as pickle from queuelib import queue def _serializable_queue(queue_class, serialize, deserialize): class SerializableQueue(queue_class): def push(self, obj): s = serialize(obj) super(SerializableQueue, self).push(s) def pop(self): s = super(SerializableQueue, self).pop() if s: return deserialize(s) return SerializableQueue def _pickle_serialize(obj): try: return pickle.dumps(obj, protocol=2) # Python>=3.5 raises AttributeError here while # Python<=3.4 raises pickle.PicklingError except (pickle.PicklingError, AttributeError) as e: raise ValueError(str(e)) PickleFifoDiskQueue = _serializable_queue(queue.FifoDiskQueue, \ _pickle_serialize, pickle.loads) PickleLifoDiskQueue = _serializable_queue(queue.LifoDiskQueue, \ _pickle_serialize, pickle.loads) MarshalFifoDiskQueue = _serializable_queue(queue.FifoDiskQueue, \ marshal.dumps, marshal.loads) MarshalLifoDiskQueue = _serializable_queue(queue.LifoDiskQueue, \ marshal.dumps, marshal.loads) FifoMemoryQueue = queue.FifoMemoryQueue LifoMemoryQueue = queue.LifoMemoryQueue
Test for AttributeError when pickling objects (Python>=3.5)
Test for AttributeError when pickling objects (Python>=3.5) Same "fix" as in e.g. https://github.com/joblib/joblib/pull/246
Python
bsd-3-clause
YeelerG/scrapy,wenyu1001/scrapy,GregoryVigoTorres/scrapy,crasker/scrapy,jdemaeyer/scrapy,Parlin-Galanodel/scrapy,arush0311/scrapy,Digenis/scrapy,pablohoffman/scrapy,ArturGaspar/scrapy,kmike/scrapy,crasker/scrapy,eLRuLL/scrapy,wujuguang/scrapy,carlosp420/scrapy,darkrho/scrapy-scrapy,redapple/scrapy,carlosp420/scrapy,barraponto/scrapy,barraponto/scrapy,rolando/scrapy,cyberplant/scrapy,arush0311/scrapy,Parlin-Galanodel/scrapy,scrapy/scrapy,umrashrf/scrapy,crasker/scrapy,w495/scrapy,finfish/scrapy,redapple/scrapy,rolando/scrapy,rootAvish/scrapy,wenyu1001/scrapy,rolando/scrapy,shaform/scrapy,rolando-contrib/scrapy,starrify/scrapy,darkrho/scrapy-scrapy,ssteo/scrapy,wujuguang/scrapy,Ryezhang/scrapy,Parlin-Galanodel/scrapy,Zephor5/scrapy,GregoryVigoTorres/scrapy,YeelerG/scrapy,jc0n/scrapy,rklabs/scrapy,shaform/scrapy,Ryezhang/scrapy,Digenis/scrapy,elacuesta/scrapy,ssteo/scrapy,wujuguang/scrapy,elacuesta/scrapy,darkrho/scrapy-scrapy,kmike/scrapy,umrashrf/scrapy,ArturGaspar/scrapy,taito/scrapy,YeelerG/scrapy,dangra/scrapy,elacuesta/scrapy,barraponto/scrapy,pawelmhm/scrapy,scrapy/scrapy,finfish/scrapy,Ryezhang/scrapy,foromer4/scrapy,pawelmhm/scrapy,scrapy/scrapy,rootAvish/scrapy,pablohoffman/scrapy,w495/scrapy,GregoryVigoTorres/scrapy,finfish/scrapy,starrify/scrapy,shaform/scrapy,pablohoffman/scrapy,taito/scrapy,kmike/scrapy,rolando-contrib/scrapy,carlosp420/scrapy,taito/scrapy,eLRuLL/scrapy,dangra/scrapy,dangra/scrapy,pawelmhm/scrapy,jdemaeyer/scrapy,foromer4/scrapy,rootAvish/scrapy,cyberplant/scrapy,eLRuLL/scrapy,Digenis/scrapy,umrashrf/scrapy,rklabs/scrapy,wenyu1001/scrapy,dracony/scrapy,redapple/scrapy,Zephor5/scrapy,jc0n/scrapy,ArturGaspar/scrapy,arush0311/scrapy,Zephor5/scrapy,w495/scrapy,ssteo/scrapy,starrify/scrapy,rolando-contrib/scrapy,dracony/scrapy,dracony/scrapy,jc0n/scrapy,foromer4/scrapy,jdemaeyer/scrapy,cyberplant/scrapy,rklabs/scrapy
<INSERT> # Python>=3.5 raises AttributeError here while # Python<=3.4 raises pickle.PicklingError <INSERT_END> <REPLACE_OLD> pickle.PicklingError <REPLACE_NEW> (pickle.PicklingError, AttributeError) <REPLACE_END> <|endoftext|> """ Scheduler queues """ import marshal from six.moves import cPickle as pickle from queuelib import queue def _serializable_queue(queue_class, serialize, deserialize): class SerializableQueue(queue_class): def push(self, obj): s = serialize(obj) super(SerializableQueue, self).push(s) def pop(self): s = super(SerializableQueue, self).pop() if s: return deserialize(s) return SerializableQueue def _pickle_serialize(obj): try: return pickle.dumps(obj, protocol=2) # Python>=3.5 raises AttributeError here while # Python<=3.4 raises pickle.PicklingError except (pickle.PicklingError, AttributeError) as e: raise ValueError(str(e)) PickleFifoDiskQueue = _serializable_queue(queue.FifoDiskQueue, \ _pickle_serialize, pickle.loads) PickleLifoDiskQueue = _serializable_queue(queue.LifoDiskQueue, \ _pickle_serialize, pickle.loads) MarshalFifoDiskQueue = _serializable_queue(queue.FifoDiskQueue, \ marshal.dumps, marshal.loads) MarshalLifoDiskQueue = _serializable_queue(queue.LifoDiskQueue, \ marshal.dumps, marshal.loads) FifoMemoryQueue = queue.FifoMemoryQueue LifoMemoryQueue = queue.LifoMemoryQueue
Test for AttributeError when pickling objects (Python>=3.5) Same "fix" as in e.g. https://github.com/joblib/joblib/pull/246 """ Scheduler queues """ import marshal from six.moves import cPickle as pickle from queuelib import queue def _serializable_queue(queue_class, serialize, deserialize): class SerializableQueue(queue_class): def push(self, obj): s = serialize(obj) super(SerializableQueue, self).push(s) def pop(self): s = super(SerializableQueue, self).pop() if s: return deserialize(s) return SerializableQueue def _pickle_serialize(obj): try: return pickle.dumps(obj, protocol=2) except pickle.PicklingError as e: raise ValueError(str(e)) PickleFifoDiskQueue = _serializable_queue(queue.FifoDiskQueue, \ _pickle_serialize, pickle.loads) PickleLifoDiskQueue = _serializable_queue(queue.LifoDiskQueue, \ _pickle_serialize, pickle.loads) MarshalFifoDiskQueue = _serializable_queue(queue.FifoDiskQueue, \ marshal.dumps, marshal.loads) MarshalLifoDiskQueue = _serializable_queue(queue.LifoDiskQueue, \ marshal.dumps, marshal.loads) FifoMemoryQueue = queue.FifoMemoryQueue LifoMemoryQueue = queue.LifoMemoryQueue
4cf8ae2ab95e9c7ed1a091532f12a4211f7580b7
textingtree.py
textingtree.py
import os import requests import tinycss2 from tinycss2 import color3 from flask import Flask, Response, request app = Flask(__name__) @app.route('/', methods=['GET']) def merry_christmas(): return 'Merry Christmas!' @app.route('/sms', methods=['POST']) def sms(): body = request.values.get('Body', None) if body is None: return Response(mimetype='text/plain') sms = body.lower() print sms rgba = tinycss2.color3.parse_color(sms) if rgba is None: return Response("Sorry, I don't recognize that color.", mimetype='text/plain') if len(rgba) == 4: red = int(round(255*rgba[0])) green = int(round(255*rgba[1])) blue = int(round(255*rgba[2])) rgb_string = '[{0:03d},{1:03d},{2:03d}]'.format(red, green, blue) payload = {'access_token': os.environ['SPARK_ACCESS_TOKEN'], 'command': rgb_string} r = requests.post("https://api.spark.io/v1/devices/{0}/color".format(os.environ['SPARK_CORE_ID']), data=payload) return Response(mimetype='text/plain') if __name__ == '__main__': app.run()
Add application code with SMS route to accept SMS and route to Spark Core via their API
Add application code with SMS route to accept SMS and route to Spark Core via their API
Python
mit
willdages/The-Texting-Tree
<INSERT> import os import requests import tinycss2 from tinycss2 import color3 from flask import Flask, Response, request app = Flask(__name__) @app.route('/', methods=['GET']) def merry_christmas(): <INSERT_END> <INSERT> return 'Merry Christmas!' @app.route('/sms', methods=['POST']) def sms(): body = request.values.get('Body', None) if body is None: return Response(mimetype='text/plain') sms = body.lower() print sms rgba = tinycss2.color3.parse_color(sms) if rgba is None: return Response("Sorry, I don't recognize that color.", mimetype='text/plain') if len(rgba) == 4: red = int(round(255*rgba[0])) green = int(round(255*rgba[1])) blue = int(round(255*rgba[2])) rgb_string = '[{0:03d},{1:03d},{2:03d}]'.format(red, green, blue) payload = {'access_token': os.environ['SPARK_ACCESS_TOKEN'], 'command': rgb_string} r = requests.post("https://api.spark.io/v1/devices/{0}/color".format(os.environ['SPARK_CORE_ID']), data=payload) return Response(mimetype='text/plain') if __name__ == '__main__': app.run() <INSERT_END> <|endoftext|> import os import requests import tinycss2 from tinycss2 import color3 from flask import Flask, Response, request app = Flask(__name__) @app.route('/', methods=['GET']) def merry_christmas(): return 'Merry Christmas!' @app.route('/sms', methods=['POST']) def sms(): body = request.values.get('Body', None) if body is None: return Response(mimetype='text/plain') sms = body.lower() print sms rgba = tinycss2.color3.parse_color(sms) if rgba is None: return Response("Sorry, I don't recognize that color.", mimetype='text/plain') if len(rgba) == 4: red = int(round(255*rgba[0])) green = int(round(255*rgba[1])) blue = int(round(255*rgba[2])) rgb_string = '[{0:03d},{1:03d},{2:03d}]'.format(red, green, blue) payload = {'access_token': os.environ['SPARK_ACCESS_TOKEN'], 'command': rgb_string} r = requests.post("https://api.spark.io/v1/devices/{0}/color".format(os.environ['SPARK_CORE_ID']), data=payload) return Response(mimetype='text/plain') if __name__ == '__main__': app.run()
Add application code with SMS route to accept SMS and route to Spark Core via their API
380331a54ae09a54e458b30a0fb6a459faa76f37
emission/analysis/point_features.py
emission/analysis/point_features.py
# Standard imports import math import logging import numpy as np import emission.core.common as ec import emission.analysis.section_features as sf def calDistance(point1, point2): return ec.calDistance([point1.longitude, point1.latitude], [point2.longitude, point2.latitude]) def calHeading(point1, point2): return sf.calHeading([point1.longitude, point1.latitude], [point2.longitude, point2.latitude]) def calHC(point1, point2, point3): return sf.calHC([point1.longitude, point1.latitude], [point2.longitude, point2.latitude], [point3.longitude, point3.latitude]) def calSpeed(point1, point2): distanceDelta = calDistance(point1, point2) timeDelta = point2.mTime - point1.mTime # print "Distance delta = %s and time delta = %s" % (distanceDelta, timeDelta) # assert(timeDelta != 0) if (timeDelta == 0): logging.debug("timeDelta = 0, distanceDelta = %s, returning speed = 0") assert(distanceDelta < 0.01) return 0 # TODO: Once we perform the conversions from ms to secs as part of the # usercache -> timeseries switch, we need to remove this division by 1000 return distanceDelta/(float(timeDelta)/1000)
# Standard imports import math import logging import numpy as np import emission.core.common as ec import emission.analysis.section_features as sf def calDistance(point1, point2): return ec.calDistance([point1.longitude, point1.latitude], [point2.longitude, point2.latitude]) def calHeading(point1, point2): return sf.calHeading([point1.longitude, point1.latitude], [point2.longitude, point2.latitude]) def calHC(point1, point2, point3): return sf.calHC([point1.longitude, point1.latitude], [point2.longitude, point2.latitude], [point3.longitude, point3.latitude]) def calSpeed(point1, point2): distanceDelta = calDistance(point1, point2) timeDelta = point2.ts - point1.ts # print "Distance delta = %s and time delta = %s" % (distanceDelta, timeDelta) # assert(timeDelta != 0) if (timeDelta == 0): logging.debug("timeDelta = 0, distanceDelta = %s, returning speed = 0") assert(distanceDelta < 0.01) return 0 return distanceDelta/timeDelta
Change the feature calculation to match the new unified format
Change the feature calculation to match the new unified format - the timestamps are now in seconds, so no need to divide them - the field is called ts, not mTime
Python
bsd-3-clause
e-mission/e-mission-server,e-mission/e-mission-server,shankari/e-mission-server,sunil07t/e-mission-server,joshzarrabi/e-mission-server,e-mission/e-mission-server,shankari/e-mission-server,joshzarrabi/e-mission-server,joshzarrabi/e-mission-server,yw374cornell/e-mission-server,sunil07t/e-mission-server,sunil07t/e-mission-server,shankari/e-mission-server,yw374cornell/e-mission-server,joshzarrabi/e-mission-server,yw374cornell/e-mission-server,shankari/e-mission-server,e-mission/e-mission-server,sunil07t/e-mission-server,yw374cornell/e-mission-server
<REPLACE_OLD> point2.mTime <REPLACE_NEW> point2.ts <REPLACE_END> <REPLACE_OLD> point1.mTime <REPLACE_NEW> point1.ts <REPLACE_END> <DELETE> # TODO: Once we perform the conversions from ms to secs as part of the # usercache -> timeseries switch, we need to remove this division by 1000 <DELETE_END> <REPLACE_OLD> distanceDelta/(float(timeDelta)/1000) <REPLACE_NEW> distanceDelta/timeDelta <REPLACE_END> <|endoftext|> # Standard imports import math import logging import numpy as np import emission.core.common as ec import emission.analysis.section_features as sf def calDistance(point1, point2): return ec.calDistance([point1.longitude, point1.latitude], [point2.longitude, point2.latitude]) def calHeading(point1, point2): return sf.calHeading([point1.longitude, point1.latitude], [point2.longitude, point2.latitude]) def calHC(point1, point2, point3): return sf.calHC([point1.longitude, point1.latitude], [point2.longitude, point2.latitude], [point3.longitude, point3.latitude]) def calSpeed(point1, point2): distanceDelta = calDistance(point1, point2) timeDelta = point2.ts - point1.ts # print "Distance delta = %s and time delta = %s" % (distanceDelta, timeDelta) # assert(timeDelta != 0) if (timeDelta == 0): logging.debug("timeDelta = 0, distanceDelta = %s, returning speed = 0") assert(distanceDelta < 0.01) return 0 return distanceDelta/timeDelta
Change the feature calculation to match the new unified format - the timestamps are now in seconds, so no need to divide them - the field is called ts, not mTime # Standard imports import math import logging import numpy as np import emission.core.common as ec import emission.analysis.section_features as sf def calDistance(point1, point2): return ec.calDistance([point1.longitude, point1.latitude], [point2.longitude, point2.latitude]) def calHeading(point1, point2): return sf.calHeading([point1.longitude, point1.latitude], [point2.longitude, point2.latitude]) def calHC(point1, point2, point3): return sf.calHC([point1.longitude, point1.latitude], [point2.longitude, point2.latitude], [point3.longitude, point3.latitude]) def calSpeed(point1, point2): distanceDelta = calDistance(point1, point2) timeDelta = point2.mTime - point1.mTime # print "Distance delta = %s and time delta = %s" % (distanceDelta, timeDelta) # assert(timeDelta != 0) if (timeDelta == 0): logging.debug("timeDelta = 0, distanceDelta = %s, returning speed = 0") assert(distanceDelta < 0.01) return 0 # TODO: Once we perform the conversions from ms to secs as part of the # usercache -> timeseries switch, we need to remove this division by 1000 return distanceDelta/(float(timeDelta)/1000)
2bd5887a62d0f6bfd6f9290604effad322e8ab1e
myElsClient.py
myElsClient.py
import requests class myElsClient: """A class that implements a Python interface to api.elsevier.com""" # local variables __base_url = "https://api.elsevier.com/" # constructors def __init__(self, apiKey): """Instantiates a client with a given API Key.""" self.apiKey = apiKey # configuration functions def setInstToken(self, instToken): """Sets an institutional token for customer authentication""" self.instToken = instToken # utility access functions def getBaseURL(self): """Returns the base URL currently configured for Elsevier's APIs""" return self.__base_url # request/response execution functions def execRequest(self,pathStr,queryStr): """Constructs and send the actual request""" headers = { "X-ELS-APIKey" : self.apiKey } r = requests.get( self.__base_url + pathStr + queryStr, headers = headers ) return r
import requests class myElsClient: """A class that implements a Python interface to api.elsevier.com""" # local variables __base_url = "https://api.elsevier.com/" # constructors def __init__(self, apiKey): """Instantiates a client with a given API Key.""" self.apiKey = apiKey # configuration functions def setInstToken(self, instToken): """Sets an institutional token for customer authentication""" self.instToken = instToken # utility access functions def getBaseURL(self): """Returns the base URL currently configured for Elsevier's APIs""" return self.__base_url # request/response execution functions def execRequest(self,pathStr,queryStr): """Constructs and sends the actual request; returns response.""" headers = { "X-ELS-APIKey" : self.apiKey } r = requests.get( self.__base_url + pathStr + queryStr, headers = headers ) if r.status_code == 200: return r else: print "error"
Add basic HTTP error handling.
Add basic HTTP error handling.
Python
bsd-3-clause
ElsevierDev/elsapy
<REPLACE_OLD> send <REPLACE_NEW> sends <REPLACE_END> <REPLACE_OLD> request""" <REPLACE_NEW> request; returns response.""" <REPLACE_END> <INSERT> if r.status_code == 200: <INSERT_END> <INSERT> else: print "error" <INSERT_END> <|endoftext|> import requests class myElsClient: """A class that implements a Python interface to api.elsevier.com""" # local variables __base_url = "https://api.elsevier.com/" # constructors def __init__(self, apiKey): """Instantiates a client with a given API Key.""" self.apiKey = apiKey # configuration functions def setInstToken(self, instToken): """Sets an institutional token for customer authentication""" self.instToken = instToken # utility access functions def getBaseURL(self): """Returns the base URL currently configured for Elsevier's APIs""" return self.__base_url # request/response execution functions def execRequest(self,pathStr,queryStr): """Constructs and sends the actual request; returns response.""" headers = { "X-ELS-APIKey" : self.apiKey } r = requests.get( self.__base_url + pathStr + queryStr, headers = headers ) if r.status_code == 200: return r else: print "error"
Add basic HTTP error handling. import requests class myElsClient: """A class that implements a Python interface to api.elsevier.com""" # local variables __base_url = "https://api.elsevier.com/" # constructors def __init__(self, apiKey): """Instantiates a client with a given API Key.""" self.apiKey = apiKey # configuration functions def setInstToken(self, instToken): """Sets an institutional token for customer authentication""" self.instToken = instToken # utility access functions def getBaseURL(self): """Returns the base URL currently configured for Elsevier's APIs""" return self.__base_url # request/response execution functions def execRequest(self,pathStr,queryStr): """Constructs and send the actual request""" headers = { "X-ELS-APIKey" : self.apiKey } r = requests.get( self.__base_url + pathStr + queryStr, headers = headers ) return r
f408346e69f643e603f279c2581fad8c99962b11
service_registry_cli/commands/configuration/remove.py
service_registry_cli/commands/configuration/remove.py
# Copyright 2012 Rackspace # # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from service_registry_cli.utils import BaseCommand, get_client class RemoveCommand(BaseCommand): """ Remove a configuration value. """ log = logging.getLogger(__name__) def get_parser(self, prog_name): parser = super(RemoveCommand, self).get_parser(prog_name=prog_name) parser.add_argument('--id', dest='id') return parser def take_action(self, parsed_args): client = get_client(parsed_args) configuration_id = parsed_args.id client.configuration.remove(configuration_id=configuration_id) self.app.stdout.write('Configuration value "%s" has been removed\n' % (configuration_id))
Add a command for removing a configuration value.
Add a command for removing a configuration value.
Python
apache-2.0
racker/python-service-registry-cli,racker/python-service-registry-cli
<REPLACE_OLD> <REPLACE_NEW> # Copyright 2012 Rackspace # # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from service_registry_cli.utils import BaseCommand, get_client class RemoveCommand(BaseCommand): """ Remove a configuration value. """ log = logging.getLogger(__name__) def get_parser(self, prog_name): parser = super(RemoveCommand, self).get_parser(prog_name=prog_name) parser.add_argument('--id', dest='id') return parser def take_action(self, parsed_args): client = get_client(parsed_args) configuration_id = parsed_args.id client.configuration.remove(configuration_id=configuration_id) self.app.stdout.write('Configuration value "%s" has been removed\n' % (configuration_id)) <REPLACE_END> <|endoftext|> # Copyright 2012 Rackspace # # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from service_registry_cli.utils import BaseCommand, get_client class RemoveCommand(BaseCommand): """ Remove a configuration value. """ log = logging.getLogger(__name__) def get_parser(self, prog_name): parser = super(RemoveCommand, self).get_parser(prog_name=prog_name) parser.add_argument('--id', dest='id') return parser def take_action(self, parsed_args): client = get_client(parsed_args) configuration_id = parsed_args.id client.configuration.remove(configuration_id=configuration_id) self.app.stdout.write('Configuration value "%s" has been removed\n' % (configuration_id))
Add a command for removing a configuration value.
cc0a971bad5f4b2eb81881b8c570eddb2bd144f3
django_vend/stores/forms.py
django_vend/stores/forms.py
from django import forms from django_vend.core.forms import VendDateTimeField from .models import VendOutlet class VendOutletForm(forms.ModelForm): deleted_at = VendDateTimeField(required=False) def __init__(self, data=None, *args, **kwargs): if data: uid = data.pop('id', None) if uid is not None: data['uid'] = uid tax_inc = data.pop('display_prices', None) if tax_inc is not None: if tax_inc == 'inclusive': data['display_prices_tax_inclusive'] = True elif tax_inc == 'exclusive': data['display_prices_tax_inclusive'] = False deleted_at = data.get('deleted_at') if deleted_at is not None and deleted_at == 'null': data['deleted_at'] = None super(VendOutletForm, self).__init__(data, *args, **kwargs) class Meta: model = VendOutlet fields = ['uid', 'name', 'time_zone', 'currency', 'currency_symbol', 'display_prices_tax_inclusive', 'deleted_at']
from django import forms from django_vend.core.forms import VendDateTimeField from .models import VendOutlet class VendOutletForm(forms.ModelForm): deleted_at = VendDateTimeField(required=False) def __init__(self, data=None, *args, **kwargs): if data: uid = data.pop('id', None) if uid is not None: data['uid'] = uid tax_inc = data.pop('display_prices', None) if tax_inc is not None: if tax_inc == 'inclusive': data['display_prices_tax_inclusive'] = True elif tax_inc == 'exclusive': data['display_prices_tax_inclusive'] = False deleted_at = data.get('deleted_at') if deleted_at is not None and deleted_at == 'null': data['deleted_at'] = None if 'instance' not in kwargs or kwargs['instance'] is None: # Note: currently assumes instance is always passed as a kwarg # - need to check but this is probably bad try: kwargs['instance'] = VendOutlet.objects.get(uid=uid) except VendOutlet.DoesNotExist: pass super(VendOutletForm, self).__init__(data, *args, **kwargs) class Meta: model = VendOutlet fields = ['uid', 'name', 'time_zone', 'currency', 'currency_symbol', 'display_prices_tax_inclusive', 'deleted_at']
Make form update existing instance if uid matches
Make form update existing instance if uid matches
Python
bsd-3-clause
remarkablerocket/django-vend,remarkablerocket/django-vend
<INSERT> if 'instance' not in kwargs or kwargs['instance'] is None: # Note: currently assumes instance is always passed as a kwarg # - need to check but this is probably bad try: kwargs['instance'] = VendOutlet.objects.get(uid=uid) except VendOutlet.DoesNotExist: pass <INSERT_END> <|endoftext|> from django import forms from django_vend.core.forms import VendDateTimeField from .models import VendOutlet class VendOutletForm(forms.ModelForm): deleted_at = VendDateTimeField(required=False) def __init__(self, data=None, *args, **kwargs): if data: uid = data.pop('id', None) if uid is not None: data['uid'] = uid tax_inc = data.pop('display_prices', None) if tax_inc is not None: if tax_inc == 'inclusive': data['display_prices_tax_inclusive'] = True elif tax_inc == 'exclusive': data['display_prices_tax_inclusive'] = False deleted_at = data.get('deleted_at') if deleted_at is not None and deleted_at == 'null': data['deleted_at'] = None if 'instance' not in kwargs or kwargs['instance'] is None: # Note: currently assumes instance is always passed as a kwarg # - need to check but this is probably bad try: kwargs['instance'] = VendOutlet.objects.get(uid=uid) except VendOutlet.DoesNotExist: pass super(VendOutletForm, self).__init__(data, *args, **kwargs) class Meta: model = VendOutlet fields = ['uid', 'name', 'time_zone', 'currency', 'currency_symbol', 'display_prices_tax_inclusive', 'deleted_at']
Make form update existing instance if uid matches from django import forms from django_vend.core.forms import VendDateTimeField from .models import VendOutlet class VendOutletForm(forms.ModelForm): deleted_at = VendDateTimeField(required=False) def __init__(self, data=None, *args, **kwargs): if data: uid = data.pop('id', None) if uid is not None: data['uid'] = uid tax_inc = data.pop('display_prices', None) if tax_inc is not None: if tax_inc == 'inclusive': data['display_prices_tax_inclusive'] = True elif tax_inc == 'exclusive': data['display_prices_tax_inclusive'] = False deleted_at = data.get('deleted_at') if deleted_at is not None and deleted_at == 'null': data['deleted_at'] = None super(VendOutletForm, self).__init__(data, *args, **kwargs) class Meta: model = VendOutlet fields = ['uid', 'name', 'time_zone', 'currency', 'currency_symbol', 'display_prices_tax_inclusive', 'deleted_at']
e45f394c61620db13bae579a29043dfdd6ae2d0f
SLA_bot/alertfeed.py
SLA_bot/alertfeed.py
import asyncio import json import aiohttp import SLA_bot.config as cf class AlertFeed: source_url = 'http://pso2emq.flyergo.eu/api/v2/' async def download(url): try: async with aiohttp.get(url) as response: return await response.json() except json.decoder.JSONDecodeError: pass def parse_data(data): latest_alert = data[0]['text'] lines = latest_alert.splitlines() header = '-' * len(lines[0]) lines.insert(1, header) text = '\n'.join(lines) return '```fix\n{}\n```'.format(text) async def fetch(): header = cf.get('PSO2 Feed', 'header') raw_data = await AlertFeed.download(AlertFeed.source_url) return '** **\n' + header + '\n' + AlertFeed.parse_data(raw_data)
import asyncio import json import aiohttp import SLA_bot.config as cf class AlertFeed: source_url = 'http://pso2emq.flyergo.eu/api/v2/' async def download(url): try: async with aiohttp.get(url) as response: return await response.json() except json.decoder.JSONDecodeError: pass def parse_data(data): latest_alert = data[0]['text'] lines = latest_alert.splitlines() code_color = 'fix' if len(lines) >= 10 else '' header = '-' * len(lines[0]) lines.insert(1, header) text = '\n'.join(lines) return '```{}\n{}\n```'.format(code_color, text) async def fetch(): header = cf.get('PSO2 Feed', 'header') raw_data = await AlertFeed.download(AlertFeed.source_url) return '** **\n' + header + '\n' + AlertFeed.parse_data(raw_data)
Remove text coloring in AlertFeed if it seems like scheduled text
Remove text coloring in AlertFeed if it seems like scheduled text
Python
mit
EsqWiggles/SLA-bot,EsqWiggles/SLA-bot
<INSERT> code_color = 'fix' if len(lines) >= 10 else '' <INSERT_END> <REPLACE_OLD> '```fix\n{}\n```'.format(text) <REPLACE_NEW> '```{}\n{}\n```'.format(code_color, text) <REPLACE_END> <|endoftext|> import asyncio import json import aiohttp import SLA_bot.config as cf class AlertFeed: source_url = 'http://pso2emq.flyergo.eu/api/v2/' async def download(url): try: async with aiohttp.get(url) as response: return await response.json() except json.decoder.JSONDecodeError: pass def parse_data(data): latest_alert = data[0]['text'] lines = latest_alert.splitlines() code_color = 'fix' if len(lines) >= 10 else '' header = '-' * len(lines[0]) lines.insert(1, header) text = '\n'.join(lines) return '```{}\n{}\n```'.format(code_color, text) async def fetch(): header = cf.get('PSO2 Feed', 'header') raw_data = await AlertFeed.download(AlertFeed.source_url) return '** **\n' + header + '\n' + AlertFeed.parse_data(raw_data)
Remove text coloring in AlertFeed if it seems like scheduled text import asyncio import json import aiohttp import SLA_bot.config as cf class AlertFeed: source_url = 'http://pso2emq.flyergo.eu/api/v2/' async def download(url): try: async with aiohttp.get(url) as response: return await response.json() except json.decoder.JSONDecodeError: pass def parse_data(data): latest_alert = data[0]['text'] lines = latest_alert.splitlines() header = '-' * len(lines[0]) lines.insert(1, header) text = '\n'.join(lines) return '```fix\n{}\n```'.format(text) async def fetch(): header = cf.get('PSO2 Feed', 'header') raw_data = await AlertFeed.download(AlertFeed.source_url) return '** **\n' + header + '\n' + AlertFeed.parse_data(raw_data)
bf17a86bccf25ead90d11dd15a900cb784d9cb9f
raco/myrial/myrial_test.py
raco/myrial/myrial_test.py
import collections import math import unittest import raco.fakedb import raco.myrial.interpreter as interpreter import raco.myrial.parser as parser from raco.myrialang import compile_to_json class MyrialTestCase(unittest.TestCase): def setUp(self): self.db = raco.fakedb.FakeDatabase() self.parser = parser.Parser() self.processor = interpreter.StatementProcessor(self.db) def execute_query(self, query, test_logical=False): '''Run a test query against the fake database''' statements = self.parser.parse(query) self.processor.evaluate(statements) if test_logical: plan = self.processor.get_logical_plan() else: plan = self.processor.get_physical_plan() json = compile_to_json(query, '', [('A', plan)]) self.db.evaluate(plan) return self.db.get_temp_table('__OUTPUT0__') def run_test(self, query, expected, test_logical=False): '''Execute a test query with an expected output''' actual = self.execute_query(query, test_logical) self.assertEquals(actual, expected)
import collections import math import unittest import raco.fakedb import raco.myrial.interpreter as interpreter import raco.myrial.parser as parser class MyrialTestCase(unittest.TestCase): def setUp(self): self.db = raco.fakedb.FakeDatabase() self.parser = parser.Parser() self.processor = interpreter.StatementProcessor(self.db) def execute_query(self, query, test_logical=False): '''Run a test query against the fake database''' statements = self.parser.parse(query) self.processor.evaluate(statements) if test_logical: plan = self.processor.get_logical_plan() else: plan = self.processor.get_physical_plan() self.db.evaluate(plan) return self.db.get_temp_table('__OUTPUT0__') def run_test(self, query, expected, test_logical=False): '''Execute a test query with an expected output''' actual = self.execute_query(query, test_logical) self.assertEquals(actual, expected)
Revert "Add compile_to_json invocation in Myrial test fixture"
Revert "Add compile_to_json invocation in Myrial test fixture" This reverts commit ceb848021d5323b5bad8518ac7ed850a51fc89ca.
Python
bsd-3-clause
uwescience/raco,uwescience/raco,uwescience/raco,uwescience/raco,uwescience/raco
<REPLACE_OLD> parser from raco.myrialang import compile_to_json class <REPLACE_NEW> parser class <REPLACE_END> <REPLACE_OLD> json = compile_to_json(query, '', [('A', plan)]) self.db.evaluate(plan) <REPLACE_NEW> self.db.evaluate(plan) <REPLACE_END> <|endoftext|> import collections import math import unittest import raco.fakedb import raco.myrial.interpreter as interpreter import raco.myrial.parser as parser class MyrialTestCase(unittest.TestCase): def setUp(self): self.db = raco.fakedb.FakeDatabase() self.parser = parser.Parser() self.processor = interpreter.StatementProcessor(self.db) def execute_query(self, query, test_logical=False): '''Run a test query against the fake database''' statements = self.parser.parse(query) self.processor.evaluate(statements) if test_logical: plan = self.processor.get_logical_plan() else: plan = self.processor.get_physical_plan() self.db.evaluate(plan) return self.db.get_temp_table('__OUTPUT0__') def run_test(self, query, expected, test_logical=False): '''Execute a test query with an expected output''' actual = self.execute_query(query, test_logical) self.assertEquals(actual, expected)
Revert "Add compile_to_json invocation in Myrial test fixture" This reverts commit ceb848021d5323b5bad8518ac7ed850a51fc89ca. import collections import math import unittest import raco.fakedb import raco.myrial.interpreter as interpreter import raco.myrial.parser as parser from raco.myrialang import compile_to_json class MyrialTestCase(unittest.TestCase): def setUp(self): self.db = raco.fakedb.FakeDatabase() self.parser = parser.Parser() self.processor = interpreter.StatementProcessor(self.db) def execute_query(self, query, test_logical=False): '''Run a test query against the fake database''' statements = self.parser.parse(query) self.processor.evaluate(statements) if test_logical: plan = self.processor.get_logical_plan() else: plan = self.processor.get_physical_plan() json = compile_to_json(query, '', [('A', plan)]) self.db.evaluate(plan) return self.db.get_temp_table('__OUTPUT0__') def run_test(self, query, expected, test_logical=False): '''Execute a test query with an expected output''' actual = self.execute_query(query, test_logical) self.assertEquals(actual, expected)
038b56134017b6b3e4ea44d1b7197bc5168868d3
safeopt/__init__.py
safeopt/__init__.py
""" The `safeopt` package provides... Main classes ============ .. autosummary:: SafeOpt SafeOptSwarm Utilities ========= .. autosummary:: sample_gp_function linearly_spaced_combinations plot_2d_gp plot_3d_gp plot_contour_gp """ from __future__ import absolute_import from .utilities import * from .gp_opt import * __all__ = [s for s in dir() if not s.startswith('_')]
""" The `safeopt` package provides... Main classes ============ These classes provide the main functionality for Safe Bayesian optimization. .. autosummary:: SafeOpt SafeOptSwarm Utilities ========= The following are utilities to make testing and working with the library more pleasant. .. autosummary:: sample_gp_function linearly_spaced_combinations plot_2d_gp plot_3d_gp plot_contour_gp """ from __future__ import absolute_import from .utilities import * from .gp_opt import * __all__ = [s for s in dir() if not s.startswith('_')]
Add short comment to docs
Add short comment to docs
Python
mit
befelix/SafeOpt,befelix/SafeOpt
<REPLACE_OLD> classes ============ .. <REPLACE_NEW> classes ============ These classes provide the main functionality for Safe Bayesian optimization. .. <REPLACE_END> <REPLACE_OLD> SafeOptSwarm Utilities ========= .. <REPLACE_NEW> SafeOptSwarm Utilities ========= The following are utilities to make testing and working with the library more pleasant. .. <REPLACE_END> <|endoftext|> """ The `safeopt` package provides... Main classes ============ These classes provide the main functionality for Safe Bayesian optimization. .. autosummary:: SafeOpt SafeOptSwarm Utilities ========= The following are utilities to make testing and working with the library more pleasant. .. autosummary:: sample_gp_function linearly_spaced_combinations plot_2d_gp plot_3d_gp plot_contour_gp """ from __future__ import absolute_import from .utilities import * from .gp_opt import * __all__ = [s for s in dir() if not s.startswith('_')]
Add short comment to docs """ The `safeopt` package provides... Main classes ============ .. autosummary:: SafeOpt SafeOptSwarm Utilities ========= .. autosummary:: sample_gp_function linearly_spaced_combinations plot_2d_gp plot_3d_gp plot_contour_gp """ from __future__ import absolute_import from .utilities import * from .gp_opt import * __all__ = [s for s in dir() if not s.startswith('_')]
a3bc13ed4943dae80928da4e09765002bb0db60c
nbsetuptools/tests/test_nbsetuptools.py
nbsetuptools/tests/test_nbsetuptools.py
import os import tempfile import unittest from jupyter_core.paths import jupyter_config_dir from ..nbsetuptools import NBSetup class NBSetupTestCase(unittest.TestCase): def setUp(self): self.prefix = tempfile.mkdtemp() self.params = { 'prefix': self.prefix, 'static': os.path.join(os.path.dirname(__file__), 'support'), } def test_initialize(self): assert NBSetup('name').path == jupyter_config_dir() assert NBSetup('name', prefix="/tmp").path == "/tmp/etc/jupyter" def test_install(self): nb_setup = NBSetup('name', **self.params) nb_setup.install() assert os.path.exists( os.path.join(self.prefix, 'share', 'jupyter', 'nbextensions', 'name')) def test_enable(self): nb_setup = NBSetup('name', **self.params) nb_setup.enable() for f in ['notebook.json', 'tree.json', 'edit.json']: assert os.path.exists( os.path.join(self.prefix, 'etc', 'jupyter', 'nbconfig', f) ) if __name__ == '__main__': unittest.main()
import os import tempfile import unittest from jupyter_core.paths import jupyter_config_dir from ..nbsetuptools import NBSetup class NBSetupTestCase(unittest.TestCase): def setUp(self): self.prefix = tempfile.mkdtemp() self.params = { 'prefix': self.prefix, 'static': os.path.join(os.path.dirname(__file__), 'support'), } def test_initialize(self): assert NBSetup('name').path == jupyter_config_dir() # assert NBSetup('name', prefix="/tmp").path == "/tmp/etc/jupyter" def test_install(self): nb_setup = NBSetup('name', **self.params) nb_setup.install() assert os.path.exists( os.path.join(self.prefix, 'share', 'jupyter', 'nbextensions', 'name')) def test_enable(self): nb_setup = NBSetup('name', **self.params) nb_setup.enable() for f in ['notebook.json', 'tree.json', 'edit.json']: assert os.path.exists( os.path.join(self.prefix, 'etc', 'jupyter', 'nbconfig', f) ) if __name__ == '__main__': unittest.main()
Comment out test that doesn't pass on Windows
Comment out test that doesn't pass on Windows It appears to be assuming unix paths, so I'm going on the assumption that it's not a valid test case on Windows.
Python
bsd-3-clause
Anaconda-Server/nbsetuptools,Anaconda-Server/nbsetuptools,Anaconda-Server/nbsetuptools
<INSERT> # <INSERT_END> <|endoftext|> import os import tempfile import unittest from jupyter_core.paths import jupyter_config_dir from ..nbsetuptools import NBSetup class NBSetupTestCase(unittest.TestCase): def setUp(self): self.prefix = tempfile.mkdtemp() self.params = { 'prefix': self.prefix, 'static': os.path.join(os.path.dirname(__file__), 'support'), } def test_initialize(self): assert NBSetup('name').path == jupyter_config_dir() # assert NBSetup('name', prefix="/tmp").path == "/tmp/etc/jupyter" def test_install(self): nb_setup = NBSetup('name', **self.params) nb_setup.install() assert os.path.exists( os.path.join(self.prefix, 'share', 'jupyter', 'nbextensions', 'name')) def test_enable(self): nb_setup = NBSetup('name', **self.params) nb_setup.enable() for f in ['notebook.json', 'tree.json', 'edit.json']: assert os.path.exists( os.path.join(self.prefix, 'etc', 'jupyter', 'nbconfig', f) ) if __name__ == '__main__': unittest.main()
Comment out test that doesn't pass on Windows It appears to be assuming unix paths, so I'm going on the assumption that it's not a valid test case on Windows. import os import tempfile import unittest from jupyter_core.paths import jupyter_config_dir from ..nbsetuptools import NBSetup class NBSetupTestCase(unittest.TestCase): def setUp(self): self.prefix = tempfile.mkdtemp() self.params = { 'prefix': self.prefix, 'static': os.path.join(os.path.dirname(__file__), 'support'), } def test_initialize(self): assert NBSetup('name').path == jupyter_config_dir() assert NBSetup('name', prefix="/tmp").path == "/tmp/etc/jupyter" def test_install(self): nb_setup = NBSetup('name', **self.params) nb_setup.install() assert os.path.exists( os.path.join(self.prefix, 'share', 'jupyter', 'nbextensions', 'name')) def test_enable(self): nb_setup = NBSetup('name', **self.params) nb_setup.enable() for f in ['notebook.json', 'tree.json', 'edit.json']: assert os.path.exists( os.path.join(self.prefix, 'etc', 'jupyter', 'nbconfig', f) ) if __name__ == '__main__': unittest.main()
8a0fc8a9241a7d090f801101cd5324d15e7ae990
heutagogy/views.py
heutagogy/views.py
from heutagogy import app import heutagogy.persistence from flask import request, jsonify, Response import json import datetime import sqlite3 heutagogy.persistence.initialize() @app.route('/') def index(): return 'Hello, world!' @app.route('/api/v1/bookmarks', methods=['POST']) def bookmarks_post(): r = request.get_json() bookmark = dict() try: bookmark['url'] = r['url'] except: return jsonify(message='url field is mandatory'), 400 bookmark['title'] = r['title'] if 'title' in r else bookmark['url'] bookmark['timestamp'] = r['timestamp'] if 'timestamp' in r else datetime.datetime.utcnow().isoformat(' ') result = heutagogy.persistence.save_bookmark(bookmark) return jsonify(**result), 201 @app.route('/api/v1/bookmarks', methods=['GET']) def bookmarks_get(): result = heutagogy.persistence.get_bookmarks() return Response(json.dumps(result), mimetype='application/json')
from heutagogy import app import heutagogy.persistence from flask import request, jsonify, Response import json import datetime import sqlite3 heutagogy.persistence.initialize() @app.route('/') def index(): return 'Hello, world!' @app.route('/api/v1/bookmarks', methods=['POST']) def bookmarks_post(): r = request.get_json(force=True) bookmark = dict() try: bookmark['url'] = r['url'] except: return jsonify(message='url field is mandatory'), 400 bookmark['title'] = r['title'] if 'title' in r else bookmark['url'] bookmark['timestamp'] = r['timestamp'] if 'timestamp' in r else datetime.datetime.utcnow().isoformat(' ') result = heutagogy.persistence.save_bookmark(bookmark) return jsonify(**result), 201 @app.route('/api/v1/bookmarks', methods=['GET']) def bookmarks_get(): result = heutagogy.persistence.get_bookmarks() return Response(json.dumps(result), mimetype='application/json')
Fix reading json from client (ignore mimetype).
Fix reading json from client (ignore mimetype). • http://stackoverflow.com/a/14112400/2517622
Python
agpl-3.0
heutagogy/heutagogy-backend,heutagogy/heutagogy-backend
<REPLACE_OLD> request.get_json() <REPLACE_NEW> request.get_json(force=True) <REPLACE_END> <|endoftext|> from heutagogy import app import heutagogy.persistence from flask import request, jsonify, Response import json import datetime import sqlite3 heutagogy.persistence.initialize() @app.route('/') def index(): return 'Hello, world!' @app.route('/api/v1/bookmarks', methods=['POST']) def bookmarks_post(): r = request.get_json(force=True) bookmark = dict() try: bookmark['url'] = r['url'] except: return jsonify(message='url field is mandatory'), 400 bookmark['title'] = r['title'] if 'title' in r else bookmark['url'] bookmark['timestamp'] = r['timestamp'] if 'timestamp' in r else datetime.datetime.utcnow().isoformat(' ') result = heutagogy.persistence.save_bookmark(bookmark) return jsonify(**result), 201 @app.route('/api/v1/bookmarks', methods=['GET']) def bookmarks_get(): result = heutagogy.persistence.get_bookmarks() return Response(json.dumps(result), mimetype='application/json')
Fix reading json from client (ignore mimetype). • http://stackoverflow.com/a/14112400/2517622 from heutagogy import app import heutagogy.persistence from flask import request, jsonify, Response import json import datetime import sqlite3 heutagogy.persistence.initialize() @app.route('/') def index(): return 'Hello, world!' @app.route('/api/v1/bookmarks', methods=['POST']) def bookmarks_post(): r = request.get_json() bookmark = dict() try: bookmark['url'] = r['url'] except: return jsonify(message='url field is mandatory'), 400 bookmark['title'] = r['title'] if 'title' in r else bookmark['url'] bookmark['timestamp'] = r['timestamp'] if 'timestamp' in r else datetime.datetime.utcnow().isoformat(' ') result = heutagogy.persistence.save_bookmark(bookmark) return jsonify(**result), 201 @app.route('/api/v1/bookmarks', methods=['GET']) def bookmarks_get(): result = heutagogy.persistence.get_bookmarks() return Response(json.dumps(result), mimetype='application/json')
cf748e2bc4f28a11c79555f2e6c3d1f89d027709
tests/test_memory_leak.py
tests/test_memory_leak.py
import resource import pytest from .models import TestModel as DirtyMixinModel pytestmark = pytest.mark.django_db def test_rss_usage(): DirtyMixinModel() rss_1 = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss for _ in range(1000): DirtyMixinModel() rss_2 = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss assert rss_2 == rss_1, 'There is a memory leak!'
import gc import resource import pytest from .models import TestModel as DirtyMixinModel pytestmark = pytest.mark.django_db def test_rss_usage(): DirtyMixinModel() gc.collect() rss_1 = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss for _ in range(1000): DirtyMixinModel() gc.collect() rss_2 = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss assert rss_2 == rss_1, 'There is a memory leak!'
Call gc.collect() before measuring memory usage.
Call gc.collect() before measuring memory usage.
Python
bsd-3-clause
romgar/django-dirtyfields,smn/django-dirtyfields
<INSERT> gc import <INSERT_END> <INSERT> gc.collect() <INSERT_END> <INSERT> gc.collect() <INSERT_END> <|endoftext|> import gc import resource import pytest from .models import TestModel as DirtyMixinModel pytestmark = pytest.mark.django_db def test_rss_usage(): DirtyMixinModel() gc.collect() rss_1 = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss for _ in range(1000): DirtyMixinModel() gc.collect() rss_2 = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss assert rss_2 == rss_1, 'There is a memory leak!'
Call gc.collect() before measuring memory usage. import resource import pytest from .models import TestModel as DirtyMixinModel pytestmark = pytest.mark.django_db def test_rss_usage(): DirtyMixinModel() rss_1 = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss for _ in range(1000): DirtyMixinModel() rss_2 = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss assert rss_2 == rss_1, 'There is a memory leak!'
b698f6925b4629d7473fbe42806f54068d98428a
tests/component/test_component_identidock.py
tests/component/test_component_identidock.py
import sys print(sys.path)
import pytest import requests from time import sleep COMPONENT_INDEX_URL = "http://identidock:5000" COMPONENT_MONSTER_BASE_URL = COMPONENT_INDEX_URL + '/monster' def test_get_mainpage(): print('component tester sleeping for 1 sec to let the identidock app to be ready adn also start its server') sleep(1) page = requests.get(COMPONENT_INDEX_URL) assert page.status_code == 200 assert 'Joe Bloggs' in str(page.text) def test_post_mainpage(): page = requests.post(COMPONENT_INDEX_URL, data=dict(name="Moby Dock")) assert page.status_code == 200 assert 'Moby Dock' in str(page.text) def test_mainpage_html_escaping(): page = requests.post(COMPONENT_INDEX_URL, data=dict(name='"><b>TEST</b><!--')) assert page.status_code == 200 assert '<b>' not in str(page.text) def test_get_identicon_with_valid_name_and_invalid_post_method_should_return_405(): name_hash = 'ABCDEF123456789' page = requests.post('{0}/{1}'.format(COMPONENT_MONSTER_BASE_URL, name_hash)) assert page.status_code == 405 def test_get_identicon_with_valid_name_and_cache_miss(): name_hash = 'ABCDEF123456789' page = requests.get('{0}/{1}'.format(COMPONENT_MONSTER_BASE_URL, name_hash)) # print('page.content : {0}'.format(page.content)) assert page.status_code == 200 def test_get_identicon_with_valid_name_and_cache_hit(): name_hash = 'ABCDEF123456789' page = requests.get('{0}/{1}'.format(COMPONENT_MONSTER_BASE_URL, name_hash)) # print('page.content : {0}'.format(page.content)) assert page.status_code == 200 def test_get_identicon_with_insecure_and_unescaped_invalid_name_hash(): invalid_name_hash = '<b>;i_am_invalid|name <{"' page = requests.get('{0}/{1}'.format(COMPONENT_MONSTER_BASE_URL, invalid_name_hash)) # print('page.content : {0}'.format(page.content)) assert page.status_code == 200 if __name__ == '__main__': # unittest.main() pytest.main()
Add component test functions using pytest
Add component test functions using pytest
Python
mit
anirbanroydas/ci-testing-python,anirbanroydas/ci-testing-python,anirbanroydas/ci-testing-python
<REPLACE_OLD> sys print(sys.path) <REPLACE_NEW> pytest import requests from time import sleep COMPONENT_INDEX_URL = "http://identidock:5000" COMPONENT_MONSTER_BASE_URL = COMPONENT_INDEX_URL + '/monster' def test_get_mainpage(): print('component tester sleeping for 1 sec to let the identidock app to be ready adn also start its server') sleep(1) page = requests.get(COMPONENT_INDEX_URL) assert page.status_code == 200 assert 'Joe Bloggs' in str(page.text) def test_post_mainpage(): page = requests.post(COMPONENT_INDEX_URL, data=dict(name="Moby Dock")) assert page.status_code == 200 assert 'Moby Dock' in str(page.text) def test_mainpage_html_escaping(): page = requests.post(COMPONENT_INDEX_URL, data=dict(name='"><b>TEST</b><!--')) assert page.status_code == 200 assert '<b>' not in str(page.text) def test_get_identicon_with_valid_name_and_invalid_post_method_should_return_405(): name_hash = 'ABCDEF123456789' page = requests.post('{0}/{1}'.format(COMPONENT_MONSTER_BASE_URL, name_hash)) assert page.status_code == 405 def test_get_identicon_with_valid_name_and_cache_miss(): name_hash = 'ABCDEF123456789' page = requests.get('{0}/{1}'.format(COMPONENT_MONSTER_BASE_URL, name_hash)) # print('page.content : {0}'.format(page.content)) assert page.status_code == 200 def test_get_identicon_with_valid_name_and_cache_hit(): name_hash = 'ABCDEF123456789' page = requests.get('{0}/{1}'.format(COMPONENT_MONSTER_BASE_URL, name_hash)) # print('page.content : {0}'.format(page.content)) assert page.status_code == 200 def test_get_identicon_with_insecure_and_unescaped_invalid_name_hash(): invalid_name_hash = '<b>;i_am_invalid|name <{"' page = requests.get('{0}/{1}'.format(COMPONENT_MONSTER_BASE_URL, invalid_name_hash)) # print('page.content : {0}'.format(page.content)) assert page.status_code == 200 if __name__ == '__main__': # unittest.main() pytest.main() <REPLACE_END> <|endoftext|> import pytest import requests from time import sleep COMPONENT_INDEX_URL = "http://identidock:5000" COMPONENT_MONSTER_BASE_URL = COMPONENT_INDEX_URL + '/monster' def test_get_mainpage(): print('component tester sleeping for 1 sec to let the identidock app to be ready adn also start its server') sleep(1) page = requests.get(COMPONENT_INDEX_URL) assert page.status_code == 200 assert 'Joe Bloggs' in str(page.text) def test_post_mainpage(): page = requests.post(COMPONENT_INDEX_URL, data=dict(name="Moby Dock")) assert page.status_code == 200 assert 'Moby Dock' in str(page.text) def test_mainpage_html_escaping(): page = requests.post(COMPONENT_INDEX_URL, data=dict(name='"><b>TEST</b><!--')) assert page.status_code == 200 assert '<b>' not in str(page.text) def test_get_identicon_with_valid_name_and_invalid_post_method_should_return_405(): name_hash = 'ABCDEF123456789' page = requests.post('{0}/{1}'.format(COMPONENT_MONSTER_BASE_URL, name_hash)) assert page.status_code == 405 def test_get_identicon_with_valid_name_and_cache_miss(): name_hash = 'ABCDEF123456789' page = requests.get('{0}/{1}'.format(COMPONENT_MONSTER_BASE_URL, name_hash)) # print('page.content : {0}'.format(page.content)) assert page.status_code == 200 def test_get_identicon_with_valid_name_and_cache_hit(): name_hash = 'ABCDEF123456789' page = requests.get('{0}/{1}'.format(COMPONENT_MONSTER_BASE_URL, name_hash)) # print('page.content : {0}'.format(page.content)) assert page.status_code == 200 def test_get_identicon_with_insecure_and_unescaped_invalid_name_hash(): invalid_name_hash = '<b>;i_am_invalid|name <{"' page = requests.get('{0}/{1}'.format(COMPONENT_MONSTER_BASE_URL, invalid_name_hash)) # print('page.content : {0}'.format(page.content)) assert page.status_code == 200 if __name__ == '__main__': # unittest.main() pytest.main()
Add component test functions using pytest import sys

print(sys.path)
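A note on the tests above: the fixed sleep(1) in test_get_mainpage is a readiness workaround and can flake if the identidock container takes longer to start. A polling helper is more robust; this is a minimal sketch, and wait_for_service is a hypothetical name, not part of the recorded commit:

import time
import requests

def wait_for_service(url, timeout=30):
    # Poll until the service answers instead of sleeping a fixed interval.
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            if requests.get(url).status_code == 200:
                return
        except requests.ConnectionError:
            pass  # service not listening yet; keep polling
        time.sleep(0.5)
    raise RuntimeError('service at {0} never became ready'.format(url))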
2c141722aa8478b7e6a078d02206a26db3772a95
setup.py
setup.py
import os

from setuptools import setup

def getPackages(base):
    packages = []

    def visit(arg, directory, files):
        if '__init__.py' in files:
            packages.append(directory.replace('/', '.'))

    os.path.walk(base, visit, None)
    return packages

setup(
    name='tryfer',
    version='0.1',
    description='Twisted Zipkin Tracing Library',
    classifiers=[
        'Development Status :: 4 - Beta',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 2.7',
        'Framework :: Twisted'
    ],
    license='APL2',
    url='https://github.com/racker/tryfer',
    packages=getPackages('tryfer'),
    install_requires=[
        'Twisted >= 12.0.0',
        'thrift == 0.8.0',
        'scrivener == 0.2'
    ],
)
import os

from setuptools import setup

def getPackages(base):
    packages = []

    def visit(arg, directory, files):
        if '__init__.py' in files:
            packages.append(directory.replace('/', '.'))

    os.path.walk(base, visit, None)
    return packages

setup(
    name='tryfer',
    version='0.1',
    description='Twisted Zipkin Tracing Library',
    classifiers=[
        'Development Status :: 4 - Beta',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 2.7',
        'Framework :: Twisted'
    ],
    maintainer='David Reid',
    maintainer_email='[email protected]',
    license='APL2',
    url='https://github.com/racker/tryfer',
    long_description=open('README.rst').read(),
    packages=getPackages('tryfer'),
    install_requires=[
        'Twisted >= 12.0.0',
        'thrift == 0.8.0',
        'scrivener == 0.2'
    ],
)
Add maintainer and long description.
Add maintainer and long description.
Python
apache-2.0
tryfer/tryfer
<INSERT> maintainer='David Reid',
    maintainer_email='[email protected]', <INSERT_END> <REPLACE_OLD> url='https://github.com/racker/tryfer', <REPLACE_NEW> url='https://github.com/racker/tryfer',
    long_description=open('README.rst').read(), <REPLACE_END> <|endoftext|> import os

from setuptools import setup

def getPackages(base):
    packages = []

    def visit(arg, directory, files):
        if '__init__.py' in files:
            packages.append(directory.replace('/', '.'))

    os.path.walk(base, visit, None)
    return packages

setup(
    name='tryfer',
    version='0.1',
    description='Twisted Zipkin Tracing Library',
    classifiers=[
        'Development Status :: 4 - Beta',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 2.7',
        'Framework :: Twisted'
    ],
    maintainer='David Reid',
    maintainer_email='[email protected]',
    license='APL2',
    url='https://github.com/racker/tryfer',
    long_description=open('README.rst').read(),
    packages=getPackages('tryfer'),
    install_requires=[
        'Twisted >= 12.0.0',
        'thrift == 0.8.0',
        'scrivener == 0.2'
    ],
)
Add maintainer and long description. import os

from setuptools import setup

def getPackages(base):
    packages = []

    def visit(arg, directory, files):
        if '__init__.py' in files:
            packages.append(directory.replace('/', '.'))

    os.path.walk(base, visit, None)
    return packages

setup(
    name='tryfer',
    version='0.1',
    description='Twisted Zipkin Tracing Library',
    classifiers=[
        'Development Status :: 4 - Beta',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 2.7',
        'Framework :: Twisted'
    ],
    license='APL2',
    url='https://github.com/racker/tryfer',
    packages=getPackages('tryfer'),
    install_requires=[
        'Twisted >= 12.0.0',
        'thrift == 0.8.0',
        'scrivener == 0.2'
    ],
)
0e593183ccf9fe719d8dc6ced05a9967698f5c7d
api/app.py
api/app.py
from flask import Flask
from flask import request
from flask import jsonify

from y_text_recommender_system.recommender import recommend

app = Flask(__name__)

class InvalidUsage(Exception):
    status_code = 400

    def __init__(self, message, payload=None):
        Exception.__init__(self)
        self.message = message
        self.payload = payload

    def to_dict(self):
        rv = dict(self.payload or ())
        rv['message'] = self.message
        return rv

@app.errorhandler(InvalidUsage)
def handle_invalid_usage(error):
    response = jsonify(error.to_dict())
    response.status_code = error.status_code
    return response

@app.route('/')
def hello_world():
    return 'Hello, World!'

@app.route('/recommender/', methods=['GET', 'POST'])
def recommender():
    content = request.get_json()
    if content is not None:
        doc = content.get('doc', {})
        docs = content.get('docs', [])
        if doc == {}:
            msg = 'The parameter `doc` is missing or empty'
            raise InvalidUsage(msg)
        if len(docs) == 0:
            msg = 'The parameter `docs` is missing or empty'
            raise InvalidUsage(msg)
        result = recommend(doc, docs)
        return jsonify(result)
    else:
        msg = 'You need to send the parameters: doc and docs'
        raise InvalidUsage(msg)
from flask import Flask
from flask import request
from flask import jsonify

from y_text_recommender_system.recommender import recommend

app = Flask(__name__)

class InvalidUsage(Exception):
    status_code = 400

    def __init__(self, message, payload=None):
        Exception.__init__(self)
        self.message = message
        self.payload = payload

    def to_dict(self):
        rv = dict(self.payload or ())
        rv['message'] = self.message
        return rv

@app.errorhandler(InvalidUsage)
def handle_invalid_usage(error):
    response = jsonify(error.to_dict())
    response.status_code = error.status_code
    return response

@app.route('/')
def hello_world():
    return 'Hello, World!'

@app.route('/recommender/', methods=['POST'])
def recommender():
    content = request.get_json()
    if content is not None:
        doc = content.get('doc', {})
        docs = content.get('docs', [])
        if doc == {}:
            msg = 'The parameter `doc` is missing or empty'
            raise InvalidUsage(msg)
        if len(docs) == 0:
            msg = 'The parameter `docs` is missing or empty'
            raise InvalidUsage(msg)
        result = recommend(doc, docs)
        return jsonify(result)
    else:
        msg = 'You need to send the parameters: doc and docs'
        raise InvalidUsage(msg)
Remove GET options in url
Remove GET options in url
Python
mit
joaojunior/y_text_recommender_system
<REPLACE_OLD> methods=['GET', 'POST'])
def <REPLACE_NEW> methods=['POST'])
def <REPLACE_END> <|endoftext|> from flask import Flask
from flask import request
from flask import jsonify

from y_text_recommender_system.recommender import recommend

app = Flask(__name__)

class InvalidUsage(Exception):
    status_code = 400

    def __init__(self, message, payload=None):
        Exception.__init__(self)
        self.message = message
        self.payload = payload

    def to_dict(self):
        rv = dict(self.payload or ())
        rv['message'] = self.message
        return rv

@app.errorhandler(InvalidUsage)
def handle_invalid_usage(error):
    response = jsonify(error.to_dict())
    response.status_code = error.status_code
    return response

@app.route('/')
def hello_world():
    return 'Hello, World!'

@app.route('/recommender/', methods=['POST'])
def recommender():
    content = request.get_json()
    if content is not None:
        doc = content.get('doc', {})
        docs = content.get('docs', [])
        if doc == {}:
            msg = 'The parameter `doc` is missing or empty'
            raise InvalidUsage(msg)
        if len(docs) == 0:
            msg = 'The parameter `docs` is missing or empty'
            raise InvalidUsage(msg)
        result = recommend(doc, docs)
        return jsonify(result)
    else:
        msg = 'You need to send the parameters: doc and docs'
        raise InvalidUsage(msg)
Remove GET options in url from flask import Flask
from flask import request
from flask import jsonify

from y_text_recommender_system.recommender import recommend

app = Flask(__name__)

class InvalidUsage(Exception):
    status_code = 400

    def __init__(self, message, payload=None):
        Exception.__init__(self)
        self.message = message
        self.payload = payload

    def to_dict(self):
        rv = dict(self.payload or ())
        rv['message'] = self.message
        return rv

@app.errorhandler(InvalidUsage)
def handle_invalid_usage(error):
    response = jsonify(error.to_dict())
    response.status_code = error.status_code
    return response

@app.route('/')
def hello_world():
    return 'Hello, World!'

@app.route('/recommender/', methods=['GET', 'POST'])
def recommender():
    content = request.get_json()
    if content is not None:
        doc = content.get('doc', {})
        docs = content.get('docs', [])
        if doc == {}:
            msg = 'The parameter `doc` is missing or empty'
            raise InvalidUsage(msg)
        if len(docs) == 0:
            msg = 'The parameter `docs` is missing or empty'
            raise InvalidUsage(msg)
        result = recommend(doc, docs)
        return jsonify(result)
    else:
        msg = 'You need to send the parameters: doc and docs'
        raise InvalidUsage(msg)
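A note on the route change above: once methods=['POST'] is the only allowed method, Flask itself answers GET requests on /recommender/ with 405 Method Not Allowed. A quick sketch of verifying that with Flask's built-in test client (an illustration, not part of the recorded commit; assumes the module is importable as app):

from app import app

client = app.test_client()
assert client.get('/recommender/').status_code == 405  # GET is now rejected by Flask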
6bc1f6e466fa09dd0bc6a076f9081e1aa03efdc7
examples/translations/dutch_test_1.py
examples/translations/dutch_test_1.py
# Dutch Language Test
from seleniumbase.translate.dutch import Testgeval

class MijnTestklasse(Testgeval):

    def test_voorbeeld_1(self):
        self.openen("https://nl.wikipedia.org/wiki/Hoofdpagina")
        self.controleren_element('a[title*="hoofdpagina gaan"]')
        self.controleren_tekst("Welkom op Wikipedia", "td.hp-welkom")
        self.typ("#searchInput", "Stroopwafel")
        self.klik("#searchButton")
        self.controleren_tekst("Stroopwafel", "#firstHeading")
        self.controleren_element('img[alt="Stroopwafels"]')
        self.typ("#searchInput", "Rijksmuseum Amsterdam")
        self.klik("#searchButton")
        self.controleren_tekst("Rijksmuseum", "#firstHeading")
        self.controleren_element('img[alt="Het Rijksmuseum"]')
        self.terug()
        self.controleren_ware("Stroopwafel" in self.huidige_url_ophalen())
        self.vooruit()
        self.controleren_ware("Rijksmuseum" in self.huidige_url_ophalen())
# Dutch Language Test
from seleniumbase.translate.dutch import Testgeval

class MijnTestklasse(Testgeval):

    def test_voorbeeld_1(self):
        self.openen("https://nl.wikipedia.org/wiki/Hoofdpagina")
        self.controleren_element('a[title*="hoofdpagina gaan"]')
        self.controleren_tekst("Welkom op Wikipedia", "td.hp-welkom")
        self.typ("#searchInput", "Stroopwafel")
        self.klik("#searchButton")
        self.controleren_tekst("Stroopwafel", "#firstHeading")
        self.controleren_element('img[src*="Stroopwafels"]')
        self.typ("#searchInput", "Rijksmuseum Amsterdam")
        self.klik("#searchButton")
        self.controleren_tekst("Rijksmuseum", "#firstHeading")
        self.controleren_element('img[src*="Rijksmuseum"]')
        self.terug()
        self.controleren_ware("Stroopwafel" in self.huidige_url_ophalen())
        self.vooruit()
        self.controleren_ware("Rijksmuseum" in self.huidige_url_ophalen())
Update the Dutch example test
Update the Dutch example test
Python
mit
seleniumbase/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/SeleniumBase
<REPLACE_OLD> self.controleren_element('img[alt="Stroopwafels"]') <REPLACE_NEW> self.controleren_element('img[src*="Stroopwafels"]') <REPLACE_END> <REPLACE_OLD> self.controleren_element('img[alt="Het Rijksmuseum"]') <REPLACE_NEW> self.controleren_element('img[src*="Rijksmuseum"]') <REPLACE_END> <|endoftext|> # Dutch Language Test
from seleniumbase.translate.dutch import Testgeval

class MijnTestklasse(Testgeval):

    def test_voorbeeld_1(self):
        self.openen("https://nl.wikipedia.org/wiki/Hoofdpagina")
        self.controleren_element('a[title*="hoofdpagina gaan"]')
        self.controleren_tekst("Welkom op Wikipedia", "td.hp-welkom")
        self.typ("#searchInput", "Stroopwafel")
        self.klik("#searchButton")
        self.controleren_tekst("Stroopwafel", "#firstHeading")
        self.controleren_element('img[src*="Stroopwafels"]')
        self.typ("#searchInput", "Rijksmuseum Amsterdam")
        self.klik("#searchButton")
        self.controleren_tekst("Rijksmuseum", "#firstHeading")
        self.controleren_element('img[src*="Rijksmuseum"]')
        self.terug()
        self.controleren_ware("Stroopwafel" in self.huidige_url_ophalen())
        self.vooruit()
        self.controleren_ware("Rijksmuseum" in self.huidige_url_ophalen())
Update the Dutch example test # Dutch Language Test
from seleniumbase.translate.dutch import Testgeval

class MijnTestklasse(Testgeval):

    def test_voorbeeld_1(self):
        self.openen("https://nl.wikipedia.org/wiki/Hoofdpagina")
        self.controleren_element('a[title*="hoofdpagina gaan"]')
        self.controleren_tekst("Welkom op Wikipedia", "td.hp-welkom")
        self.typ("#searchInput", "Stroopwafel")
        self.klik("#searchButton")
        self.controleren_tekst("Stroopwafel", "#firstHeading")
        self.controleren_element('img[alt="Stroopwafels"]')
        self.typ("#searchInput", "Rijksmuseum Amsterdam")
        self.klik("#searchButton")
        self.controleren_tekst("Rijksmuseum", "#firstHeading")
        self.controleren_element('img[alt="Het Rijksmuseum"]')
        self.terug()
        self.controleren_ware("Stroopwafel" in self.huidige_url_ophalen())
        self.vooruit()
        self.controleren_ware("Rijksmuseum" in self.huidige_url_ophalen())
5a6a96435b7cf45cbbc5f2b81a7be84cd986b456
haas/__main__.py
haas/__main__.py
# -*- coding: utf-8 -*-
# Copyright (c) 2013-2014 Simon Jagoe
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the 3-clause BSD license.  See the LICENSE.txt file for details.
import sys  # pragma: no cover

from .main import main  # pragma: no cover

if __name__ == '__main__':
    sys.exit(main())
# -*- coding: utf-8 -*-
# Copyright (c) 2013-2014 Simon Jagoe
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the 3-clause BSD license.  See the LICENSE.txt file for details.
import sys  # pragma: no cover

from haas.main import main  # pragma: no cover

if __name__ == '__main__':
    sys.exit(main())
Use absolute import for main entry point.
Use absolute import for main entry point.
Python
bsd-3-clause
itziakos/haas,sjagoe/haas,scalative/haas,sjagoe/haas,itziakos/haas,scalative/haas
<REPLACE_OLD> .main <REPLACE_NEW> haas.main <REPLACE_END> <|endoftext|> # -*- coding: utf-8 -*-
# Copyright (c) 2013-2014 Simon Jagoe
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the 3-clause BSD license.  See the LICENSE.txt file for details.
import sys  # pragma: no cover

from haas.main import main  # pragma: no cover

if __name__ == '__main__':
    sys.exit(main())
Use absolute import for main entry point. # -*- coding: utf-8 -*-
# Copyright (c) 2013-2014 Simon Jagoe
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the 3-clause BSD license.  See the LICENSE.txt file for details.
import sys  # pragma: no cover

from .main import main  # pragma: no cover

if __name__ == '__main__':
    sys.exit(main())
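Why the absolute import matters in this commit: from .main import main only resolves when Python has a package context for __main__, i.e. under python -m haas; executing the file directly leaves __package__ empty and the relative import fails, while the absolute form resolves whenever haas is installed or on sys.path, regardless of invocation. A sketch of the two cases (an illustration, not from the commit):

# python -m haas           -> __package__ == 'haas'; both import styles work
# python haas/__main__.py  -> __package__ is empty; `from .main import main`
#                             raises "attempted relative import" errors
from haas.main import main  # resolves via sys.path in either invocation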
ffff9d10862391289e4fba8ac120983ac6368200
setup.py
setup.py
from setuptools import setup

setup(
    name='cmsplugin-biography',
    version='0.0.1',
    packages=['cmsplugin_biography', 'cmsplugin_biography.migrations', ],
    install_requires=[
        'django-cms',
        'djangocms-text-ckeditor==1.0.9',
        'easy-thumbnails==1.2',
    ],
    author='Kevin Richardson',
    author_email='[email protected]',
    description='A Django CMS plugin that manages and displays biographical information',
    long_description=open('README.rst').read(),
    license='MIT',
    url='http://github.com/kfr2/cmsplugin-biography',
    include_package_data=True
)
from setuptools import setup

setup(
    name='cmsplugin-biography',
    version='0.0.1',
    packages=['cmsplugin_biography', 'cmsplugin_biography.migrations', ],
    install_requires=[
        'django-cms',
        'djangocms-text-ckeditor==1.0.9',
        'easy-thumbnails==1.2',
    ],
    author='Kevin Richardson',
    author_email='[email protected]',
    description='A Django CMS plugin that manages and displays biographical information',
    long_description=open('README.rst').read(),
    license='MIT',
    url='http://github.com/kfr2/cmsplugin-biography',
    include_package_data=True,
    zip_safe=False
)
Mark package as not zip_safe
Mark package as not zip_safe This package needs access to its templates to function. Thus, the zip_safe flag has been set to False to tell setuptools to not install the package's egg as a zip file. See http://pythonhosted.org/distribute/setuptools.html#setting-the-zip-safe-flag for further information.
Python
mit
kfr2/cmsplugin-biography
<REPLACE_OLD> include_package_data=True ) <REPLACE_NEW> include_package_data=True, zip_safe=False ) <REPLACE_END> <|endoftext|> from setuptools import setup setup( name='cmsplugin-biography', version='0.0.1', packages=['cmsplugin_biography', 'cmsplugin_biography.migrations', ], install_requires=[ 'django-cms', 'djangocms-text-ckeditor==1.0.9', 'easy-thumbnails==1.2', ], author='Kevin Richardson', author_email='[email protected]', description='A Django CMS plugin that manages and displays biographical information', long_description=open('README.rst').read(), license='MIT', url='http://github.com/kfr2/cmsplugin-biography', include_package_data=True, zip_safe=False )
Mark package as not zip_safe This package needs access to its templates to function. Thus, the zip_safe flag has been set to False to tell setuptools to not install the package's egg as a zip file. See http://pythonhosted.org/distribute/setuptools.html#setting-the-zip-safe-flag for further information. from setuptools import setup setup( name='cmsplugin-biography', version='0.0.1', packages=['cmsplugin_biography', 'cmsplugin_biography.migrations', ], install_requires=[ 'django-cms', 'djangocms-text-ckeditor==1.0.9', 'easy-thumbnails==1.2', ], author='Kevin Richardson', author_email='[email protected]', description='A Django CMS plugin that manages and displays biographical information', long_description=open('README.rst').read(), license='MIT', url='http://github.com/kfr2/cmsplugin-biography', include_package_data=True )
3c9a062ebb7745fbdefcf836165ef5cd85825417
setup.py
setup.py
from setuptools import setup, Extension
import numpy as np
import os

extension_name = '_pyaccess'
extension_version = '.1'

include_dirs = [
    'ann_1.1.2/include',
    'sparsehash-2.0.2/src',
    np.get_include(),
    '.'
]

library_dirs = [
    'ann_1.1.2/lib',
    'contraction_hierarchies'
]

packages = ['pyaccess']

libraries = [
    'ANN',
    'ch',
    'gomp']

source_files = [
    'pyaccess/accessibility.cpp',
    'pyaccess/graphalg.cpp',
    'pyaccess/nearestneighbor.cpp',
    'pyaccess/pyaccesswrap.cpp'
]

extra_compile_args = [
    '-shared',
    '-DMACOSX',
    '-DLINUX',
    '-w',
    '-std=gnu++0x',
    '-O3',
    '-fopenmp',
    '-fpic',
    '-g',
    '-Wno-deprecated',
    # '-ferror-limit=1'
]

py_modules=['pyaccess/pyaccess',
            'pyaccess/urbanaccess']

setup(
    packages=packages,
    py_modules=py_modules,
    name=extension_name,
    version=extension_version,
    ext_modules=[
        Extension(
            extension_name,
            source_files,
            include_dirs=include_dirs,
            library_dirs=library_dirs,
            libraries=libraries,
            extra_compile_args=extra_compile_args
        )
    ]
)
from setuptools import setup, Extension
import numpy as np
import os

extension_name = '_pyaccess'
extension_version = '.1'

include_dirs = [
    'ann_1.1.2/include',
    'sparsehash-2.0.2/src',
    np.get_include(),
    '.'
]

library_dirs = [
    'ann_1.1.2/lib',
    'contraction_hierarchies'
]

packages = ['pyaccess']

libraries = [
    'ANN',
    'ch',
    'gomp']

source_files = [
    'pyaccess/accessibility.cpp',
    'pyaccess/graphalg.cpp',
    'pyaccess/nearestneighbor.cpp',
    'pyaccess/pyaccesswrap.cpp'
]

extra_compile_args = [
    '-shared',
    '-DMACOSX',
    '-DLINUX',
    '-w',
    '-std=gnu++0x',
    '-O3',
    '-fopenmp',
    '-fpic',
    '-g',
    '-Wno-deprecated',
]

py_modules=['pyaccess/pyaccess',
            'pyaccess/urbanaccess']

setup(
    packages=packages,
    py_modules=py_modules,
    name='pyaccess',
    version=extension_version,
    ext_modules=[
        Extension(
            extension_name,
            source_files,
            include_dirs=include_dirs,
            library_dirs=library_dirs,
            libraries=libraries,
            extra_compile_args=extra_compile_args
        )
    ]
)
Use pyaccess as the package name.
Use pyaccess as the package name.
Python
agpl-3.0
UDST/pandana,SANDAG/pandana,UDST/pandana,rafapereirabr/pandana,SANDAG/pandana,UDST/pandana,waddell/pandana,waddell/pandana,waddell/pandana,synthicity/pandana,rafapereirabr/pandana,osPlanning/pandana,waddell/pandana,osPlanning/pandana,osPlanning/pandana,osPlanning/pandana,rafapereirabr/pandana,synthicity/pandana,rafapereirabr/pandana,SANDAG/pandana,UDST/pandana,synthicity/pandana,SANDAG/pandana,synthicity/pandana
<REPLACE_OLD> '-Wno-deprecated',
    # '-ferror-limit=1'
]

py_modules=['pyaccess/pyaccess', <REPLACE_NEW> '-Wno-deprecated',
]

py_modules=['pyaccess/pyaccess', <REPLACE_END> <REPLACE_OLD> name=extension_name, <REPLACE_NEW> name='pyaccess', <REPLACE_END> <|endoftext|> from setuptools import setup, Extension
import numpy as np
import os

extension_name = '_pyaccess'
extension_version = '.1'

include_dirs = [
    'ann_1.1.2/include',
    'sparsehash-2.0.2/src',
    np.get_include(),
    '.'
]

library_dirs = [
    'ann_1.1.2/lib',
    'contraction_hierarchies'
]

packages = ['pyaccess']

libraries = [
    'ANN',
    'ch',
    'gomp']

source_files = [
    'pyaccess/accessibility.cpp',
    'pyaccess/graphalg.cpp',
    'pyaccess/nearestneighbor.cpp',
    'pyaccess/pyaccesswrap.cpp'
]

extra_compile_args = [
    '-shared',
    '-DMACOSX',
    '-DLINUX',
    '-w',
    '-std=gnu++0x',
    '-O3',
    '-fopenmp',
    '-fpic',
    '-g',
    '-Wno-deprecated',
]

py_modules=['pyaccess/pyaccess',
            'pyaccess/urbanaccess']

setup(
    packages=packages,
    py_modules=py_modules,
    name='pyaccess',
    version=extension_version,
    ext_modules=[
        Extension(
            extension_name,
            source_files,
            include_dirs=include_dirs,
            library_dirs=library_dirs,
            libraries=libraries,
            extra_compile_args=extra_compile_args
        )
    ]
)
Use pyaccess as the package name. from setuptools import setup, Extension
import numpy as np
import os

extension_name = '_pyaccess'
extension_version = '.1'

include_dirs = [
    'ann_1.1.2/include',
    'sparsehash-2.0.2/src',
    np.get_include(),
    '.'
]

library_dirs = [
    'ann_1.1.2/lib',
    'contraction_hierarchies'
]

packages = ['pyaccess']

libraries = [
    'ANN',
    'ch',
    'gomp']

source_files = [
    'pyaccess/accessibility.cpp',
    'pyaccess/graphalg.cpp',
    'pyaccess/nearestneighbor.cpp',
    'pyaccess/pyaccesswrap.cpp'
]

extra_compile_args = [
    '-shared',
    '-DMACOSX',
    '-DLINUX',
    '-w',
    '-std=gnu++0x',
    '-O3',
    '-fopenmp',
    '-fpic',
    '-g',
    '-Wno-deprecated',
    # '-ferror-limit=1'
]

py_modules=['pyaccess/pyaccess',
            'pyaccess/urbanaccess']

setup(
    packages=packages,
    py_modules=py_modules,
    name=extension_name,
    version=extension_version,
    ext_modules=[
        Extension(
            extension_name,
            source_files,
            include_dirs=include_dirs,
            library_dirs=library_dirs,
            libraries=libraries,
            extra_compile_args=extra_compile_args
        )
    ]
)
e91b691ba2e9a83d8cc94f42bdc41c9a7350c790
setup.py
setup.py
#!/usr/bin/env python
"""Distutils installer for extras."""

from setuptools import setup
import os.path

import extras

testtools_cmd = extras.try_import('testtools.TestCommand')

def get_version():
    """Return the version of extras that we are building."""
    version = '.'.join(
        str(component) for component in extras.__version__[0:3])
    return version

def get_long_description():
    readme_path = os.path.join(
        os.path.dirname(__file__), 'README.rst')
    return open(readme_path).read()

cmdclass = {}
if testtools_cmd is not None:
    cmdclass['test'] = testtools_cmd

setup(name='extras',
      author='Testing cabal',
      author_email='[email protected]',
      url='https://github.com/testing-cabal/extras',
      description=('Useful extra bits for Python - things that should be '
                   'in the standard library'),
      long_description=get_long_description(),
      version=get_version(),
      classifiers=["License :: OSI Approved :: MIT License"],
      packages=[
          'extras',
          'extras.tests',
      ],
      cmdclass=cmdclass)
#!/usr/bin/env python
"""Distutils installer for extras."""

from setuptools import setup
import os.path

import extras

testtools_cmd = extras.try_import('testtools.TestCommand')

def get_version():
    """Return the version of extras that we are building."""
    version = '.'.join(
        str(component) for component in extras.__version__[0:3])
    return version

def get_long_description():
    readme_path = os.path.join(
        os.path.dirname(__file__), 'README.rst')
    return open(readme_path).read()

cmdclass = {}
if testtools_cmd is not None:
    cmdclass['test'] = testtools_cmd

setup(name='extras',
      author='Testing cabal',
      author_email='[email protected]',
      url='https://github.com/testing-cabal/extras',
      description=('Useful extra bits for Python - things that should be '
                   'in the standard library'),
      long_description=get_long_description(),
      version=get_version(),
      classifiers=[
          "Intended Audience :: Developers",
          "License :: OSI Approved :: MIT License",
          "Programming Language :: Python",
          "Programming Language :: Python :: 3",
      ],
      packages=[
          'extras',
          'extras.tests',
      ],
      cmdclass=cmdclass)
Add trove classifiers specifying Python 3 support.
Add trove classifiers specifying Python 3 support.
Python
mit
testing-cabal/extras
<REPLACE_OLD> classifiers=["License <REPLACE_NEW> classifiers=[
          "Intended Audience :: Developers",
          "License <REPLACE_END> <REPLACE_OLD> License"], <REPLACE_NEW> License",
          "Programming Language :: Python",
          "Programming Language :: Python :: 3",
      ], <REPLACE_END> <|endoftext|> #!/usr/bin/env python
"""Distutils installer for extras."""

from setuptools import setup
import os.path

import extras

testtools_cmd = extras.try_import('testtools.TestCommand')

def get_version():
    """Return the version of extras that we are building."""
    version = '.'.join(
        str(component) for component in extras.__version__[0:3])
    return version

def get_long_description():
    readme_path = os.path.join(
        os.path.dirname(__file__), 'README.rst')
    return open(readme_path).read()

cmdclass = {}
if testtools_cmd is not None:
    cmdclass['test'] = testtools_cmd

setup(name='extras',
      author='Testing cabal',
      author_email='[email protected]',
      url='https://github.com/testing-cabal/extras',
      description=('Useful extra bits for Python - things that should be '
                   'in the standard library'),
      long_description=get_long_description(),
      version=get_version(),
      classifiers=[
          "Intended Audience :: Developers",
          "License :: OSI Approved :: MIT License",
          "Programming Language :: Python",
          "Programming Language :: Python :: 3",
      ],
      packages=[
          'extras',
          'extras.tests',
      ],
      cmdclass=cmdclass)
Add trove classifiers specifying Python 3 support. #!/usr/bin/env python
"""Distutils installer for extras."""

from setuptools import setup
import os.path

import extras

testtools_cmd = extras.try_import('testtools.TestCommand')

def get_version():
    """Return the version of extras that we are building."""
    version = '.'.join(
        str(component) for component in extras.__version__[0:3])
    return version

def get_long_description():
    readme_path = os.path.join(
        os.path.dirname(__file__), 'README.rst')
    return open(readme_path).read()

cmdclass = {}
if testtools_cmd is not None:
    cmdclass['test'] = testtools_cmd

setup(name='extras',
      author='Testing cabal',
      author_email='[email protected]',
      url='https://github.com/testing-cabal/extras',
      description=('Useful extra bits for Python - things that should be '
                   'in the standard library'),
      long_description=get_long_description(),
      version=get_version(),
      classifiers=["License :: OSI Approved :: MIT License"],
      packages=[
          'extras',
          'extras.tests',
      ],
      cmdclass=cmdclass)
04337a036429e98edab7c2e5f17086a3ccfe263b
jsonsempai.py
jsonsempai.py
import sys

class SempaiLoader(object):
    def __init__(self, *args):
        print args

    def find_module(self, fullname, path=None):
        print 'finding', fullname, path
        if fullname == 'simple':
            return self
        return None

sys.path_hooks.append(SempaiLoader)
sys.path.insert(0, 'simple')
Add very simple module finder
Add very simple module finder
Python
mit
kragniz/json-sempai
<INSERT> import sys

class SempaiLoader(object): <INSERT_END> <INSERT> def __init__(self, *args):
        print args

    def find_module(self, fullname, path=None):
        print 'finding', fullname, path
        if fullname == 'simple':
            return self
        return None

sys.path_hooks.append(SempaiLoader)
sys.path.insert(0, 'simple') <INSERT_END> <|endoftext|> import sys

class SempaiLoader(object):
    def __init__(self, *args):
        print args

    def find_module(self, fullname, path=None):
        print 'finding', fullname, path
        if fullname == 'simple':
            return self
        return None

sys.path_hooks.append(SempaiLoader)
sys.path.insert(0, 'simple')
Add very simple module finder
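Worth noting about this "very simple module finder": find_module here returns the finder itself, but nothing yet imports anything, because the loader half of the Python 2-era protocol (load_module) is missing. A minimal hypothetical companion, kept in the same print-statement dialect as the record, could look like this:

import imp
import sys

def load_module(self, fullname):
    # Honour the protocol: reuse an already-imported module if present.
    if fullname in sys.modules:
        return sys.modules[fullname]
    mod = imp.new_module(fullname)
    mod.__loader__ = self
    sys.modules[fullname] = mod
    return mod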
b6bf01a5c95da0de1e6831a3cf41243e69297854
setup.py
setup.py
# Copyright (C) 2011, 2012 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2011 Isaku Yamahata <yamahata at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# a bug workaround. http://bugs.python.org/issue15881
try:
    import multiprocessing
except ImportError:
    pass

import setuptools

import ryu.hooks

ryu.hooks.save_orig()
setuptools.setup(name='ryu',
                 setup_requires=['pbr'],
                 pbr=True)
# Copyright (C) 2011, 2012 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2011 Isaku Yamahata <yamahata at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import setuptools

import ryu.hooks

ryu.hooks.save_orig()
setuptools.setup(name='ryu',
                 setup_requires=['pbr'],
                 pbr=True)
Remove workaround for issue with older python versions.
Remove workaround for issue with older python versions.
Python
apache-2.0
osrg/ryu,osrg/ryu,osrg/ryu,osrg/ryu,osrg/ryu
<REPLACE_OLD> License.

# a bug workaround. http://bugs.python.org/issue15881
try:
    import multiprocessing
except ImportError:
    pass

import <REPLACE_NEW> License.

import <REPLACE_END> <|endoftext|> # Copyright (C) 2011, 2012 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2011 Isaku Yamahata <yamahata at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import setuptools

import ryu.hooks

ryu.hooks.save_orig()
setuptools.setup(name='ryu',
                 setup_requires=['pbr'],
                 pbr=True)
Remove workaround for issue with older python versions. # Copyright (C) 2011, 2012 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2011 Isaku Yamahata <yamahata at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# a bug workaround. http://bugs.python.org/issue15881
try:
    import multiprocessing
except ImportError:
    pass

import setuptools

import ryu.hooks

ryu.hooks.save_orig()
setuptools.setup(name='ryu',
                 setup_requires=['pbr'],
                 pbr=True)
f89398d49b53b3ec43b196c22a5735696f2de021
setup.py
setup.py
from setuptools import setup, find_packages

setup(
    name='gdcdatamodel',
    packages=find_packages(),
    install_requires=[
        'pytz==2016.4',
        'graphviz==0.4.2',
        'jsonschema==2.5.1',
        'psqlgraph',
        'gdcdictionary',
        'cdisutils',
        'python-dateutil==2.4.2',
    ],
    package_data={
        "gdcdatamodel": [
            "xml_mappings/*.yaml",
        ]
    },
    dependency_links=[
        'git+https://github.com/NCI-GDC/cdisutils.git@4a75cc05c7ba2174e70cca9c9ea7e93947f7a868#egg=cdisutils',
        'git+https://github.com/NCI-GDC/psqlgraph.git@7b5de7d56aa3159a9526940eb273579ddbf084ca#egg=psqlgraph',
        'git+https://github.com/NCI-GDC/[email protected]#egg=gdcdictionary',
    ],
    entry_points={
        'console_scripts': [
            'gdc_postgres_admin=gdcdatamodel.gdc_postgres_admin:main'
        ]
    },
)
from setuptools import setup, find_packages

setup(
    name='gdcdatamodel',
    packages=find_packages(),
    install_requires=[
        'pytz==2016.4',
        'graphviz==0.4.2',
        'jsonschema==2.5.1',
        'psqlgraph',
        'gdcdictionary',
        'cdisutils',
        'python-dateutil==2.4.2',
    ],
    package_data={
        "gdcdatamodel": [
            "xml_mappings/*.yaml",
        ]
    },
    dependency_links=[
        'git+https://github.com/NCI-GDC/cdisutils.git@863ce13772116b51bcf5ce7e556f5df3cb9e6f63#egg=cdisutils',
        'git+https://github.com/NCI-GDC/[email protected]#egg=psqlgraph',
        'git+https://github.com/NCI-GDC/[email protected]#egg=gdcdictionary',
    ],
    entry_points={
        'console_scripts': [
            'gdc_postgres_admin=gdcdatamodel.gdc_postgres_admin:main'
        ]
    },
)
Update pins to match horton dictionary/api
chore(pins): Update pins to match horton dictionary/api

- Update pins to match horton dictionary/api
Python
apache-2.0
NCI-GDC/gdcdatamodel,NCI-GDC/gdcdatamodel
<REPLACE_OLD> 'git+https://github.com/NCI-GDC/cdisutils.git@4a75cc05c7ba2174e70cca9c9ea7e93947f7a868#egg=cdisutils', <REPLACE_NEW> 'git+https://github.com/NCI-GDC/cdisutils.git@863ce13772116b51bcf5ce7e556f5df3cb9e6f63#egg=cdisutils', <REPLACE_END> <REPLACE_OLD> 'git+https://github.com/NCI-GDC/psqlgraph.git@7b5de7d56aa3159a9526940eb273579ddbf084ca#egg=psqlgraph', <REPLACE_NEW> 'git+https://github.com/NCI-GDC/[email protected]#egg=psqlgraph', <REPLACE_END> <REPLACE_OLD> 'git+https://github.com/NCI-GDC/[email protected]#egg=gdcdictionary', <REPLACE_NEW> 'git+https://github.com/NCI-GDC/[email protected]#egg=gdcdictionary', <REPLACE_END> <|endoftext|> from setuptools import setup, find_packages

setup(
    name='gdcdatamodel',
    packages=find_packages(),
    install_requires=[
        'pytz==2016.4',
        'graphviz==0.4.2',
        'jsonschema==2.5.1',
        'psqlgraph',
        'gdcdictionary',
        'cdisutils',
        'python-dateutil==2.4.2',
    ],
    package_data={
        "gdcdatamodel": [
            "xml_mappings/*.yaml",
        ]
    },
    dependency_links=[
        'git+https://github.com/NCI-GDC/cdisutils.git@863ce13772116b51bcf5ce7e556f5df3cb9e6f63#egg=cdisutils',
        'git+https://github.com/NCI-GDC/[email protected]#egg=psqlgraph',
        'git+https://github.com/NCI-GDC/[email protected]#egg=gdcdictionary',
    ],
    entry_points={
        'console_scripts': [
            'gdc_postgres_admin=gdcdatamodel.gdc_postgres_admin:main'
        ]
    },
)
chore(pins): Update pins to match horton dictionary/api

- Update pins to match horton dictionary/api from setuptools import setup, find_packages

setup(
    name='gdcdatamodel',
    packages=find_packages(),
    install_requires=[
        'pytz==2016.4',
        'graphviz==0.4.2',
        'jsonschema==2.5.1',
        'psqlgraph',
        'gdcdictionary',
        'cdisutils',
        'python-dateutil==2.4.2',
    ],
    package_data={
        "gdcdatamodel": [
            "xml_mappings/*.yaml",
        ]
    },
    dependency_links=[
        'git+https://github.com/NCI-GDC/cdisutils.git@4a75cc05c7ba2174e70cca9c9ea7e93947f7a868#egg=cdisutils',
        'git+https://github.com/NCI-GDC/psqlgraph.git@7b5de7d56aa3159a9526940eb273579ddbf084ca#egg=psqlgraph',
        'git+https://github.com/NCI-GDC/[email protected]#egg=gdcdictionary',
    ],
    entry_points={
        'console_scripts': [
            'gdc_postgres_admin=gdcdatamodel.gdc_postgres_admin:main'
        ]
    },
)
1261777b6aaaea6947a32477e340ef1597045866
nested_admin/urls.py
nested_admin/urls.py
try:
    from django.conf.urls.defaults import patterns, url
except ImportError:
    from django.conf.urls import patterns, url

urlpatterns = patterns('',
    url(r'^server-data\.js$', 'nested_admin.views.server_data_js',
        name="nesting_server_data"),
)
from django.conf.urls import patterns, url

urlpatterns = patterns('',
    url(r'^server-data\.js$', 'nested_admin.views.server_data_js',
        name="nesting_server_data"),
)
Fix DeprecationWarning in Django 1.5
Fix DeprecationWarning in Django 1.5
Python
bsd-2-clause
sbussetti/django-nested-admin,sbussetti/django-nested-admin,olivierdalang/django-nested-admin,sbussetti/django-nested-admin,olivierdalang/django-nested-admin,olivierdalang/django-nested-admin
<DELETE> try:
    from django.conf.urls.defaults import patterns, url
except ImportError:
    <DELETE_END> <|endoftext|> from django.conf.urls import patterns, url

urlpatterns = patterns('',
    url(r'^server-data\.js$', 'nested_admin.views.server_data_js',
        name="nesting_server_data"),
)
Fix DeprecationWarning in Django 1.5 try:
    from django.conf.urls.defaults import patterns, url
except ImportError:
    from django.conf.urls import patterns, url

urlpatterns = patterns('',
    url(r'^server-data\.js$', 'nested_admin.views.server_data_js',
        name="nesting_server_data"),
)
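Context for the record above: this commit targets the Django 1.5 deprecation of django.conf.urls.defaults, but patterns() itself was deprecated in Django 1.8 and removed in 1.10, along with string view references. On modern Django the same urlconf becomes a plain list with a view callable; a sketch of that later form (not part of this commit, assuming server_data_js is importable):

from django.conf.urls import url

from nested_admin.views import server_data_js

urlpatterns = [
    url(r'^server-data\.js$', server_data_js, name="nesting_server_data"),
]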
c2506fdc71f1dcff2e3455c668e78ad6b7d5d94b
scripts/fenix/fenix_download.py
scripts/fenix/fenix_download.py
# python fenix_download login_file <url> -e <file_extension> (DEFAULT = pdf) -d <download_directory>
#
#
from fenix import Fenix
import argparse

if __name__ == '__main__':
    # TODO: argparse
    parser = argparse.ArgumentParser(description='Download files from Fenix Edu pages')
    parser.add_argument('login', type=str,
                        help='path to fenix login credentials file (check login.json for format)')
    parser.add_argument('url', type=str, help='url from where to download the files from')
    parser.add('-e', '--extension', type=str, default='pdf',
               help='file extensions to download, without the leading "." (* for all, default is "pdf")')
    parser.add('-d', '--directory', type=str, default='download', help='download directory')
Add initial structure for fenix downloader
Add initial structure for fenix downloader
Python
mit
iluxonchik/python-general-repo
<INSERT> # python fenix_download login_file <url> -e <file_extension> (DEFAULT = pdf) -d <download_directory>
#
#
from fenix import Fenix
import argparse

if __name__ == '__main__': <INSERT_END> <INSERT> # TODO: argparse
    parser = argparse.ArgumentParser(description='Download files from Fenix Edu pages')
    parser.add_argument('login', type=str,
                        help='path to fenix login credentials file (check login.json for format)')
    parser.add_argument('url', type=str, help='url from where to download the files from')
    parser.add('-e', '--extension', type=str, default='pdf',
               help='file extensions to download, without the leading "." (* for all, default is "pdf")')
    parser.add('-d', '--directory', type=str, default='download', help='download directory') <INSERT_END> <|endoftext|> # python fenix_download login_file <url> -e <file_extension> (DEFAULT = pdf) -d <download_directory>
#
#
from fenix import Fenix
import argparse

if __name__ == '__main__':
    # TODO: argparse
    parser = argparse.ArgumentParser(description='Download files from Fenix Edu pages')
    parser.add_argument('login', type=str,
                        help='path to fenix login credentials file (check login.json for format)')
    parser.add_argument('url', type=str, help='url from where to download the files from')
    parser.add('-e', '--extension', type=str, default='pdf',
               help='file extensions to download, without the leading "." (* for all, default is "pdf")')
    parser.add('-d', '--directory', type=str, default='download', help='download directory')
Add initial structure for fenix downloader
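A caveat about the code recorded above: argparse.ArgumentParser has no add method, so the two parser.add(...) calls raise AttributeError as soon as the script runs; they were presumably meant to be add_argument. A corrected sketch of those lines (the trailing parse_args() call is a hypothetical addition, since the commit stops before parsing):

parser.add_argument('-e', '--extension', type=str, default='pdf',
                    help='file extensions to download, without the leading "." (* for all, default is "pdf")')
parser.add_argument('-d', '--directory', type=str, default='download', help='download directory')
args = parser.parse_args()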
f100faade749d86597e1c8c52b88d55261e7a4dc
suorganizer/wsgi.py
suorganizer/wsgi.py
""" WSGI config for suorganizer project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault("DJANGO_SETTINGS_MODULE", "suorganizer.settings") application = get_wsgi_application()
""" WSGI config for suorganizer project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application from whitenoise.django import DjangoWhiteNoise os.environ.setdefault("DJANGO_SETTINGS_MODULE", "suorganizer.settings") application = get_wsgi_application() application = DjangoWhiteNoise(application)
Use WhiteNoise for static content.
Ch29: Use WhiteNoise for static content.
Python
bsd-2-clause
jambonrose/DjangoUnleashed-1.8,jambonrose/DjangoUnleashed-1.8
<REPLACE_OLD> get_wsgi_application

os.environ.setdefault("DJANGO_SETTINGS_MODULE", <REPLACE_NEW> get_wsgi_application
from whitenoise.django import DjangoWhiteNoise

os.environ.setdefault("DJANGO_SETTINGS_MODULE", <REPLACE_END> <REPLACE_OLD> get_wsgi_application() <REPLACE_NEW> get_wsgi_application()
application = DjangoWhiteNoise(application) <REPLACE_END> <|endoftext|> """
WSGI config for suorganizer project.

It exposes the WSGI callable as a module-level variable named ``application``.

For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""

import os

from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "suorganizer.settings")

application = get_wsgi_application()
application = DjangoWhiteNoise(application)
Ch29: Use WhiteNoise for static content. """
WSGI config for suorganizer project.

It exposes the WSGI callable as a module-level variable named ``application``.

For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""

import os

from django.core.wsgi import get_wsgi_application

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "suorganizer.settings")

application = get_wsgi_application()
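A versioning note on the record above: whitenoise.django.DjangoWhiteNoise was the Django-specific entry point in the WhiteNoise 2.x/3.x era this commit targets; later releases (4.0 and up) removed it in favour of a generic WSGI wrapper, or preferably Django middleware. The equivalent WSGI wiring on a current release would look roughly like this (a sketch, not part of the commit):

from whitenoise import WhiteNoise

application = WhiteNoise(get_wsgi_application())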
44035c166ffde209a47d7739af0d56acb4ec0422
notebooks/test_notebooks.py
notebooks/test_notebooks.py
# -*- coding: utf-8 -*-
'''
Checks notebook execution result.
Equal to this command + error management:
jupyter nbconvert --to notebook --execute --ExecutePreprocessor.timeout=60 --output executed_notebook.ipynb demo.ipynb
For jupyter configuration information, run: jupyter --path
'''

# Dependencies: nbformat, nbconvert, jupyter-client, ipykernel
import io
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor
from nbconvert.preprocessors import CellExecutionError

notebook_filename = 'demo.ipynb'
run_path = '.'
notebook_filename_out = 'executed_notebook.ipynb'

with io.open(notebook_filename) as f:
    nb = nbformat.read(f, as_version=4)

ep = ExecutePreprocessor(timeout=600, kernel_name='python')

try:
    out = ep.preprocess(nb, {'metadata': {'path': run_path}})
except CellExecutionError:
    out = None
    msg = 'Error executing the notebook "%s".\n\n' % notebook_filename
    msg += 'See notebook "%s" for the traceback.' % notebook_filename_out
    print(msg)
    raise
finally:
    with io.open(notebook_filename_out, mode='wt') as f:
        # io.open avoids UnicodeEncodeError
        nbformat.write(nb, f)
Add script to automate notebooks testing
Add script to automate notebooks testing
Python
agpl-3.0
openfisca/openfisca-tunisia,openfisca/openfisca-tunisia
<INSERT> # -*- coding: utf-8 -*-
'''
Checks notebook execution result.
Equal to this command + error management:
jupyter nbconvert --to notebook --execute --ExecutePreprocessor.timeout=60 --output executed_notebook.ipynb demo.ipynb
For jupyter configuration information, run: jupyter --path
'''

# Dependencies: nbformat, nbconvert, jupyter-client, ipykernel
import io
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor
from nbconvert.preprocessors import CellExecutionError

notebook_filename = 'demo.ipynb'
run_path = '.'
notebook_filename_out = 'executed_notebook.ipynb'

with io.open(notebook_filename) as f: <INSERT_END> <INSERT> nb = nbformat.read(f, as_version=4)

ep = ExecutePreprocessor(timeout=600, kernel_name='python')

try:
    out = ep.preprocess(nb, {'metadata': {'path': run_path}})
except CellExecutionError:
    out = None
    msg = 'Error executing the notebook "%s".\n\n' % notebook_filename
    msg += 'See notebook "%s" for the traceback.' % notebook_filename_out
    print(msg)
    raise
finally:
    with io.open(notebook_filename_out, mode='wt') as f:
        # io.open avoids UnicodeEncodeError
        nbformat.write(nb, f) <INSERT_END> <|endoftext|> # -*- coding: utf-8 -*-
'''
Checks notebook execution result.
Equal to this command + error management:
jupyter nbconvert --to notebook --execute --ExecutePreprocessor.timeout=60 --output executed_notebook.ipynb demo.ipynb
For jupyter configuration information, run: jupyter --path
'''

# Dependencies: nbformat, nbconvert, jupyter-client, ipykernel
import io
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor
from nbconvert.preprocessors import CellExecutionError

notebook_filename = 'demo.ipynb'
run_path = '.'
notebook_filename_out = 'executed_notebook.ipynb'

with io.open(notebook_filename) as f:
    nb = nbformat.read(f, as_version=4)

ep = ExecutePreprocessor(timeout=600, kernel_name='python')

try:
    out = ep.preprocess(nb, {'metadata': {'path': run_path}})
except CellExecutionError:
    out = None
    msg = 'Error executing the notebook "%s".\n\n' % notebook_filename
    msg += 'See notebook "%s" for the traceback.' % notebook_filename_out
    print(msg)
    raise
finally:
    with io.open(notebook_filename_out, mode='wt') as f:
        # io.open avoids UnicodeEncodeError
        nbformat.write(nb, f)
Add script to automate notebooks testing
cfc95643733244275e605a8ff0c00d4861067a13
character_shift/character_shift.py
character_shift/character_shift.py
#!/usr/bin/env python3

def shift(string, key, decipher=False):
    return ''.join(
        chr((ord(c.upper())-65+key*(1-2*decipher)) % 26+65+32*c.islower())
        if c.isalpha() else c for c in string)

if __name__ == '__main__':
    assert shift('abcz+', 1) == 'bcda+', shift('abcz+', 1)
    assert shift('ABCZ+', 1) == 'BCDA+', shift('ABCZ+', 1)
    assert shift('bcda+', 1, True) == 'abcz+', shift('bcda+', 1, True)
#!/usr/bin/env python3

def shift(string, key, decipher=False):
    return ''.join(
        chr((ord(c) & 224)+((ord(c) & 31)+25+key*(-2*decipher+1)*c.isalpha())
            % 26+1) for c in string)

if __name__ == '__main__':
    assert shift('abcz+', 1) == 'bcda+', shift('abcz+', 1)
    assert shift('ABCZ+', 1) == 'BCDA+', shift('ABCZ+', 1)
    assert shift('bcda+', 1, True) == 'abcz+', shift('bcda+', 1, True)
Use bitwise operators on ordinals to reduce code size
Use bitwise operators on ordinals to reduce code size

The ASCII standard neatly organizes the characters in such a way that it is
easy to manipulate and classify them using bitwise operators.
Python
mit
TotempaaltJ/tiniest-code,TotempaaltJ/tiniest-code
<REPLACE_OLD> chr((ord(c.upper())-65+key*(1-2*decipher)) % 26+65+32*c.islower()) <REPLACE_NEW> chr((ord(c) & 224)+((ord(c) & 31)+25+key*(-2*decipher+1)*c.isalpha()) <REPLACE_END> <REPLACE_OLD> if c.isalpha() else c <REPLACE_NEW> % 26+1) <REPLACE_END> <|endoftext|> #!/usr/bin/env python3

def shift(string, key, decipher=False):
    return ''.join(
        chr((ord(c) & 224)+((ord(c) & 31)+25+key*(-2*decipher+1)*c.isalpha())
            % 26+1) for c in string)

if __name__ == '__main__':
    assert shift('abcz+', 1) == 'bcda+', shift('abcz+', 1)
    assert shift('ABCZ+', 1) == 'BCDA+', shift('ABCZ+', 1)
    assert shift('bcda+', 1, True) == 'abcz+', shift('bcda+', 1, True)
Use bitwise operators on ordinals to reduce code size

The ASCII standard neatly organizes the characters in such a way that it is
easy to manipulate and classify them using bitwise operators. #!/usr/bin/env python3

def shift(string, key, decipher=False):
    return ''.join(
        chr((ord(c.upper())-65+key*(1-2*decipher)) % 26+65+32*c.islower())
        if c.isalpha() else c for c in string)

if __name__ == '__main__':
    assert shift('abcz+', 1) == 'bcda+', shift('abcz+', 1)
    assert shift('ABCZ+', 1) == 'BCDA+', shift('ABCZ+', 1)
    assert shift('bcda+', 1, True) == 'abcz+', shift('bcda+', 1, True)
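The commit message's point about ASCII layout can be made concrete: for any letter, ord(c) & 224 isolates the case block (64 for uppercase, 96 for lowercase) and ord(c) & 31 yields its 1-26 alphabet position, so the cipher shifts the position and re-attaches the case bits, while key * c.isalpha() zeroes the shift for everything else. A worked check (an illustration, not part of the commit):

# ord('c') == 99 == 0b1100011
assert ord('c') & 224 == 96                 # case block for lowercase letters
assert ord('c') & 31 == 3                   # 'c' is letter number 3
assert (3 + 25 + 1) % 26 + 1 == 4           # shifting position 3 by key=1 gives 4
assert chr(96 + 4) == 'd'
# non-letters round-trip because their shift is multiplied by c.isalpha() == 0:
assert chr((ord('+') & 224) + ((ord('+') & 31) + 25) % 26 + 1) == '+'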
81b6a138c476084f9ddd6063f31d3efd0ba6e2cf
start.py
start.py
# -*- coding: utf-8 -*-
import argparse
import logging
import os
import sys

from twisted.internet import reactor

from desertbot.config import Config, ConfigError
from desertbot.factory import DesertBotFactory

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='An IRC bot written in Python.')
    parser.add_argument('-c', '--config',
                        help='the config file to read from',
                        type=str, required=True)
    cmdArgs = parser.parse_args()

    os.chdir(os.path.dirname(os.path.abspath(__file__)))

    # Set up logging for stdout on the root 'desertbot' logger
    # Modules can then just add more handlers to the root logger to capture all logs to files in various ways
    rootLogger = logging.getLogger('desertbot')
    rootLogger.setLevel(logging.INFO)  # TODO change this from config value once it's loaded

    logFormatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s', '%H:%M:%S')
    streamHandler = logging.StreamHandler(stream=sys.stdout)
    streamHandler.setFormatter(logFormatter)
    rootLogger.addHandler(streamHandler)

    config = Config(cmdArgs.config)
    try:
        config.loadConfig()
    except ConfigError:
        rootLogger.exception("Failed to load configuration file {}".format(cmdArgs.config))
    else:
        factory = DesertBotFactory(config)
        reactor.run()
# -*- coding: utf-8 -*-
import argparse
import logging
import os
import sys

from twisted.internet import reactor

from desertbot.config import Config, ConfigError
from desertbot.factory import DesertBotFactory

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='An IRC bot written in Python.')
    parser.add_argument('-c', '--config',
                        help='the config file to read from',
                        type=str, required=True)
    parser.add_argument('-l', '--loglevel',
                        help='the logging level (default INFO)',
                        type=str, default='INFO')
    cmdArgs = parser.parse_args()

    os.chdir(os.path.dirname(os.path.abspath(__file__)))

    # Set up logging for stdout on the root 'desertbot' logger
    # Modules can then just add more handlers to the root logger to capture all logs to files in various ways
    rootLogger = logging.getLogger('desertbot')
    numericLevel = getattr(logging, cmdArgs.loglevel.upper(), None)
    if isinstance(numericLevel, int):
        rootLogger.setLevel(numericLevel)
    else:
        raise ValueError('Invalid log level {}'.format(cmdArgs.loglevel))

    logFormatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s', '%H:%M:%S')
    streamHandler = logging.StreamHandler(stream=sys.stdout)
    streamHandler.setFormatter(logFormatter)
    rootLogger.addHandler(streamHandler)

    config = Config(cmdArgs.config)
    try:
        config.loadConfig()
    except ConfigError:
        rootLogger.exception("Failed to load configuration file {}".format(cmdArgs.config))
    else:
        factory = DesertBotFactory(config)
        reactor.run()
Make the logging level configurable
Make the logging level configurable
Python
mit
DesertBot/DesertBot
<INSERT> parser.add_argument('-l', '--loglevel',
                        help='the logging level (default INFO)',
                        type=str, default='INFO')
    <INSERT_END> <REPLACE_OLD> rootLogger.setLevel(logging.INFO)  # TODO change this from config value once it's loaded <REPLACE_NEW> numericLevel = getattr(logging, cmdArgs.loglevel.upper(), None)
    if isinstance(numericLevel, int):
        rootLogger.setLevel(numericLevel)
    else:
        raise ValueError('Invalid log level {}'.format(cmdArgs.loglevel)) <REPLACE_END> <|endoftext|> # -*- coding: utf-8 -*-
import argparse
import logging
import os
import sys

from twisted.internet import reactor

from desertbot.config import Config, ConfigError
from desertbot.factory import DesertBotFactory

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='An IRC bot written in Python.')
    parser.add_argument('-c', '--config',
                        help='the config file to read from',
                        type=str, required=True)
    parser.add_argument('-l', '--loglevel',
                        help='the logging level (default INFO)',
                        type=str, default='INFO')
    cmdArgs = parser.parse_args()

    os.chdir(os.path.dirname(os.path.abspath(__file__)))

    # Set up logging for stdout on the root 'desertbot' logger
    # Modules can then just add more handlers to the root logger to capture all logs to files in various ways
    rootLogger = logging.getLogger('desertbot')
    numericLevel = getattr(logging, cmdArgs.loglevel.upper(), None)
    if isinstance(numericLevel, int):
        rootLogger.setLevel(numericLevel)
    else:
        raise ValueError('Invalid log level {}'.format(cmdArgs.loglevel))

    logFormatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s', '%H:%M:%S')
    streamHandler = logging.StreamHandler(stream=sys.stdout)
    streamHandler.setFormatter(logFormatter)
    rootLogger.addHandler(streamHandler)

    config = Config(cmdArgs.config)
    try:
        config.loadConfig()
    except ConfigError:
        rootLogger.exception("Failed to load configuration file {}".format(cmdArgs.config))
    else:
        factory = DesertBotFactory(config)
        reactor.run()
Make the logging level configurable # -*- coding: utf-8 -*-
import argparse
import logging
import os
import sys

from twisted.internet import reactor

from desertbot.config import Config, ConfigError
from desertbot.factory import DesertBotFactory

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='An IRC bot written in Python.')
    parser.add_argument('-c', '--config',
                        help='the config file to read from',
                        type=str, required=True)
    cmdArgs = parser.parse_args()

    os.chdir(os.path.dirname(os.path.abspath(__file__)))

    # Set up logging for stdout on the root 'desertbot' logger
    # Modules can then just add more handlers to the root logger to capture all logs to files in various ways
    rootLogger = logging.getLogger('desertbot')
    rootLogger.setLevel(logging.INFO)  # TODO change this from config value once it's loaded

    logFormatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s', '%H:%M:%S')
    streamHandler = logging.StreamHandler(stream=sys.stdout)
    streamHandler.setFormatter(logFormatter)
    rootLogger.addHandler(streamHandler)

    config = Config(cmdArgs.config)
    try:
        config.loadConfig()
    except ConfigError:
        rootLogger.exception("Failed to load configuration file {}".format(cmdArgs.config))
    else:
        factory = DesertBotFactory(config)
        reactor.run()
ad70a7ec6543d64ec185eb2d52ccfa291a1dfad6
servicerating/views.py
servicerating/views.py
import csv
from django.http import HttpResponse

from servicerating.models import Response

def report_responses(request):
    qs = Response.objects.raw("SELECT servicerating_response.*, servicerating_extra.value AS clinic_code from servicerating_response INNER JOIN servicerating_extra ON servicerating_response.contact_id = servicerating_extra.contact_id WHERE servicerating_extra.key = 'clinic_code'")

    # Create the HttpResponse object with the appropriate CSV header.
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="servicerating_incl_clinic_code.csv"'

    writer = csv.writer(response)
    writer.writerow(["Contact", "Key", "Value", "Created At", "Updated At", "Clinic Code"])
    for obj in qs:
        writer.writerow([obj.contact, obj.key, obj.value, obj.created_at, obj.updated_at, obj.clinic_code])

    return response
import csv
from django.http import HttpResponse

from servicerating.models import Response

def report_responses(request):
    qs = Response.objects.raw("SELECT servicerating_response.*, servicerating_extra.value AS clinic_code from servicerating_response INNER JOIN servicerating_extra ON servicerating_response.contact_id = servicerating_extra.contact_id WHERE servicerating_extra.key = 'clinic_code'")

    # Create the HttpResponse object with the appropriate CSV header.
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="servicerating_incl_clinic_code.csv"'

    writer = csv.writer(response)
    writer.writerow(["Rating ID", "Contact ID", "Key", "Value", "Created At", "Updated At", "Clinic Code"])
    for obj in qs:
        writer.writerow([obj.id, obj.contact_id, obj.key, obj.value, obj.created_at, obj.updated_at, obj.clinic_code])

    return response
Remove FK's from CSV export for massive speed boost
Remove FK's from CSV export for massive speed boost
Python
bsd-3-clause
praekelt/ndoh-control,praekelt/ndoh-control,praekelt/ndoh-control,praekelt/ndoh-control
<REPLACE_OLD> writer.writerow(["Contact", <REPLACE_NEW> writer.writerow(["Rating ID", "Contact ID", <REPLACE_END> <REPLACE_OLD> writer.writerow([obj.contact, <REPLACE_NEW> writer.writerow([obj.id, obj.contact_id, <REPLACE_END> <|endoftext|> import csv from django.http import HttpResponse from servicerating.models import Response def report_responses(request): qs = Response.objects.raw("SELECT servicerating_response.*, servicerating_extra.value AS clinic_code from servicerating_response INNER JOIN servicerating_extra ON servicerating_response.contact_id = servicerating_extra.contact_id WHERE servicerating_extra.key = 'clinic_code'") # Create the HttpResponse object with the appropriate CSV header. response = HttpResponse(content_type='text/csv') response['Content-Disposition'] = 'attachment; filename="servicerating_incl_clinic_code.csv"' writer = csv.writer(response) writer.writerow(["Rating ID", "Contact ID", "Key", "Value", "Created At", "Updated At", "Clinic Code"]) for obj in qs: writer.writerow([obj.id, obj.contact_id, obj.key, obj.value, obj.created_at, obj.updated_at, obj.clinic_code]) return response
Remove FK's from CSV export for massive speed boost import csv from django.http import HttpResponse from servicerating.models import Response def report_responses(request): qs = Response.objects.raw("SELECT servicerating_response.*, servicerating_extra.value AS clinic_code from servicerating_response INNER JOIN servicerating_extra ON servicerating_response.contact_id = servicerating_extra.contact_id WHERE servicerating_extra.key = 'clinic_code'") # Create the HttpResponse object with the appropriate CSV header. response = HttpResponse(content_type='text/csv') response['Content-Disposition'] = 'attachment; filename="servicerating_incl_clinic_code.csv"' writer = csv.writer(response) writer.writerow(["Contact", "Key", "Value", "Created At", "Updated At", "Clinic Code"]) for obj in qs: writer.writerow([obj.contact, obj.key, obj.value, obj.created_at, obj.updated_at, obj.clinic_code]) return response
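The speed claim in the commit message follows from Django ORM behaviour worth spelling out: on a raw() queryset, reading obj.contact (the ForeignKey) issues a separate SELECT per row to materialize the related object, while obj.contact_id just reads the column already present in the fetched row, so the export loop drops from one extra query per row (the classic N+1 pattern) to none. Sketch of the difference (an illustration, not part of the commit):

for obj in qs:
    obj.contact      # lazy FK access: one additional SELECT per exported row
    obj.contact_id   # raw column value: no additional query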
c5d4c0cbfced859407c5569d879cfb7b9815eb57
alerts/lib/alert_plugin_set.py
alerts/lib/alert_plugin_set.py
import os import sys sys.path.append(os.path.join(os.path.dirname(__file__), "../../lib")) from plugin_set import PluginSet from utilities.logger import logger class AlertPluginSet(PluginSet): def send_message_to_plugin(self, plugin_class, message, metadata=None): if 'utctimestamp' in message and 'summary' in message: message_log_str = '{0} received message: ({1}) {2}'.format(plugin_class.__module__, message['utctimestamp'], message['summary']) logger.info(message_log_str) return plugin_class.onMessage(message), metadata
import os import sys sys.path.append(os.path.join(os.path.dirname(__file__), "../../lib")) from plugin_set import PluginSet from utilities.logger import logger class AlertPluginSet(PluginSet): def send_message_to_plugin(self, plugin_class, message, metadata=None): if 'utctimestamp' in message and 'summary' in message: message_log_str = u'{0} received message: ({1}) {2}'.format(plugin_class.__module__, message['utctimestamp'], message['summary']) logger.info(message_log_str) return plugin_class.onMessage(message), metadata
Convert debug message into unicode string
Convert debug message into unicode string
Python
mpl-2.0
Phrozyn/MozDef,mozilla/MozDef,gdestuynder/MozDef,mozilla/MozDef,Phrozyn/MozDef,mozilla/MozDef,mpurzynski/MozDef,mpurzynski/MozDef,jeffbryner/MozDef,jeffbryner/MozDef,Phrozyn/MozDef,jeffbryner/MozDef,gdestuynder/MozDef,mpurzynski/MozDef,mpurzynski/MozDef,Phrozyn/MozDef,jeffbryner/MozDef,gdestuynder/MozDef,gdestuynder/MozDef,mozilla/MozDef
<REPLACE_OLD> '{0} <REPLACE_NEW> u'{0} <REPLACE_END> <|endoftext|> import os import sys sys.path.append(os.path.join(os.path.dirname(__file__), "../../lib")) from plugin_set import PluginSet from utilities.logger import logger class AlertPluginSet(PluginSet): def send_message_to_plugin(self, plugin_class, message, metadata=None): if 'utctimestamp' in message and 'summary' in message: message_log_str = u'{0} received message: ({1}) {2}'.format(plugin_class.__module__, message['utctimestamp'], message['summary']) logger.info(message_log_str) return plugin_class.onMessage(message), metadata
Convert debug message into unicode string import os import sys sys.path.append(os.path.join(os.path.dirname(__file__), "../../lib")) from plugin_set import PluginSet from utilities.logger import logger class AlertPluginSet(PluginSet): def send_message_to_plugin(self, plugin_class, message, metadata=None): if 'utctimestamp' in message and 'summary' in message: message_log_str = '{0} received message: ({1}) {2}'.format(plugin_class.__module__, message['utctimestamp'], message['summary']) logger.info(message_log_str) return plugin_class.onMessage(message), metadata
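For context on the u'' prefix added above: in Python 2, str.format on a byte-string template encodes unicode arguments to ASCII when building the result, so a non-ASCII alert summary can raise UnicodeEncodeError; a unicode template keeps the result unicode. A small Python 2 illustration with a made-up summary:

# -*- coding: utf-8 -*-
summary = u'r\xe9sum\xe9'  # u'résumé': unicode containing a non-ASCII character

ok = u'{0} received message'.format(summary)  # unicode template: result stays unicode
try:
    '{0} received message'.format(summary)    # byte template: implicit ascii encode
except UnicodeEncodeError:
    pass                                      # this is the failure the commit avoids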
f80febf88c3f045493e75efc788d88058f021f0f
merge_sort.py
merge_sort.py
#!/usr/bin/env python # -*- coding: UTF-8 -*- def merge_sort(lyst): buf = [len(lyst)] _merge_sort(lyst, buf, 0, len(lyst)-1) def _merge_sort(lyst, buf, low, high): if low < high: middle = (low + high) // 2 _merge_sort(lyst, buf, low, middle) _merge_sort(lyst, buf, middle+1, high) merge(lyst, buf, low, middle, high) def merge(lyst, buf, low, middle, high): i1 = low i2 = middle + 1 for i in range(low, high): if i1 > middle: buf[i] = lyst[i2] i2 += 1 elif i2 > high: buf[i] = lyst[i1] i1 += 1 elif lyst[i1] < lyst[i2]: buf[i] = lyst[i] i1 += 1 else: buf[i] = lyst[i2] i2 += 1 for i in range(low, high): lyst[i] = buf[i]
#!/usr/bin/env python # -*- coding: UTF-8 -*- def merge_sort(lyst): buf = [None for x in range(len(lyst))] _merge_sort(lyst, buf, 0, len(lyst)-1) def _merge_sort(lyst, buf, low, high): if low < high: middle = (low + high) // 2 _merge_sort(lyst, buf, low, middle) _merge_sort(lyst, buf, middle+1, high) merge(lyst, buf, low, middle, high) def merge(lyst, buf, low, middle, high): i1 = low i2 = middle + 1 for i in range(low, high+1): if i1 > middle: buf[i] = lyst[i2] i2 += 1 elif i2 > high: buf[i] = lyst[i1] i1 += 1 elif lyst[i1] < lyst[i2]: buf[i] = lyst[i] i1 += 1 else: buf[i] = lyst[i2] i2 += 1 for i in range(low, high+1): lyst[i] = buf[i]
Fix initial buf variable to act as an array
Fix initial buf variable to act as an array
Python
mit
nbeck90/data_structures_2
<REPLACE_OLD> [len(lyst)] <REPLACE_NEW> [None for x in range(len(lyst))] <REPLACE_END> <REPLACE_OLD> high): <REPLACE_NEW> high+1): <REPLACE_END> <REPLACE_OLD> high): <REPLACE_NEW> high+1): <REPLACE_END> <|endoftext|> #!/usr/bin/env python # -*- coding: UTF-8 -*- def merge_sort(lyst): buf = [None for x in range(len(lyst))] _merge_sort(lyst, buf, 0, len(lyst)-1) def _merge_sort(lyst, buf, low, high): if low < high: middle = (low + high) // 2 _merge_sort(lyst, buf, low, middle) _merge_sort(lyst, buf, middle+1, high) merge(lyst, buf, low, middle, high) def merge(lyst, buf, low, middle, high): i1 = low i2 = middle + 1 for i in range(low, high+1): if i1 > middle: buf[i] = lyst[i2] i2 += 1 elif i2 > high: buf[i] = lyst[i1] i1 += 1 elif lyst[i1] < lyst[i2]: buf[i] = lyst[i] i1 += 1 else: buf[i] = lyst[i2] i2 += 1 for i in range(low, high+1): lyst[i] = buf[i]
Fix initial buf variable to act as an array #!/usr/bin/env python # -*- coding: UTF-8 -*- def merge_sort(lyst): buf = [len(lyst)] _merge_sort(lyst, buf, 0, len(lyst)-1) def _merge_sort(lyst, buf, low, high): if low < high: middle = (low + high) // 2 _merge_sort(lyst, buf, low, middle) _merge_sort(lyst, buf, middle+1, high) merge(lyst, buf, low, middle, high) def merge(lyst, buf, low, middle, high): i1 = low i2 = middle + 1 for i in range(low, high): if i1 > middle: buf[i] = lyst[i2] i2 += 1 elif i2 > high: buf[i] = lyst[i1] i1 += 1 elif lyst[i1] < lyst[i2]: buf[i] = lyst[i] i1 += 1 else: buf[i] = lyst[i2] i2 += 1 for i in range(low, high): lyst[i] = buf[i]
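One detail the commit above leaves untouched: in both the old and new versions, the comparison branch assigns buf[i] = lyst[i], where the surrounding merge logic suggests lyst[i1] was intended; as written, the merge does not produce sorted output. A corrected merge step under that reading:

def merge(lyst, buf, low, middle, high):
    i1, i2 = low, middle + 1
    for i in range(low, high + 1):
        if i1 > middle:                # left run exhausted
            buf[i] = lyst[i2]
            i2 += 1
        elif i2 > high:                # right run exhausted
            buf[i] = lyst[i1]
            i1 += 1
        elif lyst[i1] < lyst[i2]:      # take the smaller head element
            buf[i] = lyst[i1]
            i1 += 1
        else:
            buf[i] = lyst[i2]
            i2 += 1
    lyst[low:high + 1] = buf[low:high + 1]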
8112291023edff1a3803f2a3a404d83e69e1ee34
astral/api/tests/__init__.py
astral/api/tests/__init__.py
import tornado.testing from astral.api.app import NodeWebAPI from astral.models import drop_all, setup_all, create_all, session class BaseTest(tornado.testing.AsyncHTTPTestCase): def get_app(self): return NodeWebAPI() def get_http_port(self): return 8000 def setUp(self): super(BaseTest, self).setUp() setup_all() create_all() def tearDown(self): super(BaseTest, self).tearDown() session.rollback() #drop_all()
import tornado.testing from astral.api.app import NodeWebAPI from astral.models import drop_all, setup_all, create_all, session class BaseTest(tornado.testing.AsyncHTTPTestCase): def get_app(self): return NodeWebAPI() def get_http_port(self): return 8000 def setUp(self): super(BaseTest, self).setUp() setup_all() create_all() def tearDown(self): super(BaseTest, self).tearDown() session.rollback() drop_all()
Drop all tables after tests - looks like we're back in business.
Drop all tables after tests - looks like we're back in business.
Python
mit
peplin/astral
<REPLACE_OLD> #drop_all() <REPLACE_NEW> drop_all() <REPLACE_END> <|endoftext|> import tornado.testing from astral.api.app import NodeWebAPI from astral.models import drop_all, setup_all, create_all, session class BaseTest(tornado.testing.AsyncHTTPTestCase): def get_app(self): return NodeWebAPI() def get_http_port(self): return 8000 def setUp(self): super(BaseTest, self).setUp() setup_all() create_all() def tearDown(self): super(BaseTest, self).tearDown() session.rollback() drop_all()
Drop all tables after tests - looks like we're back in business. import tornado.testing from astral.api.app import NodeWebAPI from astral.models import drop_all, setup_all, create_all, session class BaseTest(tornado.testing.AsyncHTTPTestCase): def get_app(self): return NodeWebAPI() def get_http_port(self): return 8000 def setUp(self): super(BaseTest, self).setUp() setup_all() create_all() def tearDown(self): super(BaseTest, self).tearDown() session.rollback() #drop_all()
fe7d96c9182831613f4f44bc6c4f5903c7e02858
setup.py
setup.py
from setuptools import setup def fread(fn): return open(fn, 'rb').read().decode('utf-8') setup( name='tox-travis', description='Seamless integration of Tox into Travis CI', long_description=fread('README.rst') + '\n\n' + fread('HISTORY.rst'), author='Ryan Hiebert', author_email='[email protected]', url='https://github.com/ryanhiebert/tox-travis', license='MIT', version='0.1', py_modules=['tox_travis'], entry_points={ 'tox': ['travis = tox_travis'], }, install_requires=['tox>=2.0'], classifiers=[ 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.2', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', ], )
from setuptools import setup def fread(fn): return open(fn, 'rb').read().decode('utf-8') setup( name='tox-travis', description='Seamless integration of Tox into Travis CI', long_description=fread('README.rst') + '\n\n' + fread('HISTORY.rst'), author='Ryan Hiebert', author_email='[email protected]', url='https://github.com/ryanhiebert/tox-travis', license='MIT', version='0.1', py_modules=['tox_travis'], entry_points={ 'tox': ['travis = tox_travis'], }, install_requires=['tox>=2.0'], classifiers=[ 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.2', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', ], )
Add Python 3.5 trove classifier
Add Python 3.5 trove classifier
Python
mit
rpkilby/tox-travis,ryanhiebert/tox-travis,tox-dev/tox-travis
<INSERT> 'Programming Language :: Python :: 3.5', <INSERT_END> <|endoftext|> from setuptools import setup def fread(fn): return open(fn, 'rb').read().decode('utf-8') setup( name='tox-travis', description='Seamless integration of Tox into Travis CI', long_description=fread('README.rst') + '\n\n' + fread('HISTORY.rst'), author='Ryan Hiebert', author_email='[email protected]', url='https://github.com/ryanhiebert/tox-travis', license='MIT', version='0.1', py_modules=['tox_travis'], entry_points={ 'tox': ['travis = tox_travis'], }, install_requires=['tox>=2.0'], classifiers=[ 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.2', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', ], )
Add Python 3.5 trove classifier from setuptools import setup def fread(fn): return open(fn, 'rb').read().decode('utf-8') setup( name='tox-travis', description='Seamless integration of Tox into Travis CI', long_description=fread('README.rst') + '\n\n' + fread('HISTORY.rst'), author='Ryan Hiebert', author_email='[email protected]', url='https://github.com/ryanhiebert/tox-travis', license='MIT', version='0.1', py_modules=['tox_travis'], entry_points={ 'tox': ['travis = tox_travis'], }, install_requires=['tox>=2.0'], classifiers=[ 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.2', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', ], )
3617400a7c0915920384d15ff273aa4c8a805d9c
core/byzantinerandomizedconsensus.py
core/byzantinerandomizedconsensus.py
from base.consensus import Consensus class ByzantineRandomizedConsensus(Consensus): """ Implements a Byzantine Fault Tolerant Randomized Consensus Broadcast protocol. """ def propose(self, message): pass def decide(self): pass
Update Byzantine Randomized Consensus protocol class
Update Byzantine Randomized Consensus protocol class
Python
mit
koevskinikola/ByzantineRandomizedConsensus
<INSERT> from base.consensus import Consensus class ByzantineRandomizedConsensus(Consensus): <INSERT_END> <INSERT> """ Implements a Byzantine Fault Tolerant Randomized Consensus Broadcast protocol. """ def propose(self, message): pass def decide(self): pass <INSERT_END> <|endoftext|> from base.consensus import Consensus class ByzantineRandomizedConsensus(Consensus): """ Implements a Byzantine Fault Tolerant Randomized Consensus Broadcast protocol. """ def propose(self, message): pass def decide(self): pass
Update Byzantine Randomized Consensus protocol class
11bb0d7aa106e1cafee8b4f00bf75a2aa02e97cf
SecC/ErrMer_Proto.py
SecC/ErrMer_Proto.py
from __future__ import division import numpy as np from utilBMF.HTSUtils import pFastqProxy, pFastqFile consInFq1 = pFastqFile("/home/brett/Projects/BMFTools_Devel/lamda_data/lamda-50di" "v_S4_L001_R1_001.fastq.rescued.shaded.BS.fastq") consInFq2 = pFastqFile("/home/brett/Projects/BMFTools_Devel/lamda_data/kmer_test/" "")
Add small prototype script to start calculating error from kmer motifs
Add small prototype script to start calculating error from kmer motifs
Python
mit
ARUP-NGS/BMFtools,ARUP-NGS/BMFtools,ARUP-NGS/BMFtools
<INSERT> from __future__ import division import numpy as np from utilBMF.HTSUtils import pFastqProxy, pFastqFile consInFq1 = pFastqFile("/home/brett/Projects/BMFTools_Devel/lamda_data/lamda-50di" <INSERT_END> <INSERT> "v_S4_L001_R1_001.fastq.rescued.shaded.BS.fastq") consInFq2 = pFastqFile("/home/brett/Projects/BMFTools_Devel/lamda_data/kmer_test/" "") <INSERT_END> <|endoftext|> from __future__ import division import numpy as np from utilBMF.HTSUtils import pFastqProxy, pFastqFile consInFq1 = pFastqFile("/home/brett/Projects/BMFTools_Devel/lamda_data/lamda-50di" "v_S4_L001_R1_001.fastq.rescued.shaded.BS.fastq") consInFq2 = pFastqFile("/home/brett/Projects/BMFTools_Devel/lamda_data/kmer_test/" "")
Add small prototype script to start calculating error from kmer motifs
0d36640d47c30d8b9cd2b2eff1c8ccf1e97c13c5
subscriptions/management/commands/add_missed_call_service_audio_notification_to_active_subscriptions.py
subscriptions/management/commands/add_missed_call_service_audio_notification_to_active_subscriptions.py
from django.core.exceptions import ObjectDoesNotExist from django.core.management.base import BaseCommand, CommandError from subscriptions.models import Subscription class Command(BaseCommand): help = ("Active subscription holders need to be informed via audio file " "about the new missed call service.") def handle(self, *args, **options): self.stdout.write("Processing active subscriptions ...") count = 0 try: active_subscriptions_list = list( Subscription.objects.filter(active=True)) except ObjectDoesNotExist: self.stdout.write("No active subscriptions found") if len(active_subscriptions_list) > 0: for active_subscription in active_subscriptions_list: # Add audio file to subscription meta_data. Not sure how we'll # handle translations here. if (active_subscription.metadata is not None and "welcome_message" not in active_subscription.metadata): active_subscription["audo_file_url"] = "audio_file_url" count += 1 if count > 0: self.stdout.write( "Update {} subscriptions with voice notes".format(count)) else: self.stdout.write( "No subscriptions updated with audio file notes")
Add missed call service audio notification to active subscriptions
Add missed call service audio notification to active subscriptions
Python
bsd-3-clause
praekelt/seed-staged-based-messaging,praekelt/seed-stage-based-messaging,praekelt/seed-stage-based-messaging
<REPLACE_OLD> <REPLACE_NEW> from django.core.exceptions import ObjectDoesNotExist from django.core.management.base import BaseCommand, CommandError from subscriptions.models import Subscription class Command(BaseCommand): help = ("Active subscription holders need to be informed via audio file " "about the new missed call service.") def handle(self, *args, **options): self.stdout.write("Processing active subscriptions ...") count = 0 try: active_subscriptions_list = list( Subscription.objects.filter(active=True)) except ObjectDoesNotExist: self.stdout.write("No active subscriptions found") if len(active_subscriptions_list) > 0: for active_subscription in active_subscriptions_list: # Add audio file to subscription meta_data. Not sure how we'll # handle translations here. if (active_subscription.metadata is not None and "welcome_message" not in active_subscription.metadata): active_subscription["audo_file_url"] = "audio_file_url" count += 1 if count > 0: self.stdout.write( "Update {} subscriptions with voice notes".format(count)) else: self.stdout.write( "No subscriptions updated with audio file notes") <REPLACE_END> <|endoftext|> from django.core.exceptions import ObjectDoesNotExist from django.core.management.base import BaseCommand, CommandError from subscriptions.models import Subscription class Command(BaseCommand): help = ("Active subscription holders need to be informed via audio file " "about the new missed call service.") def handle(self, *args, **options): self.stdout.write("Processing active subscriptions ...") count = 0 try: active_subscriptions_list = list( Subscription.objects.filter(active=True)) except ObjectDoesNotExist: self.stdout.write("No active subscriptions found") if len(active_subscriptions_list) > 0: for active_subscription in active_subscriptions_list: # Add audio file to subscription meta_data. Not sure how we'll # handle translations here. if (active_subscription.metadata is not None and "welcome_message" not in active_subscription.metadata): active_subscription["audo_file_url"] = "audio_file_url" count += 1 if count > 0: self.stdout.write( "Update {} subscriptions with voice notes".format(count)) else: self.stdout.write( "No subscriptions updated with audio file notes")
Add missed call service audio notification to active subscriptions
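A note on the loop in the record above: active_subscription["audo_file_url"] = "audio_file_url" uses item assignment on a Django model instance, which raises TypeError, and the key is misspelled. A sketch of what the update presumably intends, storing an audio-notification URL in the subscription's metadata dict; the URL value is a placeholder assumption:

from subscriptions.models import Subscription

AUDIO_FILE_URL = 'http://example.com/missed_call_notification.mp3'  # placeholder

count = 0
for subscription in Subscription.objects.filter(active=True).iterator():
    metadata = subscription.metadata or {}
    if 'welcome_message' not in metadata:
        metadata['audio_file_url'] = AUDIO_FILE_URL  # corrected key spelling
        subscription.metadata = metadata
        subscription.save()
        count += 1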
77f4fca43b1d4be85894ad565801d8a333008fdc
Lib/test/test_pep263.py
Lib/test/test_pep263.py
#! -*- coding: koi8-r -*-

import unittest
from test import test_support

class PEP263Test(unittest.TestCase):

    def test_pep263(self):
        self.assertEqual(
            u"Питон".encode("utf-8"),
            '\xd0\x9f\xd0\xb8\xd1\x82\xd0\xbe\xd0\xbd'
        )
        self.assertEqual(
            u"\П".encode("utf-8"),
            '\\\xd0\x9f'
        )

def test_main():
    test_support.run_unittest(PEP263Test)

if __name__=="__main__":
    test_main()
#! -*- coding: koi8-r -*-

# This file is marked as binary in the CVS, to prevent MacCVS from recoding it.

import unittest
from test import test_support

class PEP263Test(unittest.TestCase):

    def test_pep263(self):
        self.assertEqual(
            u"Питон".encode("utf-8"),
            '\xd0\x9f\xd0\xb8\xd1\x82\xd0\xbe\xd0\xbd'
        )
        self.assertEqual(
            u"\П".encode("utf-8"),
            '\\\xd0\x9f'
        )

def test_main():
    test_support.run_unittest(PEP263Test)

if __name__=="__main__":
    test_main()
Add a comment explaining -kb.
Add a comment explaining -kb.
Python
mit
sk-/python2.7-type-annotator,sk-/python2.7-type-annotator,sk-/python2.7-type-annotator
<REPLACE_OLD> -*- import <REPLACE_NEW> -*- # This file is marked as binary in the CVS, to prevent MacCVS from recoding it. import <REPLACE_END> <|endoftext|> #! -*- coding: koi8-r -*- # This file is marked as binary in the CVS, to prevent MacCVS from recoding it. import unittest from test import test_support class PEP263Test(unittest.TestCase): def test_pep263(self): self.assertEqual( u"Питон".encode("utf-8"), '\xd0\x9f\xd0\xb8\xd1\x82\xd0\xbe\xd0\xbd' ) self.assertEqual( u"\П".encode("utf-8"), '\\\xd0\x9f' ) def test_main(): test_support.run_unittest(PEP263Test) if __name__=="__main__": test_main()
Add a comment explaining -kb. #! -*- coding: koi8-r -*- import unittest from test import test_support class PEP263Test(unittest.TestCase): def test_pep263(self): self.assertEqual( u"Питон".encode("utf-8"), '\xd0\x9f\xd0\xb8\xd1\x82\xd0\xbe\xd0\xbd' ) self.assertEqual( u"\П".encode("utf-8"), '\\\xd0\x9f' ) def test_main(): test_support.run_unittest(PEP263Test) if __name__=="__main__": test_main()
4666849791cad70ae1bb907a2dcc35ccfc0b7de4
backend/populate_dimkarakostas.py
backend/populate_dimkarakostas.py
from string import ascii_lowercase import django import os os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'backend.settings') django.setup() from breach.models import Target, Victim endpoint = 'https://dimkarakostas.com/rupture/test.php?ref=%s' prefix = 'imper' alphabet = ascii_lowercase secretlength = 9 target_1 = Target( endpoint=endpoint, prefix=prefix, alphabet=alphabet, secretlength=secretlength ) target_1.save() print 'Created Target:\n\tendpoint: {}\n\tprefix: {}\n\talphabet: {}\n\tsecretlength: {}'.format(endpoint, prefix, alphabet, secretlength) snifferendpoint = 'http://127.0.0.1:9000' sourceip = '192.168.1.70' victim_1 = Victim( target=target_1, snifferendpoint=snifferendpoint, sourceip=sourceip, # method='serial' ) victim_1.save() print 'Created Victim:\n\tvictim_id: {}\n\tsnifferendpoint: {}\n\tsourceip: {}'.format(victim_1.id, snifferendpoint, sourceip)
from string import ascii_lowercase import django import os import string os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'backend.settings') django.setup() from breach.models import Target, Victim endpoint = 'https://dimkarakostas.com/rupture/test.php?ref=%s' prefix = 'imper' alphabet = ascii_lowercase secretlength = 9 target_1 = Target( endpoint=endpoint, prefix=prefix, alphabet=alphabet, secretlength=secretlength, alignmentalphabet=string.ascii_uppercase ) target_1.save() print 'Created Target:\n\tendpoint: {}\n\tprefix: {}\n\talphabet: {}\n\tsecretlength: {}'.format(endpoint, prefix, alphabet, secretlength) snifferendpoint = 'http://127.0.0.1:9000' sourceip = '192.168.1.70' victim_1 = Victim( target=target_1, snifferendpoint=snifferendpoint, sourceip=sourceip, # method='serial' ) victim_1.save() print 'Created Victim:\n\tvictim_id: {}\n\tsnifferendpoint: {}\n\tsourceip: {}'.format(victim_1.id, snifferendpoint, sourceip)
Update dimkarakostas population with alignmentalphabet
Update dimkarakostas population with alignmentalphabet
Python
mit
esarafianou/rupture,dionyziz/rupture,esarafianou/rupture,dionyziz/rupture,dimriou/rupture,dionyziz/rupture,dimriou/rupture,esarafianou/rupture,dionyziz/rupture,dimkarakostas/rupture,esarafianou/rupture,dimkarakostas/rupture,dionyziz/rupture,dimriou/rupture,dimkarakostas/rupture,dimkarakostas/rupture,dimkarakostas/rupture,dimriou/rupture,dimriou/rupture
<REPLACE_OLD> os os.environ.setdefault('DJANGO_SETTINGS_MODULE', <REPLACE_NEW> os import string os.environ.setdefault('DJANGO_SETTINGS_MODULE', <REPLACE_END> <REPLACE_OLD> secretlength=secretlength ) target_1.save() print <REPLACE_NEW> secretlength=secretlength, alignmentalphabet=string.ascii_uppercase ) target_1.save() print <REPLACE_END> <|endoftext|> from string import ascii_lowercase import django import os import string os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'backend.settings') django.setup() from breach.models import Target, Victim endpoint = 'https://dimkarakostas.com/rupture/test.php?ref=%s' prefix = 'imper' alphabet = ascii_lowercase secretlength = 9 target_1 = Target( endpoint=endpoint, prefix=prefix, alphabet=alphabet, secretlength=secretlength, alignmentalphabet=string.ascii_uppercase ) target_1.save() print 'Created Target:\n\tendpoint: {}\n\tprefix: {}\n\talphabet: {}\n\tsecretlength: {}'.format(endpoint, prefix, alphabet, secretlength) snifferendpoint = 'http://127.0.0.1:9000' sourceip = '192.168.1.70' victim_1 = Victim( target=target_1, snifferendpoint=snifferendpoint, sourceip=sourceip, # method='serial' ) victim_1.save() print 'Created Victim:\n\tvictim_id: {}\n\tsnifferendpoint: {}\n\tsourceip: {}'.format(victim_1.id, snifferendpoint, sourceip)
Update dimkarakostas population with alignmentalphabet from string import ascii_lowercase import django import os os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'backend.settings') django.setup() from breach.models import Target, Victim endpoint = 'https://dimkarakostas.com/rupture/test.php?ref=%s' prefix = 'imper' alphabet = ascii_lowercase secretlength = 9 target_1 = Target( endpoint=endpoint, prefix=prefix, alphabet=alphabet, secretlength=secretlength ) target_1.save() print 'Created Target:\n\tendpoint: {}\n\tprefix: {}\n\talphabet: {}\n\tsecretlength: {}'.format(endpoint, prefix, alphabet, secretlength) snifferendpoint = 'http://127.0.0.1:9000' sourceip = '192.168.1.70' victim_1 = Victim( target=target_1, snifferendpoint=snifferendpoint, sourceip=sourceip, # method='serial' ) victim_1.save() print 'Created Victim:\n\tvictim_id: {}\n\tsnifferendpoint: {}\n\tsourceip: {}'.format(victim_1.id, snifferendpoint, sourceip)
7c33ff3ff94b933fe9e5c8b53fa08041ef8ee404
runserver.py
runserver.py
from geomancer import create_app app = create_app() if __name__ == "__main__": app.run(debug=True)
from geomancer import create_app app = create_app() if __name__ == "__main__": import sys try: port = int(sys.argv[1]) except IndexError: port = 5000 app.run(debug=True, port=port)
Make it so we can choose port
Make it so we can choose port
Python
mit
associatedpress/geomancer,associatedpress/geomancer,datamade/geomancer,associatedpress/geomancer,datamade/geomancer,datamade/geomancer
<REPLACE_OLD> app.run(debug=True) <REPLACE_NEW> import sys try: port = int(sys.argv[1]) except IndexError: port = 5000 app.run(debug=True, port=port) <REPLACE_END> <|endoftext|> from geomancer import create_app app = create_app() if __name__ == "__main__": import sys try: port = int(sys.argv[1]) except IndexError: port = 5000 app.run(debug=True, port=port)
Make it so we can choose port from geomancer import create_app app = create_app() if __name__ == "__main__": app.run(debug=True)
90ab0bfbac851a52f0e48f5186a727692e699a6f
geodj/youtube.py
geodj/youtube.py
from gdata.youtube.service import YouTubeService, YouTubeVideoQuery class YoutubeMusic: def __init__(self): self.service = YouTubeService() def search(self, artist): query = YouTubeVideoQuery() query.vq = artist query.orderby = 'viewCount' query.racy = 'exclude' query.categories.append("/Music") feed = self.service.YouTubeQuery(query) results = [] for entry in feed.entry: if not self.is_valid_entry(artist, entry): continue results.append({ 'url': entry.media.player.url, 'duration': int(entry.media.duration.seconds), }) return results def is_valid_entry(self, artist, entry): duration = int(entry.media.duration.seconds) if entry.rating is not None and float(entry.rating.average) < 3: return False if duration < (2 * 60) or duration > (9 * 60): return False if artist.lower() not in entry.media.title.text.lower(): return False return True
from gdata.youtube.service import YouTubeService, YouTubeVideoQuery from django.utils.encoding import smart_str class YoutubeMusic: def __init__(self): self.service = YouTubeService() def search(self, artist): query = YouTubeVideoQuery() query.vq = artist query.orderby = 'viewCount' query.racy = 'exclude' query.categories.append("/Music") feed = self.service.YouTubeQuery(query) results = [] for entry in feed.entry: if not self.is_valid_entry(artist, entry): continue results.append({ 'url': entry.media.player.url, 'title': smart_str(entry.media.title.text), 'duration': int(entry.media.duration.seconds), }) return {'artist': smart_str(artist), 'results': results} def is_valid_entry(self, artist, entry): duration = int(entry.media.duration.seconds) if entry.rating is not None and float(entry.rating.average) < 3: return False if duration < (2 * 60) or duration > (9 * 60): return False if smart_str(artist).lower() not in smart_str(entry.media.title.text).lower(): return False return True
Use smart_str and include artist in results
Use smart_str and include artist in results
Python
mit
6/GeoDJ,6/GeoDJ
<REPLACE_OLD> YouTubeVideoQuery class <REPLACE_NEW> YouTubeVideoQuery from django.utils.encoding import smart_str class <REPLACE_END> <INSERT> 'title': smart_str(entry.media.title.text), <INSERT_END> <REPLACE_OLD> results <REPLACE_NEW> {'artist': smart_str(artist), 'results': results} <REPLACE_END> <REPLACE_OLD> artist.lower() <REPLACE_NEW> smart_str(artist).lower() <REPLACE_END> <REPLACE_OLD> entry.media.title.text.lower(): <REPLACE_NEW> smart_str(entry.media.title.text).lower(): <REPLACE_END> <|endoftext|> from gdata.youtube.service import YouTubeService, YouTubeVideoQuery from django.utils.encoding import smart_str class YoutubeMusic: def __init__(self): self.service = YouTubeService() def search(self, artist): query = YouTubeVideoQuery() query.vq = artist query.orderby = 'viewCount' query.racy = 'exclude' query.categories.append("/Music") feed = self.service.YouTubeQuery(query) results = [] for entry in feed.entry: if not self.is_valid_entry(artist, entry): continue results.append({ 'url': entry.media.player.url, 'title': smart_str(entry.media.title.text), 'duration': int(entry.media.duration.seconds), }) return {'artist': smart_str(artist), 'results': results} def is_valid_entry(self, artist, entry): duration = int(entry.media.duration.seconds) if entry.rating is not None and float(entry.rating.average) < 3: return False if duration < (2 * 60) or duration > (9 * 60): return False if smart_str(artist).lower() not in smart_str(entry.media.title.text).lower(): return False return True
Use smart_str and include artist in results from gdata.youtube.service import YouTubeService, YouTubeVideoQuery class YoutubeMusic: def __init__(self): self.service = YouTubeService() def search(self, artist): query = YouTubeVideoQuery() query.vq = artist query.orderby = 'viewCount' query.racy = 'exclude' query.categories.append("/Music") feed = self.service.YouTubeQuery(query) results = [] for entry in feed.entry: if not self.is_valid_entry(artist, entry): continue results.append({ 'url': entry.media.player.url, 'duration': int(entry.media.duration.seconds), }) return results def is_valid_entry(self, artist, entry): duration = int(entry.media.duration.seconds) if entry.rating is not None and float(entry.rating.average) < 3: return False if duration < (2 * 60) or duration > (9 * 60): return False if artist.lower() not in entry.media.title.text.lower(): return False return True
a2b1d10e042d135c3c014622ffeabd7e96a46f9f
tests/test_update_target.py
tests/test_update_target.py
""" Tests for helper function for updating a target from a Vuforia database. """ import io import pytest from vws import VWS from vws.exceptions import UnknownTarget class TestUpdateTarget: """ Test for updating a target. """ def test_get_target( self, client: VWS, high_quality_image: io.BytesIO, ) -> None: """ Details of a target are returned by ``get_target``. """ target_id = client.add_target( name='x', width=1, image=high_quality_image, ) client.update_target(target_id=target_id) result = client.get_target(target_id=target_id) expected_keys = { 'target_id', 'active_flag', 'name', 'width', 'tracking_rating', 'reco_rating', } assert result['target_record'].keys() == expected_keys def test_no_such_target( self, client: VWS, high_quality_image: io.BytesIO, ) -> None: """ An ``UnknownTarget`` exception is raised when getting a target which does not exist. """ with pytest.raises(UnknownTarget): client.get_target(target_id='a')
""" Tests for helper function for updating a target from a Vuforia database. """ import io import pytest from vws import VWS from vws.exceptions import UnknownTarget class TestUpdateTarget: """ Test for updating a target. """ def test_get_target( self, client: VWS, high_quality_image: io.BytesIO, ) -> None: """ Details of a target are returned by ``get_target``. """ # target_id = client.add_target( # name='x', # width=1, # image=high_quality_image, # ) # # client.update_target(target_id=target_id) # result = client.get_target(target_id=target_id) # expected_keys = { # 'target_id', # 'active_flag', # 'name', # 'width', # 'tracking_rating', # 'reco_rating', # } # assert result['target_record'].keys() == expected_keys # # def test_no_such_target( # self, # client: VWS, # high_quality_image: io.BytesIO, # ) -> None: # """ # An ``UnknownTarget`` exception is raised when getting a target which # does not exist. # """ # with pytest.raises(UnknownTarget): # client.get_target(target_id='a')
Comment out partially done code
Comment out partially done code
Python
mit
adamtheturtle/vws-python,adamtheturtle/vws-python
<INSERT> # <INSERT_END> <INSERT> # <INSERT_END> <INSERT> # <INSERT_END> <INSERT> # <INSERT_END> <REPLACE_OLD> ) <REPLACE_NEW> # ) # # <REPLACE_END> <INSERT> # <INSERT_END> <INSERT> # <INSERT_END> <INSERT> # <INSERT_END> <INSERT> # <INSERT_END> <INSERT> # <INSERT_END> <INSERT> # <INSERT_END> <INSERT> # <INSERT_END> <INSERT> # <INSERT_END> <INSERT> # <INSERT_END> <INSERT> # <INSERT_END> <REPLACE_OLD> expected_keys <REPLACE_NEW> expected_keys # # <REPLACE_END> <INSERT> # <INSERT_END> <INSERT> # <INSERT_END> <INSERT> # <INSERT_END> <INSERT> # <INSERT_END> <INSERT> # <INSERT_END> <INSERT> # <INSERT_END> <INSERT> # <INSERT_END> <INSERT> # <INSERT_END> <INSERT> # <INSERT_END> <INSERT> # <INSERT_END> <|endoftext|> """ Tests for helper function for updating a target from a Vuforia database. """ import io import pytest from vws import VWS from vws.exceptions import UnknownTarget class TestUpdateTarget: """ Test for updating a target. """ def test_get_target( self, client: VWS, high_quality_image: io.BytesIO, ) -> None: """ Details of a target are returned by ``get_target``. """ # target_id = client.add_target( # name='x', # width=1, # image=high_quality_image, # ) # # client.update_target(target_id=target_id) # result = client.get_target(target_id=target_id) # expected_keys = { # 'target_id', # 'active_flag', # 'name', # 'width', # 'tracking_rating', # 'reco_rating', # } # assert result['target_record'].keys() == expected_keys # # def test_no_such_target( # self, # client: VWS, # high_quality_image: io.BytesIO, # ) -> None: # """ # An ``UnknownTarget`` exception is raised when getting a target which # does not exist. # """ # with pytest.raises(UnknownTarget): # client.get_target(target_id='a')
Comment out partially done code """ Tests for helper function for updating a target from a Vuforia database. """ import io import pytest from vws import VWS from vws.exceptions import UnknownTarget class TestUpdateTarget: """ Test for updating a target. """ def test_get_target( self, client: VWS, high_quality_image: io.BytesIO, ) -> None: """ Details of a target are returned by ``get_target``. """ target_id = client.add_target( name='x', width=1, image=high_quality_image, ) client.update_target(target_id=target_id) result = client.get_target(target_id=target_id) expected_keys = { 'target_id', 'active_flag', 'name', 'width', 'tracking_rating', 'reco_rating', } assert result['target_record'].keys() == expected_keys def test_no_such_target( self, client: VWS, high_quality_image: io.BytesIO, ) -> None: """ An ``UnknownTarget`` exception is raised when getting a target which does not exist. """ with pytest.raises(UnknownTarget): client.get_target(target_id='a')
3732b2ee099989ed46e264f031b9b47c414cf6c6
imagekit/importers.py
imagekit/importers.py
import re import sys class ProcessorImporter(object): """ The processors were moved to the PILKit project so they could be used separtely from ImageKit (which has a bunch of Django dependencies). However, there's no real need to expose this fact (and we want to maintain backwards compatibility), so we proxy all "imagekit.processors" imports to "pilkit.processors" using this object. """ pattern = re.compile(r'^imagekit\.processors((\..*)?)$') def find_module(self, name, path=None): if self.pattern.match(name): return self def load_module(self, name): if name in sys.modules: return sys.modules[name] from django.utils.importlib import import_module new_name = self.pattern.sub(r'pilkit.processors\1', name) return import_module(new_name) sys.meta_path.append(ProcessorImporter())
import re import sys class ProcessorImporter(object): """ The processors were moved to the PILKit project so they could be used separtely from ImageKit (which has a bunch of Django dependencies). However, there's no real need to expose this fact (and we want to maintain backwards compatibility), so we proxy all "imagekit.processors" imports to "pilkit.processors" using this object. """ pattern = re.compile(r'^imagekit\.processors((\..*)?)$') def find_module(self, name, path=None): if self.pattern.match(name): return self def load_module(self, name): if name in sys.modules: return sys.modules[name] from django.utils.importlib import import_module new_name = self.pattern.sub(r'pilkit.processors\1', name) return import_module(new_name) sys.meta_path.insert(0, ProcessorImporter())
Insert importer at beginning of list
Insert importer at beginning of list
Python
bsd-3-clause
tawanda/django-imagekit,FundedByMe/django-imagekit,tawanda/django-imagekit,FundedByMe/django-imagekit
<REPLACE_OLD> import_module(new_name) sys.meta_path.append(ProcessorImporter()) <REPLACE_NEW> import_module(new_name) sys.meta_path.insert(0, ProcessorImporter()) <REPLACE_END> <|endoftext|> import re import sys class ProcessorImporter(object): """ The processors were moved to the PILKit project so they could be used separtely from ImageKit (which has a bunch of Django dependencies). However, there's no real need to expose this fact (and we want to maintain backwards compatibility), so we proxy all "imagekit.processors" imports to "pilkit.processors" using this object. """ pattern = re.compile(r'^imagekit\.processors((\..*)?)$') def find_module(self, name, path=None): if self.pattern.match(name): return self def load_module(self, name): if name in sys.modules: return sys.modules[name] from django.utils.importlib import import_module new_name = self.pattern.sub(r'pilkit.processors\1', name) return import_module(new_name) sys.meta_path.insert(0, ProcessorImporter())
Insert importer at beginning of list import re import sys class ProcessorImporter(object): """ The processors were moved to the PILKit project so they could be used separtely from ImageKit (which has a bunch of Django dependencies). However, there's no real need to expose this fact (and we want to maintain backwards compatibility), so we proxy all "imagekit.processors" imports to "pilkit.processors" using this object. """ pattern = re.compile(r'^imagekit\.processors((\..*)?)$') def find_module(self, name, path=None): if self.pattern.match(name): return self def load_module(self, name): if name in sys.modules: return sys.modules[name] from django.utils.importlib import import_module new_name = self.pattern.sub(r'pilkit.processors\1', name) return import_module(new_name) sys.meta_path.append(ProcessorImporter())
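The one-line change above matters because sys.meta_path finders are consulted in order: insert(0, ...) puts the proxy ahead of any finder registered later, whereas append left it last. A self-contained sketch of the same aliasing pattern under Python 2's PEP 302 protocol, with placeholder package names:

import re
import sys
from importlib import import_module

class AliasImporter(object):
    """Transparently redirect imports of old_pkg.* to new_pkg.* (names illustrative)."""
    pattern = re.compile(r'^old_pkg((\..*)?)$')

    def find_module(self, name, path=None):
        if self.pattern.match(name):
            return self  # claim the import so load_module is called

    def load_module(self, name):
        if name in sys.modules:
            return sys.modules[name]
        module = import_module(self.pattern.sub(r'new_pkg\1', name))
        sys.modules[name] = module  # also cache under the aliased name
        return module

# First position wins: earlier finders take precedence over later ones.
sys.meta_path.insert(0, AliasImporter())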
cd827059f9c500603d5c6b1d1bdf1621dc87a6a2
pyaem/handlers.py
pyaem/handlers.py
from BeautifulSoup import * import exception def auth_fail(response, **kwargs): code = response['http_code'] message = 'Authentication failed - incorrect username and/or password' raise exception.PyAemException(code, message) def method_not_allowed(response, **kwargs): code = response['http_code'] soup = BeautifulSoup(response['body']) message = soup.p.string raise exception.PyAemException(code, message) def unexpected(response, **kwargs): code = response['http_code'] message = 'Unexpected error' raise exception.PyAemException(code, message)
from BeautifulSoup import * import exception def auth_fail(response, **kwargs): code = response['http_code'] message = 'Authentication failed - incorrect username and/or password' raise exception.PyAemException(code, message) def method_not_allowed(response, **kwargs): code = response['http_code'] soup = BeautifulSoup(response['body']) message = soup.p.string raise exception.PyAemException(code, message) def unexpected(response, **kwargs): code = response['http_code'] message = 'Unexpected response http code {0} and body\n{1}'.format(response['http_code'], response['body']) raise exception.PyAemException(code, message)
Update unexpected handler message, with http code and body.
Update unexpected handler message, with http code and body.
Python
mit
wildone/pyaem,Sensis/pyaem
<REPLACE_OLD> error' raise <REPLACE_NEW> response http code {0} and body\n{1}'.format(response['http_code'], response['body']) raise <REPLACE_END> <|endoftext|> from BeautifulSoup import * import exception def auth_fail(response, **kwargs): code = response['http_code'] message = 'Authentication failed - incorrect username and/or password' raise exception.PyAemException(code, message) def method_not_allowed(response, **kwargs): code = response['http_code'] soup = BeautifulSoup(response['body']) message = soup.p.string raise exception.PyAemException(code, message) def unexpected(response, **kwargs): code = response['http_code'] message = 'Unexpected response http code {0} and body\n{1}'.format(response['http_code'], response['body']) raise exception.PyAemException(code, message)
Update unexpected handler message, with http code and body. from BeautifulSoup import * import exception def auth_fail(response, **kwargs): code = response['http_code'] message = 'Authentication failed - incorrect username and/or password' raise exception.PyAemException(code, message) def method_not_allowed(response, **kwargs): code = response['http_code'] soup = BeautifulSoup(response['body']) message = soup.p.string raise exception.PyAemException(code, message) def unexpected(response, **kwargs): code = response['http_code'] message = 'Unexpected error' raise exception.PyAemException(code, message)
edd5adc9be2a700421bd8e98af825322796b8714
dns/models.py
dns/models.py
from google.appengine.ext import db TOP_LEVEL_DOMAINS = 'com net org biz info'.split() class Lookup(db.Model): """ The datastore key name is the domain name, without top level. IP address fields use 0 (zero) for NXDOMAIN because None is returned for missing properties. Updates since 2010-01-01 use negative numbers for 60 bit hashes of the SOA server name, see tools/update_dns.py. """ backwards = db.StringProperty(required=True) # For suffix matching. timestamp = db.DateTimeProperty(required=True) # Created or updated. com = db.IntegerProperty(indexed=False) net = db.IntegerProperty(indexed=False) org = db.IntegerProperty(indexed=False) biz = db.IntegerProperty(indexed=False) info = db.IntegerProperty(indexed=False)
from google.appengine.ext import db TOP_LEVEL_DOMAINS = """ com net org biz info ag am at be by ch ck de es eu fm in io is it la li ly me mobi ms name ru se sh sy tel th to travel tv us """.split() # Omitting nu, ph, st, ws because they don't seem to have NXDOMAIN. class UpgradeStringProperty(db.IntegerProperty): def validate(self, value): return unicode(value) if value else u'' class Lookup(db.Expando): """ The datastore key name is the domain name, without top level. IP address fields use 0 (zero) for NXDOMAIN because None is returned for missing properties. Some updates on 2010-01-01 use negative numbers for 60 bit hashes of the SOA server name. Since 2010-01-02, this model inherits from Expando to flexibly add more top level domains. Each property stores the authority name server as string backwards, e.g. com.1and1.ns1 for better sorting. """ backwards = db.StringProperty(required=True) # For suffix matching. timestamp = db.DateTimeProperty(required=True) # Created or updated. com = UpgradeStringProperty() net = UpgradeStringProperty() org = UpgradeStringProperty() biz = UpgradeStringProperty() info = UpgradeStringProperty()
Upgrade Lookup model to Expando and DNS result properties from integer to string.
Upgrade Lookup model to Expando and DNS result properties from integer to string.
Python
mit
jcrocholl/nxdom,jcrocholl/nxdom
<REPLACE_OLD> 'com <REPLACE_NEW> """ com <REPLACE_END> <REPLACE_OLD> info'.split() class Lookup(db.Model): <REPLACE_NEW> info ag am at be by ch ck de es eu fm in io is it la li ly me mobi ms name ru se sh sy tel th to travel tv us """.split() # Omitting nu, ph, st, ws because they don't seem to have NXDOMAIN. class UpgradeStringProperty(db.IntegerProperty): def validate(self, value): return unicode(value) if value else u'' class Lookup(db.Expando): <REPLACE_END> <REPLACE_OLD> Updates since <REPLACE_NEW> Some updates on <REPLACE_END> <REPLACE_OLD> name, see tools/update_dns.py. <REPLACE_NEW> name. Since 2010-01-02, this model inherits from Expando to flexibly add more top level domains. Each property stores the authority name server as string backwards, e.g. com.1and1.ns1 for better sorting. <REPLACE_END> <REPLACE_OLD> db.IntegerProperty(indexed=False) <REPLACE_NEW> UpgradeStringProperty() <REPLACE_END> <REPLACE_OLD> db.IntegerProperty(indexed=False) <REPLACE_NEW> UpgradeStringProperty() <REPLACE_END> <REPLACE_OLD> db.IntegerProperty(indexed=False) <REPLACE_NEW> UpgradeStringProperty() <REPLACE_END> <REPLACE_OLD> db.IntegerProperty(indexed=False) <REPLACE_NEW> UpgradeStringProperty() <REPLACE_END> <REPLACE_OLD> db.IntegerProperty(indexed=False) <REPLACE_NEW> UpgradeStringProperty() <REPLACE_END> <|endoftext|> from google.appengine.ext import db TOP_LEVEL_DOMAINS = """ com net org biz info ag am at be by ch ck de es eu fm in io is it la li ly me mobi ms name ru se sh sy tel th to travel tv us """.split() # Omitting nu, ph, st, ws because they don't seem to have NXDOMAIN. class UpgradeStringProperty(db.IntegerProperty): def validate(self, value): return unicode(value) if value else u'' class Lookup(db.Expando): """ The datastore key name is the domain name, without top level. IP address fields use 0 (zero) for NXDOMAIN because None is returned for missing properties. Some updates on 2010-01-01 use negative numbers for 60 bit hashes of the SOA server name. Since 2010-01-02, this model inherits from Expando to flexibly add more top level domains. Each property stores the authority name server as string backwards, e.g. com.1and1.ns1 for better sorting. """ backwards = db.StringProperty(required=True) # For suffix matching. timestamp = db.DateTimeProperty(required=True) # Created or updated. com = UpgradeStringProperty() net = UpgradeStringProperty() org = UpgradeStringProperty() biz = UpgradeStringProperty() info = UpgradeStringProperty()
Upgrade Lookup model to Expando and DNS result properties from integer to string. from google.appengine.ext import db TOP_LEVEL_DOMAINS = 'com net org biz info'.split() class Lookup(db.Model): """ The datastore key name is the domain name, without top level. IP address fields use 0 (zero) for NXDOMAIN because None is returned for missing properties. Updates since 2010-01-01 use negative numbers for 60 bit hashes of the SOA server name, see tools/update_dns.py. """ backwards = db.StringProperty(required=True) # For suffix matching. timestamp = db.DateTimeProperty(required=True) # Created or updated. com = db.IntegerProperty(indexed=False) net = db.IntegerProperty(indexed=False) org = db.IntegerProperty(indexed=False) biz = db.IntegerProperty(indexed=False) info = db.IntegerProperty(indexed=False)
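The UpgradeStringProperty trick above works because google.appengine.ext.db properties run validate() when a value is assigned, so legacy integer rows can be read back and rewritten as strings without a separate migration. A rough usage sketch, assuming that assignment-time validation and illustrative key and field values:

import datetime

lookup = Lookup(key_name='example',
                backwards='elpmaxe',  # illustrative reversed name
                timestamp=datetime.datetime.utcnow())

lookup.com = 12345             # legacy integer value from an old row
assert lookup.com == u'12345'  # coerced to unicode by validate()
lookup.net = 0                 # falsy values collapse to the empty string
assert lookup.net == u''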
066a7dacf20ed3dd123790dc78e99317856ea731
tutorial/polls/admin.py
tutorial/polls/admin.py
from django.contrib import admin # Register your models here. from .models import Question class QuestionAdmin(admin.ModelAdmin): fields = ['pub_date', 'question_text'] admin.site.register(Question, QuestionAdmin)
from django.contrib import admin # Register your models here. from .models import Question class QuestionAdmin(admin.ModelAdmin): #fields = ['pub_date', 'question_text'] fieldsets = [ (None, {'fields' : ['question_text']}), ('Date Information', { 'fields' : ['pub_date'], 'classes': ['collapse']}), ] admin.site.register(Question, QuestionAdmin)
Put Question Admin fields in a fieldset and added a collapse class to the date field
Put Question Admin fields in a fieldset and added a collapse class to the date field
Python
mit
ikosenn/django_reignited,ikosenn/django_reignited
<REPLACE_OLD> Question class QuestionAdmin(admin.ModelAdmin): <REPLACE_NEW> Question class QuestionAdmin(admin.ModelAdmin): <REPLACE_END> <REPLACE_OLD> fields <REPLACE_NEW> #fields <REPLACE_END> <REPLACE_OLD> 'question_text'] admin.site.register(Question, <REPLACE_NEW> 'question_text'] fieldsets = [ (None, {'fields' : ['question_text']}), ('Date Information', { 'fields' : ['pub_date'], 'classes': ['collapse']}), ] admin.site.register(Question, <REPLACE_END> <|endoftext|> from django.contrib import admin # Register your models here. from .models import Question class QuestionAdmin(admin.ModelAdmin): #fields = ['pub_date', 'question_text'] fieldsets = [ (None, {'fields' : ['question_text']}), ('Date Information', { 'fields' : ['pub_date'], 'classes': ['collapse']}), ] admin.site.register(Question, QuestionAdmin)
Put Question Admin fields in a fieldset and added a collapse class to the date field from django.contrib import admin # Register your models here. from .models import Question class QuestionAdmin(admin.ModelAdmin): fields = ['pub_date', 'question_text'] admin.site.register(Question, QuestionAdmin)
10be723bf9396c3e513d09ce2a16a3aee0eebe36
setup.py
setup.py
#!/usr/bin/env python import os from distutils.core import setup, Extension, Command from distutils.command.sdist import sdist from distutils.command.build_py import build_py from numpy import get_include as get_numpy_include numpy_includes = get_numpy_include() ext_modules = [Extension("reproject._overlap_wrapper", ['reproject/_overlap_wrapper.c', 'reproject/overlapArea.c'], include_dirs=[numpy_includes])] class PyTest(Command): user_options = [] def initialize_options(self): pass def finalize_options(self): pass def run(self): import sys,subprocess errno = subprocess.call([sys.executable, 'runtests.py']) raise SystemExit(errno) setup(name='reproject', version="0.1.0", author='Thomas Robitaille', author_email='[email protected]', packages=['reproject', 'reproject.tests'], cmdclass = {'test': PyTest}, ext_modules = ext_modules )
#!/usr/bin/env python import os from distutils.core import setup, Extension, Command from distutils.command.sdist import sdist from distutils.command.build_py import build_py from numpy import get_include as get_numpy_include numpy_includes = get_numpy_include() ext_modules = [Extension("reproject._overlap_wrapper", ['reproject/_overlap_wrapper.c', 'reproject/overlapArea.c'], include_dirs=[numpy_includes])] class PyTest(Command): user_options = [] def initialize_options(self): pass def finalize_options(self): pass def run(self): import os import shutil import tempfile # First ensure that we build the package so that 2to3 gets executed self.reinitialize_command('build') self.run_command('build') build_cmd = self.get_finalized_command('build') new_path = os.path.abspath(build_cmd.build_lib) # Copy the build to a temporary directory for the purposes of testing # - this avoids creating pyc and __pycache__ directories inside the # build directory tmp_dir = tempfile.mkdtemp(prefix='reprojection-test-') testing_path = os.path.join(tmp_dir, os.path.basename(new_path)) shutil.copytree(new_path, testing_path) import sys import subprocess errno = subprocess.call([sys.executable, os.path.abspath('runtests.py')], cwd=testing_path) raise SystemExit(errno) setup(name='reproject', version="0.1.0", author='Thomas Robitaille', author_email='[email protected]', packages=['reproject', 'reproject.tests'], cmdclass = {'test': PyTest}, ext_modules = ext_modules )
Make sure the package is built before it is tested
Make sure the package is built before it is tested
Python
bsd-3-clause
barentsen/reproject,mwcraig/reproject,astrofrog/reproject,astrofrog/reproject,bsipocz/reproject,barentsen/reproject,barentsen/reproject,astrofrog/reproject,bsipocz/reproject,mwcraig/reproject
<REPLACE_OLD> PyTest(Command): <REPLACE_NEW> PyTest(Command): <REPLACE_END> <REPLACE_OLD> [] <REPLACE_NEW> [] <REPLACE_END> <REPLACE_OLD> pass <REPLACE_NEW> pass <REPLACE_END> <REPLACE_OLD> pass <REPLACE_NEW> pass <REPLACE_END> <REPLACE_OLD> run(self): import sys,subprocess <REPLACE_NEW> run(self): import os import shutil import tempfile # First ensure that we build the package so that 2to3 gets executed self.reinitialize_command('build') self.run_command('build') build_cmd = self.get_finalized_command('build') new_path = os.path.abspath(build_cmd.build_lib) # Copy the build to a temporary directory for the purposes of testing # - this avoids creating pyc and __pycache__ directories inside the # build directory tmp_dir = tempfile.mkdtemp(prefix='reprojection-test-') testing_path = os.path.join(tmp_dir, os.path.basename(new_path)) shutil.copytree(new_path, testing_path) import sys import subprocess <REPLACE_END> <REPLACE_OLD> 'runtests.py']) <REPLACE_NEW> os.path.abspath('runtests.py')], cwd=testing_path) <REPLACE_END> <|endoftext|> #!/usr/bin/env python import os from distutils.core import setup, Extension, Command from distutils.command.sdist import sdist from distutils.command.build_py import build_py from numpy import get_include as get_numpy_include numpy_includes = get_numpy_include() ext_modules = [Extension("reproject._overlap_wrapper", ['reproject/_overlap_wrapper.c', 'reproject/overlapArea.c'], include_dirs=[numpy_includes])] class PyTest(Command): user_options = [] def initialize_options(self): pass def finalize_options(self): pass def run(self): import os import shutil import tempfile # First ensure that we build the package so that 2to3 gets executed self.reinitialize_command('build') self.run_command('build') build_cmd = self.get_finalized_command('build') new_path = os.path.abspath(build_cmd.build_lib) # Copy the build to a temporary directory for the purposes of testing # - this avoids creating pyc and __pycache__ directories inside the # build directory tmp_dir = tempfile.mkdtemp(prefix='reprojection-test-') testing_path = os.path.join(tmp_dir, os.path.basename(new_path)) shutil.copytree(new_path, testing_path) import sys import subprocess errno = subprocess.call([sys.executable, os.path.abspath('runtests.py')], cwd=testing_path) raise SystemExit(errno) setup(name='reproject', version="0.1.0", author='Thomas Robitaille', author_email='[email protected]', packages=['reproject', 'reproject.tests'], cmdclass = {'test': PyTest}, ext_modules = ext_modules )
Make sure the package is built before it is tested #!/usr/bin/env python import os from distutils.core import setup, Extension, Command from distutils.command.sdist import sdist from distutils.command.build_py import build_py from numpy import get_include as get_numpy_include numpy_includes = get_numpy_include() ext_modules = [Extension("reproject._overlap_wrapper", ['reproject/_overlap_wrapper.c', 'reproject/overlapArea.c'], include_dirs=[numpy_includes])] class PyTest(Command): user_options = [] def initialize_options(self): pass def finalize_options(self): pass def run(self): import sys,subprocess errno = subprocess.call([sys.executable, 'runtests.py']) raise SystemExit(errno) setup(name='reproject', version="0.1.0", author='Thomas Robitaille', author_email='[email protected]', packages=['reproject', 'reproject.tests'], cmdclass = {'test': PyTest}, ext_modules = ext_modules )
6c8638c6e5801701d598509bb95036837ccd4a02
setup.py
setup.py
import setuptools setuptools.setup( name='mailcap-fix', version='0.1.1', description='A patched mailcap module that conforms to RFC 1524', long_description=open('README.rst').read(), url='https://github.com/michael-lazar/mailcap_fix', author='Michael Lazar', author_email='[email protected]', license='UNLICENSE', keywords='mailcap 1524', packages=['mailcap_fix'], )
import setuptools setuptools.setup( name='mailcap-fix', version='0.1.1', description='A patched mailcap module that conforms to RFC 1524', long_description=open('README.rst', encoding='utf-8').read(), url='https://github.com/michael-lazar/mailcap_fix', author='Michael Lazar', author_email='[email protected]', license='UNLICENSE', keywords='mailcap 1524', packages=['mailcap_fix'], )
Fix if encoding is not utf-8
Fix if encoding is not utf-8
Python
unlicense
michael-lazar/mailcap_fix
<REPLACE_OLD> long_description=open('README.rst').read(), <REPLACE_NEW> long_description=open('README.rst', encoding='utf-8').read(), <REPLACE_END> <|endoftext|> import setuptools setuptools.setup( name='mailcap-fix', version='0.1.1', description='A patched mailcap module that conforms to RFC 1524', long_description=open('README.rst', encoding='utf-8').read(), url='https://github.com/michael-lazar/mailcap_fix', author='Michael Lazar', author_email='[email protected]', license='UNLICENSE', keywords='mailcap 1524', packages=['mailcap_fix'], )
Fix if encoding is not utf-8 import setuptools setuptools.setup( name='mailcap-fix', version='0.1.1', description='A patched mailcap module that conforms to RFC 1524', long_description=open('README.rst').read(), url='https://github.com/michael-lazar/mailcap_fix', author='Michael Lazar', author_email='[email protected]', license='UNLICENSE', keywords='mailcap 1524', packages=['mailcap_fix'], )
664ad090e7b4c2922b5c89932e61d7ddef326da9
script/get_matrices.py
script/get_matrices.py
import sys from HTMLParser import HTMLParser class MyHtmlParser(HTMLParser): def __init__(self): HTMLParser.__init__(self) self.state = 'NONE' def handle_starttag(self, tag, attrs): if self.state == 'FINISHED': return if tag == '<table>': self.state = 'PARSING_TABLE' print tag elif tag == '<td>': self.state ='PARSING_VALUE' elif tag == '<tr>': if skipped_header: self.state = 'PARSING_ENTRY' def handle_endtag(self, tag): if tag == '<table>': self.state ='FINISHED' elif tag == '<td>': self.state = 'PARSING_ENTRY' elif tag == '<tr>': self.state = 'PARSING_TABLE' def handle_data(self, data): if self.state == 'PARSING_VALUE': print data def main(): if len(sys.argv) != 2: print "Usage: python get_matrices.py <html_file>" return f = open(sys.argv[1]) state = 'NONE' entry_count = 0 text = "" max_lines = 100 c = 0 for line in f: text = text + '\n' + line if c > max_lines: break c += 1 parser = MyHtmlParser() parser.feed('<table>bau</table>') if __name__ == '__main__': main()
Add a simple Python script to fetch matrices from the UoF collection.
Add a simple Python script to fetch matrices from the UoF collection.
Python
mit
caskorg/cask,caskorg/cask,caskorg/cask,caskorg/cask,caskorg/cask
<REPLACE_OLD> <REPLACE_NEW> import sys from HTMLParser import HTMLParser class MyHtmlParser(HTMLParser): def __init__(self): HTMLParser.__init__(self) self.state = 'NONE' def handle_starttag(self, tag, attrs): if self.state == 'FINISHED': return if tag == '<table>': self.state = 'PARSING_TABLE' print tag elif tag == '<td>': self.state ='PARSING_VALUE' elif tag == '<tr>': if skipped_header: self.state = 'PARSING_ENTRY' def handle_endtag(self, tag): if tag == '<table>': self.state ='FINISHED' elif tag == '<td>': self.state = 'PARSING_ENTRY' elif tag == '<tr>': self.state = 'PARSING_TABLE' def handle_data(self, data): if self.state == 'PARSING_VALUE': print data def main(): if len(sys.argv) != 2: print "Usage: python get_matrices.py <html_file>" return f = open(sys.argv[1]) state = 'NONE' entry_count = 0 text = "" max_lines = 100 c = 0 for line in f: text = text + '\n' + line if c > max_lines: break c += 1 parser = MyHtmlParser() parser.feed('<table>bau</table>') if __name__ == '__main__': main() <REPLACE_END> <|endoftext|> import sys from HTMLParser import HTMLParser class MyHtmlParser(HTMLParser): def __init__(self): HTMLParser.__init__(self) self.state = 'NONE' def handle_starttag(self, tag, attrs): if self.state == 'FINISHED': return if tag == '<table>': self.state = 'PARSING_TABLE' print tag elif tag == '<td>': self.state ='PARSING_VALUE' elif tag == '<tr>': if skipped_header: self.state = 'PARSING_ENTRY' def handle_endtag(self, tag): if tag == '<table>': self.state ='FINISHED' elif tag == '<td>': self.state = 'PARSING_ENTRY' elif tag == '<tr>': self.state = 'PARSING_TABLE' def handle_data(self, data): if self.state == 'PARSING_VALUE': print data def main(): if len(sys.argv) != 2: print "Usage: python get_matrices.py <html_file>" return f = open(sys.argv[1]) state = 'NONE' entry_count = 0 text = "" max_lines = 100 c = 0 for line in f: text = text + '\n' + line if c > max_lines: break c += 1 parser = MyHtmlParser() parser.feed('<table>bau</table>') if __name__ == '__main__': main()
Add a simple Python script to fetch matrices from the UoF collection.
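The committed script is an exploratory draft: HTMLParser hands handle_starttag() bare tag names such as 'table' rather than '<table>', and skipped_header is never defined, so the state machine as stored cannot fire. A small working sketch of the intended technique follows; the class name and the sample row values are invented for illustration:

from html.parser import HTMLParser  # the module is named HTMLParser on Python 2

class CellExtractor(HTMLParser):
    """Collect the text content of every <td> cell."""

    def __init__(self):
        HTMLParser.__init__(self)
        self.in_cell = False
        self.cells = []

    def handle_starttag(self, tag, attrs):
        if tag == 'td':  # tags arrive without angle brackets
            self.in_cell = True

    def handle_endtag(self, tag):
        if tag == 'td':
            self.in_cell = False

    def handle_data(self, data):
        if self.in_cell and data.strip():
            self.cells.append(data.strip())

parser = CellExtractor()
parser.feed('<table><tr><td>1138</td><td>bcsstk13</td></tr></table>')
print(parser.cells)  # ['1138', 'bcsstk13']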
0418027b186f146ff75170ecf5c8e63c3dab3cc1
treeherder/client/setup.py
treeherder/client/setup.py
# This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. from setuptools import setup version = '1.1' setup(name='treeherder-client', version=version, description="Python library to submit data to treeherder-service", long_description="""\ """, classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers keywords='', author='Mozilla Automation and Testing Team', author_email='[email protected]', url='https://github.com/mozilla/treeherder-client', license='MPL', packages=['thclient'], zip_safe=False, install_requires=['oauth2'], test_suite='thclient.tests', tests_require=["mock"], )
# This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. from setuptools import setup version = '1.1' setup(name='treeherder-client', version=version, description="Python library to submit data to treeherder-service", long_description="""\ """, classifiers=[ 'Environment :: Console', 'Intended Audience :: Developers', 'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)', 'Natural Language :: English', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Topic :: Software Development :: Libraries :: Python Modules', ], keywords='', author='Mozilla Automation and Testing Team', author_email='[email protected]', url='https://github.com/mozilla/treeherder-client', license='MPL', packages=['thclient'], zip_safe=False, install_requires=['oauth2'], test_suite='thclient.tests', tests_require=["mock"], )
Add various classifications for pypi
Add various classifications for pypi
Python
mpl-2.0
rail/treeherder,glenn124f/treeherder,parkouss/treeherder,tojon/treeherder,adusca/treeherder,akhileshpillai/treeherder,glenn124f/treeherder,adusca/treeherder,avih/treeherder,wlach/treeherder,moijes12/treeherder,akhileshpillai/treeherder,avih/treeherder,vaishalitekale/treeherder,vaishalitekale/treeherder,rail/treeherder,moijes12/treeherder,edmorley/treeherder,tojon/treeherder,deathping1994/treeherder,jgraham/treeherder,tojonmz/treeherder,moijes12/treeherder,jgraham/treeherder,moijes12/treeherder,wlach/treeherder,wlach/treeherder,kapy2010/treeherder,wlach/treeherder,parkouss/treeherder,deathping1994/treeherder,adusca/treeherder,tojonmz/treeherder,vaishalitekale/treeherder,edmorley/treeherder,jgraham/treeherder,deathping1994/treeherder,glenn124f/treeherder,KWierso/treeherder,wlach/treeherder,kapy2010/treeherder,deathping1994/treeherder,parkouss/treeherder,gbrmachado/treeherder,parkouss/treeherder,glenn124f/treeherder,akhileshpillai/treeherder,sylvestre/treeherder,adusca/treeherder,avih/treeherder,avih/treeherder,KWierso/treeherder,moijes12/treeherder,sylvestre/treeherder,akhileshpillai/treeherder,gbrmachado/treeherder,vaishalitekale/treeherder,tojonmz/treeherder,parkouss/treeherder,rail/treeherder,sylvestre/treeherder,jgraham/treeherder,rail/treeherder,jgraham/treeherder,avih/treeherder,KWierso/treeherder,gbrmachado/treeherder,moijes12/treeherder,gbrmachado/treeherder,tojon/treeherder,glenn124f/treeherder,deathping1994/treeherder,sylvestre/treeherder,edmorley/treeherder,tojonmz/treeherder,gbrmachado/treeherder,vaishalitekale/treeherder,tojon/treeherder,tojonmz/treeherder,gbrmachado/treeherder,vaishalitekale/treeherder,glenn124f/treeherder,kapy2010/treeherder,kapy2010/treeherder,tojonmz/treeherder,sylvestre/treeherder,avih/treeherder,edmorley/treeherder,kapy2010/treeherder,KWierso/treeherder,akhileshpillai/treeherder,rail/treeherder,parkouss/treeherder,deathping1994/treeherder,jgraham/treeherder,akhileshpillai/treeherder,wlach/treeherder,rail/treeherder,adusca/treeherder,adusca/treeherder,sylvestre/treeherder
<REPLACE_OLD> classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers <REPLACE_NEW> classifiers=[ 'Environment :: Console', 'Intended Audience :: Developers', 'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)', 'Natural Language :: English', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Topic :: Software Development :: Libraries :: Python Modules', ], <REPLACE_END> <|endoftext|> # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. from setuptools import setup version = '1.1' setup(name='treeherder-client', version=version, description="Python library to submit data to treeherder-service", long_description="""\ """, classifiers=[ 'Environment :: Console', 'Intended Audience :: Developers', 'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)', 'Natural Language :: English', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Topic :: Software Development :: Libraries :: Python Modules', ], keywords='', author='Mozilla Automation and Testing Team', author_email='[email protected]', url='https://github.com/mozilla/treeherder-client', license='MPL', packages=['thclient'], zip_safe=False, install_requires=['oauth2'], test_suite='thclient.tests', tests_require=["mock"], )
Add various classifications for pypi # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. from setuptools import setup version = '1.1' setup(name='treeherder-client', version=version, description="Python library to submit data to treeherder-service", long_description="""\ """, classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers keywords='', author='Mozilla Automation and Testing Team', author_email='[email protected]', url='https://github.com/mozilla/treeherder-client', license='MPL', packages=['thclient'], zip_safe=False, install_requires=['oauth2'], test_suite='thclient.tests', tests_require=["mock"], )
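Trove classifiers like the ones added above are matched against a fixed list published by PyPI, and current PyPI rejects uploads whose classifier strings it does not recognise, so copying them verbatim matters. A minimal sketch, with placeholder package metadata:

from setuptools import setup

setup(
    name='example-client',
    version='1.1',
    # Strings must match the canonical list at https://pypi.org/classifiers/
    # exactly; they are search metadata only and have no runtime effect.
    classifiers=[
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)',
        'Programming Language :: Python',
    ],
)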
2eeab9e35badba0c271b1d1671f08347a5c5e06e
penchy/tests/test_elements.py
penchy/tests/test_elements.py
import unittest2 from penchy.tests.util import MockPipelineElement class PipelineElementHookTest(unittest2.TestCase): def setUp(self): self.e = MockPipelineElement() self.list_ = [23, 42, 5] def test_pre_hooks(self): self.e.prehooks = [ lambda: self.list_.__setitem__(0, 1), lambda: self.list_.__setitem__(1, 1), lambda: self.list_.__setitem__(2, 1)] self.e.run() self.assertListEqual(self.list_, [1, 1, 1]) def test_post_hooks(self): self.e.posthooks = [ lambda: self.list_.__setitem__(0, 1), lambda: self.list_.__setitem__(1, 1), lambda: self.list_.__setitem__(2, 1)] self.e.run() self.assertListEqual(self.list_, [1, 1, 1])
Add test for PipelineElement hooks.
tests: Add test for PipelineElement hooks. Signed-off-by: Michael Markert <[email protected]>
Python
mit
fhirschmann/penchy,fhirschmann/penchy
<INSERT> import unittest2 from penchy.tests.util import MockPipelineElement class PipelineElementHookTest(unittest2.TestCase): <INSERT_END> <INSERT> def setUp(self): self.e = MockPipelineElement() self.list_ = [23, 42, 5] def test_pre_hooks(self): self.e.prehooks = [ lambda: self.list_.__setitem__(0, 1), lambda: self.list_.__setitem__(1, 1), lambda: self.list_.__setitem__(2, 1)] self.e.run() self.assertListEqual(self.list_, [1, 1, 1]) def test_post_hooks(self): self.e.posthooks = [ lambda: self.list_.__setitem__(0, 1), lambda: self.list_.__setitem__(1, 1), lambda: self.list_.__setitem__(2, 1)] self.e.run() self.assertListEqual(self.list_, [1, 1, 1]) <INSERT_END> <|endoftext|> import unittest2 from penchy.tests.util import MockPipelineElement class PipelineElementHookTest(unittest2.TestCase): def setUp(self): self.e = MockPipelineElement() self.list_ = [23, 42, 5] def test_pre_hooks(self): self.e.prehooks = [ lambda: self.list_.__setitem__(0, 1), lambda: self.list_.__setitem__(1, 1), lambda: self.list_.__setitem__(2, 1)] self.e.run() self.assertListEqual(self.list_, [1, 1, 1]) def test_post_hooks(self): self.e.posthooks = [ lambda: self.list_.__setitem__(0, 1), lambda: self.list_.__setitem__(1, 1), lambda: self.list_.__setitem__(2, 1)] self.e.run() self.assertListEqual(self.list_, [1, 1, 1])
tests: Add test for PipelineElement hooks. Signed-off-by: Michael Markert <[email protected]>
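The test above pokes prehooks and posthooks, lists of zero-argument callables that a pipeline element runs around its payload. A self-contained sketch of that contract; the Element class is hypothetical, standing in for the PipelineElement the test mocks:

class Element(object):
    """Run every pre-hook, then the payload, then every post-hook."""

    def __init__(self):
        self.prehooks = []
        self.posthooks = []

    def run(self):
        for hook in self.prehooks:
            hook()
        # ... the element's real work would happen here ...
        for hook in self.posthooks:
            hook()

calls = []
e = Element()
e.prehooks = [lambda: calls.append('pre')]
e.posthooks = [lambda: calls.append('post')]
e.run()
assert calls == ['pre', 'post']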
e421a3cfd9ecfe05aa21b2b3da792f7ab824727d
experimental/db/remove_property.py
experimental/db/remove_property.py
""" Remove a property from the datastore. How to use: $ cd experimental/db/ $ PYTHONPATH=. remote_api_shell.py -s homeawesomation.appspot.com > import remove_property """ from google.appengine.api import namespace_manager from google.appengine.ext import db class Base(db.Expando): pass def remove(namespace, field): namespace_manager.set_namespace(namespace) for base in Base.all().run(): if hasattr(base, field): print "%s %s" %(base.id, base.name) del base.category base.put()
""" Remove a property from the datastore. How to use: $ cd experimental/db/ $ PYTHONPATH=. remote_api_shell.py -s homeawesomation.appspot.com > import remove_property """ from google.appengine.api import namespace_manager from google.appengine.ext import db class Base(db.Expando): pass def remove(namespace, field): namespace_manager.set_namespace(namespace) for base in Base.all().run(): if hasattr(base, field): print "%s %s" % (base.key().id_or_name(), getattr(base, 'name', None)) delattr(base, field) base.put()
Fix datastore delete field script.
Fix datastore delete field script.
Python
mit
tomwilkie/awesomation,tomwilkie/awesomation,tomwilkie/awesomation,tomwilkie/awesomation,tomwilkie/awesomation
<REPLACE_OLD> %(base.id, base.name) <REPLACE_NEW> % (base.key().id_or_name(), getattr(base, 'name', None)) <REPLACE_END> <REPLACE_OLD> del base.category <REPLACE_NEW> delattr(base, field) <REPLACE_END> <|endoftext|> """ Remove a property from the datastore. How to use: $ cd experimental/db/ $ PYTHONPATH=. remote_api_shell.py -s homeawesomation.appspot.com > import remove_property """ from google.appengine.api import namespace_manager from google.appengine.ext import db class Base(db.Expando): pass def remove(namespace, field): namespace_manager.set_namespace(namespace) for base in Base.all().run(): if hasattr(base, field): print "%s %s" % (base.key().id_or_name(), getattr(base, 'name', None)) delattr(base, field) base.put()
Fix datastore delete field script. """ Remove a property from the datastore. How to use: $ cd experimental/db/ $ PYTHONPATH=. remote_api_shell.py -s homeawesomation.appspot.com > import remove_property """ from google.appengine.api import namespace_manager from google.appengine.ext import db class Base(db.Expando): pass def remove(namespace, field): namespace_manager.set_namespace(namespace) for base in Base.all().run(): if hasattr(base, field): print "%s %s" %(base.id, base.name) del base.category base.put()
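Besides the key()/getattr fixes, the core of the diff above is replacing a hard-coded `del base.category` with `delattr(base, field)`, so the script can drop whichever property name it was asked to. The same mechanics on a plain object; Record and its attributes are invented for the demo, and App Engine's Expando behaves analogously for dynamic properties:

class Record(object):
    pass

r = Record()
r.name = 'living-room-light'
r.category = 'lighting'

field = 'category'
if hasattr(r, field):
    delattr(r, field)  # removes whatever attribute name the variable holds

print(getattr(r, field, None))  # None: the attribute is gone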
13ba81df82f2c43838066ec9cd0fa1222324349f
srsly/util.py
srsly/util.py
# coding: utf8 from __future__ import unicode_literals from pathlib import Path import sys def force_path(location, require_exists=True): if not isinstance(location, Path): location = Path(location) if require_exists and not location.exists(): raise ValueError("Can't read file: {}".format(location)) return location def force_string(location): if sys.version_info[0] == 2: # Python 2 return str(location).decode("utf8") return str(location)
# coding: utf8 from __future__ import unicode_literals from pathlib import Path import sys is_python2 = sys.version_info[0] == 2 is_python3 = sys.version_info[0] == 3 if is_python2: basestring_ = basestring # noqa: F821 else: basestring_ = str def force_path(location, require_exists=True): if not isinstance(location, Path): location = Path(location) if require_exists and not location.exists(): raise ValueError("Can't read file: {}".format(location)) return location def force_string(location): if isinstance(location, basestring_): return location if sys.version_info[0] == 2: # Python 2 return str(location).decode("utf8") return str(location)
Improve compat handling in force_string
Improve compat handling in force_string If we know we already have a string, no need to force it into a string
Python
mit
explosion/srsly,explosion/srsly,explosion/srsly,explosion/srsly
<REPLACE_OLD> sys def <REPLACE_NEW> sys is_python2 = sys.version_info[0] == 2 is_python3 = sys.version_info[0] == 3 if is_python2: basestring_ = basestring # noqa: F821 else: basestring_ = str def <REPLACE_END> <INSERT> isinstance(location, basestring_): return location if <INSERT_END> <|endoftext|> # coding: utf8 from __future__ import unicode_literals from pathlib import Path import sys is_python2 = sys.version_info[0] == 2 is_python3 = sys.version_info[0] == 3 if is_python2: basestring_ = basestring # noqa: F821 else: basestring_ = str def force_path(location, require_exists=True): if not isinstance(location, Path): location = Path(location) if require_exists and not location.exists(): raise ValueError("Can't read file: {}".format(location)) return location def force_string(location): if isinstance(location, basestring_): return location if sys.version_info[0] == 2: # Python 2 return str(location).decode("utf8") return str(location)
Improve compat handling in force_string If we know we already have a string, no need to force it into a string # coding: utf8 from __future__ import unicode_literals from pathlib import Path import sys def force_path(location, require_exists=True): if not isinstance(location, Path): location = Path(location) if require_exists and not location.exists(): raise ValueError("Can't read file: {}".format(location)) return location def force_string(location): if sys.version_info[0] == 2: # Python 2 return str(location).decode("utf8") return str(location)
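The shim above is the classic Python 2/3 string-compat pattern: bind the broadest text type once at import time, then test with isinstance before converting. A standalone sketch; string_types is an invented alias (the record calls its own basestring_):

import sys

if sys.version_info[0] == 2:
    string_types = basestring  # noqa: F821 - only defined on Python 2
else:
    string_types = str

def force_string(value):
    if isinstance(value, string_types):
        return value  # already text: hand it back untouched
    return str(value)

assert force_string('abc') == 'abc'
assert force_string(42) == '42'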
ed09a3ded286cc4d5623c17e65b2d40ef55ccee7
valohai_yaml/parsing.py
valohai_yaml/parsing.py
from typing import IO, Union from valohai_yaml.objs import Config from .utils import read_yaml def parse(yaml: Union[dict, list, bytes, str, IO], validate: bool = True) -> Config: """ Parse the given YAML data into a `Config` object, optionally validating it first. :param yaml: YAML data (either a string, a stream, or pre-parsed Python dict/list) :param validate: Whether to validate the data before attempting to parse it. :return: Config object """ data = read_yaml(yaml) if validate: # pragma: no branch from .validation import validate as do_validate do_validate(data, raise_exc=True) return Config.parse(data)
from typing import IO, Union from valohai_yaml.objs import Config from .utils import read_yaml def parse(yaml: Union[dict, list, bytes, str, IO], validate: bool = True) -> Config: """ Parse the given YAML data into a `Config` object, optionally validating it first. :param yaml: YAML data (either a string, a stream, or pre-parsed Python dict/list) :param validate: Whether to validate the data before attempting to parse it. :return: Config object """ data = read_yaml(yaml) if data is None: # empty file return Config() if validate: # pragma: no branch from .validation import validate as do_validate do_validate(data, raise_exc=True) return Config.parse(data)
Handle empty YAML files in parse()
Handle empty YAML files in parse() Refs valohai/valohai-cli#170
Python
mit
valohai/valohai-yaml
<INSERT> data is None: # empty file return Config() if <INSERT_END> <|endoftext|> from typing import IO, Union from valohai_yaml.objs import Config from .utils import read_yaml def parse(yaml: Union[dict, list, bytes, str, IO], validate: bool = True) -> Config: """ Parse the given YAML data into a `Config` object, optionally validating it first. :param yaml: YAML data (either a string, a stream, or pre-parsed Python dict/list) :param validate: Whether to validate the data before attempting to parse it. :return: Config object """ data = read_yaml(yaml) if data is None: # empty file return Config() if validate: # pragma: no branch from .validation import validate as do_validate do_validate(data, raise_exc=True) return Config.parse(data)
Handle empty YAML files in parse() Refs valohai/valohai-cli#170 from typing import IO, Union from valohai_yaml.objs import Config from .utils import read_yaml def parse(yaml: Union[dict, list, bytes, str, IO], validate: bool = True) -> Config: """ Parse the given YAML data into a `Config` object, optionally validating it first. :param yaml: YAML data (either a string, a stream, or pre-parsed Python dict/list) :param validate: Whether to validate the data before attempting to parse it. :return: Config object """ data = read_yaml(yaml) if validate: # pragma: no branch from .validation import validate as do_validate do_validate(data, raise_exc=True) return Config.parse(data)
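The guard added above exists because PyYAML's loaders return None for an empty document rather than an empty mapping. A minimal sketch of the same normalisation outside valohai; load_config is a hypothetical helper name:

import yaml  # PyYAML

def load_config(text):
    data = yaml.safe_load(text)
    if data is None:  # empty file, or one containing only comments
        return {}
    return data

assert load_config('') == {}
assert load_config('a: 1') == {'a': 1}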
e818860af87cad796699e27f8dfb4ff6fc9354e8
h2o-py/h2o/model/autoencoder.py
h2o-py/h2o/model/autoencoder.py
""" AutoEncoder Models """ from model_base import * from metrics_base import * class H2OAutoEncoderModel(ModelBase): """ Class for AutoEncoder models. """ def __init__(self, dest_key, model_json): super(H2OAutoEncoderModel, self).__init__(dest_key, model_json,H2OAutoEncoderModelMetrics) def anomaly(self,test_data): """ Obtain the reconstruction error for the input test_data. :param test_data: The dataset upon which the reconstruction error is computed. :return: Return the reconstruction error. """ if not test_data: raise ValueError("Must specify test data") j = H2OConnection.post_json("Predictions/models/" + self._id + "/frames/" + test_data._id, reconstruction_error=True) return h2o.get_frame(j["model_metrics"][0]["predictions"]["frame_id"]["name"])
""" AutoEncoder Models """ from model_base import * from metrics_base import * class H2OAutoEncoderModel(ModelBase): """ Class for AutoEncoder models. """ def __init__(self, dest_key, model_json): super(H2OAutoEncoderModel, self).__init__(dest_key, model_json,H2OAutoEncoderModelMetrics) def anomaly(self,test_data,per_feature=False): """ Obtain the reconstruction error for the input test_data. :param test_data: The dataset upon which the reconstruction error is computed. :param per_feature: Whether to return the square reconstruction error per feature. Otherwise, return the mean square error. :return: Return the reconstruction error. """ if not test_data: raise ValueError("Must specify test data") j = H2OConnection.post_json("Predictions/models/" + self._id + "/frames/" + test_data._id, reconstruction_error=True, reconstruction_error_per_feature=per_feature) return h2o.get_frame(j["model_metrics"][0]["predictions"]["frame_id"]["name"])
Add extra argument to get per-feature reconstruction error for anomaly detection from Python.
PUBDEV-2078: Add extra argument to get per-feature reconstruction error for anomaly detection from Python.
Python
apache-2.0
kyoren/https-github.com-h2oai-h2o-3,h2oai/h2o-3,mathemage/h2o-3,h2oai/h2o-dev,mathemage/h2o-3,datachand/h2o-3,YzPaul3/h2o-3,h2oai/h2o-3,brightchen/h2o-3,mathemage/h2o-3,YzPaul3/h2o-3,h2oai/h2o-dev,datachand/h2o-3,kyoren/https-github.com-h2oai-h2o-3,printedheart/h2o-3,pchmieli/h2o-3,madmax983/h2o-3,YzPaul3/h2o-3,datachand/h2o-3,YzPaul3/h2o-3,printedheart/h2o-3,kyoren/https-github.com-h2oai-h2o-3,junwucs/h2o-3,pchmieli/h2o-3,datachand/h2o-3,junwucs/h2o-3,mathemage/h2o-3,h2oai/h2o-3,printedheart/h2o-3,junwucs/h2o-3,kyoren/https-github.com-h2oai-h2o-3,YzPaul3/h2o-3,madmax983/h2o-3,michalkurka/h2o-3,junwucs/h2o-3,printedheart/h2o-3,datachand/h2o-3,pchmieli/h2o-3,michalkurka/h2o-3,printedheart/h2o-3,brightchen/h2o-3,h2oai/h2o-dev,jangorecki/h2o-3,madmax983/h2o-3,h2oai/h2o-3,michalkurka/h2o-3,h2oai/h2o-3,YzPaul3/h2o-3,jangorecki/h2o-3,h2oai/h2o-dev,jangorecki/h2o-3,brightchen/h2o-3,pchmieli/h2o-3,brightchen/h2o-3,spennihana/h2o-3,junwucs/h2o-3,mathemage/h2o-3,printedheart/h2o-3,madmax983/h2o-3,h2oai/h2o-3,jangorecki/h2o-3,kyoren/https-github.com-h2oai-h2o-3,madmax983/h2o-3,datachand/h2o-3,michalkurka/h2o-3,michalkurka/h2o-3,brightchen/h2o-3,jangorecki/h2o-3,madmax983/h2o-3,junwucs/h2o-3,jangorecki/h2o-3,h2oai/h2o-3,spennihana/h2o-3,h2oai/h2o-dev,michalkurka/h2o-3,pchmieli/h2o-3,junwucs/h2o-3,mathemage/h2o-3,datachand/h2o-3,kyoren/https-github.com-h2oai-h2o-3,jangorecki/h2o-3,spennihana/h2o-3,spennihana/h2o-3,spennihana/h2o-3,madmax983/h2o-3,spennihana/h2o-3,h2oai/h2o-3,YzPaul3/h2o-3,h2oai/h2o-dev,spennihana/h2o-3,brightchen/h2o-3,pchmieli/h2o-3,h2oai/h2o-dev,brightchen/h2o-3,kyoren/https-github.com-h2oai-h2o-3,michalkurka/h2o-3,pchmieli/h2o-3,mathemage/h2o-3,printedheart/h2o-3
<REPLACE_OLD> anomaly(self,test_data): <REPLACE_NEW> anomaly(self,test_data,per_feature=False): <REPLACE_END> <INSERT> :param per_feature: Whether to return the square reconstruction error per feature. Otherwise, return the mean square error. <INSERT_END> <REPLACE_OLD> reconstruction_error=True) <REPLACE_NEW> reconstruction_error=True, reconstruction_error_per_feature=per_feature) <REPLACE_END> <|endoftext|> """ AutoEncoder Models """ from model_base import * from metrics_base import * class H2OAutoEncoderModel(ModelBase): """ Class for AutoEncoder models. """ def __init__(self, dest_key, model_json): super(H2OAutoEncoderModel, self).__init__(dest_key, model_json,H2OAutoEncoderModelMetrics) def anomaly(self,test_data,per_feature=False): """ Obtain the reconstruction error for the input test_data. :param test_data: The dataset upon which the reconstruction error is computed. :param per_feature: Whether to return the square reconstruction error per feature. Otherwise, return the mean square error. :return: Return the reconstruction error. """ if not test_data: raise ValueError("Must specify test data") j = H2OConnection.post_json("Predictions/models/" + self._id + "/frames/" + test_data._id, reconstruction_error=True, reconstruction_error_per_feature=per_feature) return h2o.get_frame(j["model_metrics"][0]["predictions"]["frame_id"]["name"])
PUBDEV-2078: Add extra argument to get per-feature reconstruction error for anomaly detection from Python. """ AutoEncoder Models """ from model_base import * from metrics_base import * class H2OAutoEncoderModel(ModelBase): """ Class for AutoEncoder models. """ def __init__(self, dest_key, model_json): super(H2OAutoEncoderModel, self).__init__(dest_key, model_json,H2OAutoEncoderModelMetrics) def anomaly(self,test_data): """ Obtain the reconstruction error for the input test_data. :param test_data: The dataset upon which the reconstruction error is computed. :return: Return the reconstruction error. """ if not test_data: raise ValueError("Must specify test data") j = H2OConnection.post_json("Predictions/models/" + self._id + "/frames/" + test_data._id, reconstruction_error=True) return h2o.get_frame(j["model_metrics"][0]["predictions"]["frame_id"]["name"])
dda88345334985796dac2095f6e78bb106bc19b3
pullpush/pullpush.py
pullpush/pullpush.py
#!/usr/bin/env python3 import git class PullPush: def __init__(self, repo_dir): self.repo_dir = repo_dir self.repo = None def pull(self, source_repo): """ Pulls the remote source_repo and stores it in the repo_dir directory. """ self.repo = git.Repo.init(self.repo_dir) origin = self.repo.create_remote('origin', source_repo) origin.fetch() origin.pull(origin.refs[0].remote_head) def set_target_repo(self, new_url): """ Changes the target url of the previously pulled repo. """ origin = self.repo.remotes.origin cw = origin.config_writer cw.set("url", new_url) cw.release() def push(self, target_repo): """ Pushes the previously pulled repo to the target_repo. """ origin = self.repo.remotes.origin self.set_target_repo(target_repo) self.repo.create_head('master', origin.refs.master).set_tracking_branch(origin.refs.master) origin.push()
#!/usr/bin/env python3 import git class PullPush: def __init__(self, repo_dir): self.repo_dir = repo_dir self.repo = None def pull(self, source_repo): """ Pulls the remote source_repo and stores it in the repo_dir directory. """ self.repo = git.Repo.init(self.repo_dir) origin = self.repo.create_remote('origin', source_repo) origin.fetch() origin.pull(origin.refs[0].remote_head) def set_target_repo(self, new_url): """ Changes the target url of the previously pulled repo. """ origin = self.repo.remotes.origin cw = origin.config_writer cw.set("url", new_url) cw.release() def push(self, target_repo): """ Pushes the previously pulled repo to the target_repo. """ if self.repo is None: # TODO Better handling return origin = self.repo.remotes.origin self.set_target_repo(target_repo) self.repo.create_head('master', origin.refs.master).set_tracking_branch(origin.refs.master) origin.push()
Check if repo was pulled
Check if repo was pulled
Python
mit
martialblog/git-pullpush
<INSERT> if self.repo is None: # TODO Better handling return <INSERT_END> <|endoftext|> #!/usr/bin/env python3 import git class PullPush: def __init__(self, repo_dir): self.repo_dir = repo_dir self.repo = None def pull(self, source_repo): """ Pulls the remote source_repo and stores it in the repo_dir directory. """ self.repo = git.Repo.init(self.repo_dir) origin = self.repo.create_remote('origin', source_repo) origin.fetch() origin.pull(origin.refs[0].remote_head) def set_target_repo(self, new_url): """ Changes the target url of the previously pulled repo. """ origin = self.repo.remotes.origin cw = origin.config_writer cw.set("url", new_url) cw.release() def push(self, target_repo): """ Pushes the previously pulled repo to the target_repo. """ if self.repo is None: # TODO Better handling return origin = self.repo.remotes.origin self.set_target_repo(target_repo) self.repo.create_head('master', origin.refs.master).set_tracking_branch(origin.refs.master) origin.push()
Check if repo was pulled #!/usr/bin/env python3 import git class PullPush: def __init__(self, repo_dir): self.repo_dir = repo_dir self.repo = None def pull(self, source_repo): """ Pulls the remote source_repo and stores it in the repo_dir directory. """ self.repo = git.Repo.init(self.repo_dir) origin = self.repo.create_remote('origin', source_repo) origin.fetch() origin.pull(origin.refs[0].remote_head) def set_target_repo(self, new_url): """ Changes the target url of the previously pulled repo. """ origin = self.repo.remotes.origin cw = origin.config_writer cw.set("url", new_url) cw.release() def push(self, target_repo): """ Pushes the previously pulled repo to the target_repo. """ origin = self.repo.remotes.origin self.set_target_repo(target_repo) self.repo.create_head('master', origin.refs.master).set_tracking_branch(origin.refs.master) origin.push()
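The inserted `if self.repo is None: return` is a guard clause: push() would otherwise crash with an AttributeError on self.repo.remotes when nothing was pulled first. A sketch of the shape of that fix, with GitPython stubbed out by plain attributes; raising is one alternative to the silent return the commit itself marks as a TODO:

class PullPush(object):
    def __init__(self):
        self.repo = None  # populated by pull()

    def pull(self, source):
        self.repo = source  # stand-in for the real clone/fetch

    def push(self, target):
        if self.repo is None:
            # Failing loudly is often friendlier than returning silently.
            raise RuntimeError('pull() must be called before push()')
        return (self.repo, target)

p = PullPush()
try:
    p.push('target-url')
except RuntimeError as exc:
    print(exc)  # pull() must be called before push()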
518f9bff28585aa1eeb12b9b12d95e32fb257725
src/district_distance.py
src/district_distance.py
# coding: utf-8 # In[79]: import math import operator import json from geopy.distance import great_circle # In[90]: class Order_districts(): def get_district_info(): # -- get names and coordinates from csv file with open('coordinates.json') as coord_file: district_dict = json.load(coord_file) return district_dict # In[134]: get_district_info(); # test for json reading # In[128]: def distance(lat0, lon0, lat, lon): ''' Calculates distance on Earth's surface in meters ''' return great_circle((lat0,lon0), (lat,lon)).meters def e_distance(x,y,w,z): ''' Euclidean distance calculation for simple sorting purposes ''' a = math.pow(x - w,2) b = math.pow(y - z,2) return math.sqrt(a+b) # In[131]: def order_districts(lat0, lon0, district_dict): ''' function that return a list of names of districts ordered by distance from the point (lat0,lon0) passed from map Inputs: 'lat0' = latitude of point at center of map 'lon0' = longitude of point at center of map 'district_dict' = dict of district names and (lat,lon) from function get_district_info() Outputs: df with district names ordered by distance, coordinates of district (lat,lon) ''' distance_dict={} # -- loop thru entries in coord/name dictionary for key, value in district_dict.iteritems(): lat = float(value[0]); lon = float(value[1]); # -- calculate coords in radians #Delta_lat = math.radians(lat0-lat) # latitudinal distance #Delta_lon = math.radians(lon0-lon) # longitudinal distance #lat0 = math.radians(lat0) # convert to radians #lat = math.radians(lat) distance_dict[key] = distance(lat0, lon0, lat, lon) sorted_districts = sorted(distance_dict.items(), key=operator.itemgetter(1)) return zip(*sorted_districts) # In[136]: #order_districts(27.67298,85.43005,get_district_info())[0] # test for distance # In[121]: # In[ ]:
Return the district names ordered by distance from the given coordinates
Return the district names ordered by distance from the given coordinates
Python
apache-2.0
ldolberg/the_port_ors_hdx,ldolberg/the_port_ors_hdx
<REPLACE_OLD> <REPLACE_NEW> # coding: utf-8 # In[79]: import math import operator import json from geopy.distance import great_circle # In[90]: class Order_districts(): def get_district_info(): # -- get names and coordinates from csv file with open('coordinates.json') as coord_file: district_dict = json.load(coord_file) return district_dict # In[134]: get_district_info(); # test for json reading # In[128]: def distance(lat0, lon0, lat, lon): ''' Calculates distance on Earth's surface in meters ''' return great_circle((lat0,lon0), (lat,lon)).meters def e_distance(x,y,w,z): ''' Euclidean distance calculation for simple sorting purposes ''' a = math.pow(x - w,2) b = math.pow(y - z,2) return math.sqrt(a+b) # In[131]: def order_districts(lat0, lon0, district_dict): ''' function that return a list of names of districts ordered by distance from the point (lat0,lon0) passed from map Inputs: 'lat0' = latitude of point at center of map 'lon0' = longitude of point at center of map 'district_dict' = dict of district names and (lat,lon) from function get_district_info() Outputs: df with district names ordered by distance, coordinates of district (lat,lon) ''' distance_dict={} # -- loop thru entries in coord/name dictionary for key, value in district_dict.iteritems(): lat = float(value[0]); lon = float(value[1]); # -- calculate coords in radians #Delta_lat = math.radians(lat0-lat) # latitudinal distance #Delta_lon = math.radians(lon0-lon) # longitudinal distance #lat0 = math.radians(lat0) # convert to radians #lat = math.radians(lat) distance_dict[key] = distance(lat0, lon0, lat, lon) sorted_districts = sorted(distance_dict.items(), key=operator.itemgetter(1)) return zip(*sorted_districts) # In[136]: #order_districts(27.67298,85.43005,get_district_info())[0] # test for distance # In[121]: # In[ ]: <REPLACE_END> <|endoftext|> # coding: utf-8 # In[79]: import math import operator import json from geopy.distance import great_circle # In[90]: class Order_districts(): def get_district_info(): # -- get names and coordinates from csv file with open('coordinates.json') as coord_file: district_dict = json.load(coord_file) return district_dict # In[134]: get_district_info(); # test for json reading # In[128]: def distance(lat0, lon0, lat, lon): ''' Calculates distance on Earth's surface in meters ''' return great_circle((lat0,lon0), (lat,lon)).meters def e_distance(x,y,w,z): ''' Euclidean distance calculation for simple sorting purposes ''' a = math.pow(x - w,2) b = math.pow(y - z,2) return math.sqrt(a+b) # In[131]: def order_districts(lat0, lon0, district_dict): ''' function that return a list of names of districts ordered by distance from the point (lat0,lon0) passed from map Inputs: 'lat0' = latitude of point at center of map 'lon0' = longitude of point at center of map 'district_dict' = dict of district names and (lat,lon) from function get_district_info() Outputs: df with district names ordered by distance, coordinates of district (lat,lon) ''' distance_dict={} # -- loop thru entries in coord/name dictionary for key, value in district_dict.iteritems(): lat = float(value[0]); lon = float(value[1]); # -- calculate coords in radians #Delta_lat = math.radians(lat0-lat) # latitudinal distance #Delta_lon = math.radians(lon0-lon) # longitudinal distance #lat0 = math.radians(lat0) # convert to radians #lat = math.radians(lat) distance_dict[key] = distance(lat0, lon0, lat, lon) sorted_districts = sorted(distance_dict.items(), key=operator.itemgetter(1)) return zip(*sorted_districts) # In[136]: #order_districts(27.67298,85.43005,get_district_info())[0] # test for distance # In[121]: # In[ ]:
Return the district names ordered by distance from the given coordinates
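The committed notebook export leans on geopy's great_circle plus a sort over (name, distance) pairs. The same ranking can be sketched with a pure-stdlib haversine, avoiding the geopy dependency; the two district coordinates below are rough illustrative values, not authoritative data:

import math

def haversine_m(lat0, lon0, lat1, lon1):
    """Great-circle distance in metres between two (lat, lon) points."""
    radius = 6371000.0  # mean Earth radius in metres
    dlat = math.radians(lat1 - lat0)
    dlon = math.radians(lon1 - lon0)
    a = (math.sin(dlat / 2) ** 2
         + math.cos(math.radians(lat0)) * math.cos(math.radians(lat1))
         * math.sin(dlon / 2) ** 2)
    return 2 * radius * math.asin(math.sqrt(a))

districts = {'Kathmandu': (27.7172, 85.3240), 'Bhaktapur': (27.6710, 85.4298)}
lat0, lon0 = 27.67298, 85.43005  # the test point used in the notebook
ranked = sorted(districts, key=lambda n: haversine_m(lat0, lon0, *districts[n]))
print(ranked)  # nearest first: ['Bhaktapur', 'Kathmandu']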
ba2f2d7e53f0ffc58c882d78f1b8bc9a468eb164
predicates.py
predicates.py
class OneOf: def __init__(self, members): self.members = members def __call__(self, candidate): if candidate in self.members: return True return "%s not in %s" % (candidate, self.members) def __repr__(self): return "one of %s" % ', '.join(self.members) def oneof(*members): return OneOf(members) class InRange: def __init__(self, start, end): self.start = start self.end = end def __call__(self, candidate): if self.start <= candidate <= self.end: return True return "%s not between %s and %s" % (candidate, self.start, self.end) def __repr__(self): return "between %s and %s" % (self.start, self.end) def inrange(start, end): return InRange(start, end)
class OneOf: def __init__(self, members): self.members = members def __call__(self, candidate): if candidate in self.members: return True return "%s not in %s" % (candidate, self.members) def __repr__(self): return "one of %s" % ', '.join(map(repr, self.members)) def oneof(*members): return OneOf(members) class InRange: def __init__(self, start, end): self.start = start self.end = end def __call__(self, candidate): if self.start <= candidate <= self.end: return True return "%s not between %s and %s" % (candidate, self.start, self.end) def __repr__(self): return "between %s and %s" % (self.start, self.end) def inrange(start, end): return InRange(start, end)
Fix problem rendering oneof() predicate when the members aren't strings
Fix problem rendering oneof() predicate when the members aren't strings
Python
mit
mrozekma/pytypecheck
<REPLACE_OLD> '.join(self.members) def <REPLACE_NEW> '.join(map(repr, self.members)) def <REPLACE_END> <|endoftext|> class OneOf: def __init__(self, members): self.members = members def __call__(self, candidate): if candidate in self.members: return True return "%s not in %s" % (candidate, self.members) def __repr__(self): return "one of %s" % ', '.join(map(repr, self.members)) def oneof(*members): return OneOf(members) class InRange: def __init__(self, start, end): self.start = start self.end = end def __call__(self, candidate): if self.start <= candidate <= self.end: return True return "%s not between %s and %s" % (candidate, self.start, self.end) def __repr__(self): return "between %s and %s" % (self.start, self.end) def inrange(start, end): return InRange(start, end)
Fix problem rendering oneof() predicate when the members aren't strings class OneOf: def __init__(self, members): self.members = members def __call__(self, candidate): if candidate in self.members: return True return "%s not in %s" % (candidate, self.members) def __repr__(self): return "one of %s" % ', '.join(self.members) def oneof(*members): return OneOf(members) class InRange: def __init__(self, start, end): self.start = start self.end = end def __call__(self, candidate): if self.start <= candidate <= self.end: return True return "%s not between %s and %s" % (candidate, self.start, self.end) def __repr__(self): return "between %s and %s" % (self.start, self.end) def inrange(start, end): return InRange(start, end)
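The bug fixed above is that str.join() refuses non-string items, so repr() of a OneOf holding ints raised a TypeError until each member was passed through repr() first. A condensed reproduction of the before and after:

members = [1, 2, 3]
try:
    ', '.join(members)  # the old code path
except TypeError as exc:
    print(exc)  # sequence item 0: expected str instance, int found

print(', '.join(map(repr, members)))  # 1, 2, 3 - works for any member type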
79460959472f44abaed3d03689f9d397a77399c7
apps/careeropportunity/forms.py
apps/careeropportunity/forms.py
from django import forms from apps.careeropportunity.models import CareerOpportunity class AddCareerOpportunityForm(forms.ModelForm): description = forms.CharField(label='Beskrivelse', required=True, widget=forms.Textarea( attrs={'placeholder': 'Detaljert beskrivelse av karrieremuligheten'})) ingress = forms.CharField(label='Ingress', required=True, widget=forms.TextInput( attrs={'placeholder': 'Kort ingress til karrieremuligheten'})) class Meta: model = CareerOpportunity fields = ('company', 'title', 'ingress', 'description', 'start', 'end', 'featured')
from django import forms from apps.careeropportunity.models import CareerOpportunity class AddCareerOpportunityForm(forms.ModelForm): title = forms.CharField(label='Tittel', required=True, widget=forms.TextInput( attrs={'placeholder': 'Tittel for karrieremuligheten'})) ingress = forms.CharField(label='Ingress', required=True, widget=forms.Textarea( attrs={'placeholder': 'Kort ingress til karrieremuligheten (Max 250 tegn)'})) description = forms.CharField(label='Beskrivelse', required=True, widget=forms.Textarea( attrs={'placeholder': 'Detaljert beskrivelse av karrieremuligheten'})) start = forms.DateTimeField(label='Start-tid', required=True, widget=forms.TextInput( attrs={'placeholder': 'Velg start-tid'})) end = forms.DateTimeField(label='Slutt-tid', required=True, widget=forms.TextInput( attrs={'placeholder': 'Velg slutt-tid'})) deadline = forms.DateTimeField(label='Søknadsfrist', required=True, widget=forms.TextInput( attrs={'placeholder': 'Velg søknadsfrist'})) class Meta: model = CareerOpportunity fields = ('company', 'title', 'ingress', 'description', 'start', 'end', 'featured', 'deadline', 'employment', 'location')
Add input fields for new attributes on career opportunities and placeholder text
Add input fields for new attributes on career opportunities and placeholder text
Python
mit
dotKom/onlineweb4,dotKom/onlineweb4,dotKom/onlineweb4,dotKom/onlineweb4
<REPLACE_OLD> AddCareerOpportunityForm(forms.ModelForm): <REPLACE_NEW> AddCareerOpportunityForm(forms.ModelForm): title = forms.CharField(label='Tittel', required=True, widget=forms.TextInput( attrs={'placeholder': 'Tittel for karrieremuligheten'})) ingress = forms.CharField(label='Ingress', required=True, widget=forms.Textarea( attrs={'placeholder': 'Kort ingress til karrieremuligheten (Max 250 tegn)'})) <REPLACE_END> <REPLACE_OLD> ingress <REPLACE_NEW> start <REPLACE_END> <REPLACE_OLD> forms.CharField(label='Ingress', <REPLACE_NEW> forms.DateTimeField(label='Start-tid', <REPLACE_END> <REPLACE_OLD> 'Kort ingress til karrieremuligheten'})) <REPLACE_NEW> 'Velg start-tid'})) end = forms.DateTimeField(label='Slutt-tid', required=True, widget=forms.TextInput( attrs={'placeholder': 'Velg slutt-tid'})) deadline = forms.DateTimeField(label='Søknadsfrist', required=True, widget=forms.TextInput( attrs={'placeholder': 'Velg søknadsfrist'})) <REPLACE_END> <REPLACE_OLD> 'featured') <REPLACE_NEW> 'featured', 'deadline', 'employment', 'location') <REPLACE_END> <|endoftext|> from django import forms from apps.careeropportunity.models import CareerOpportunity class AddCareerOpportunityForm(forms.ModelForm): title = forms.CharField(label='Tittel', required=True, widget=forms.TextInput( attrs={'placeholder': 'Tittel for karrieremuligheten'})) ingress = forms.CharField(label='Ingress', required=True, widget=forms.Textarea( attrs={'placeholder': 'Kort ingress til karrieremuligheten (Max 250 tegn)'})) description = forms.CharField(label='Beskrivelse', required=True, widget=forms.Textarea( attrs={'placeholder': 'Detaljert beskrivelse av karrieremuligheten'})) start = forms.DateTimeField(label='Start-tid', required=True, widget=forms.TextInput( attrs={'placeholder': 'Velg start-tid'})) end = forms.DateTimeField(label='Slutt-tid', required=True, widget=forms.TextInput( attrs={'placeholder': 'Velg slutt-tid'})) deadline = forms.DateTimeField(label='Søknadsfrist', required=True, widget=forms.TextInput( attrs={'placeholder': 'Velg søknadsfrist'})) class Meta: model = CareerOpportunity fields = ('company', 'title', 'ingress', 'description', 'start', 'end', 'featured', 'deadline', 'employment', 'location')
Add input fields for new attributes on career opportunities and placeholder text from django import forms from apps.careeropportunity.models import CareerOpportunity class AddCareerOpportunityForm(forms.ModelForm): description = forms.CharField(label='Beskrivelse', required=True, widget=forms.Textarea( attrs={'placeholder': 'Detaljert beskrivelse av karrieremuligheten'})) ingress = forms.CharField(label='Ingress', required=True, widget=forms.TextInput( attrs={'placeholder': 'Kort ingress til karrieremuligheten'})) class Meta: model = CareerOpportunity fields = ('company', 'title', 'ingress', 'description', 'start', 'end', 'featured')
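The diff above grows the ModelForm by declaring fields explicitly so each one can carry a placeholder; widget attrs are emitted verbatim as HTML attributes on the rendered input. A trimmed sketch of the pattern, with invented form and field names, assuming a configured Django project:

from django import forms

class OpportunityForm(forms.Form):
    title = forms.CharField(
        label='Title',
        widget=forms.TextInput(attrs={'placeholder': 'Title of the opportunity'}),
    )
    deadline = forms.DateTimeField(
        label='Deadline',
        # attrs land directly on the tag: <input ... placeholder="Pick a deadline">
        widget=forms.TextInput(attrs={'placeholder': 'Pick a deadline'}),
    )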
1ed040f9d64e12adf964e9f86cc1e18bd8d21593
scripts/rename.py
scripts/rename.py
import logging from scripts.util import documents from scrapi import settings from scrapi.linter import RawDocument from scrapi.processing.elasticsearch import es from scrapi.tasks import normalize, process_normalized, process_raw logger = logging.getLogger(__name__) def rename(source, target, dry=True): assert source != target, "Can't rename {} to {}, names are the same".format(source, target) count = 0 exceptions = [] for doc in documents(source): count += 1 try: raw = RawDocument({ 'doc': doc.doc, 'docID': doc.docID, 'source': target, 'filetype': doc.filetype, 'timestamps': doc.timestamps, 'versions': doc.versions }) if not dry: process_raw(raw) process_normalized(normalize(raw, raw['source']), raw) logger.info('Processed document from {} with id {}'.format(source, raw['docID'])) except Exception as e: logger.exception(e) exceptions.append(e) else: if not dry: doc.delete() es.delete(index=settings.ELASTIC_INDEX, doc_type=source, id=raw['docID'], ignore=[404]) logger.info('Deleted document from {} with id {}'.format(source, raw['docID'])) if dry: logger.info('Dry run complete') for ex in exceptions: logger.exception(e) logger.info('{} documents processed, with {} exceptions'.format(count, len(exceptions)))
import logging from scripts.util import documents from scrapi import settings from scrapi.linter import RawDocument from scrapi.processing.elasticsearch import es from scrapi.tasks import normalize, process_normalized, process_raw logger = logging.getLogger(__name__) def rename(source, target, dry=True): assert source != target, "Can't rename {} to {}, names are the same".format(source, target) count = 0 exceptions = [] for doc in documents(source): count += 1 try: raw = RawDocument({ 'doc': doc.doc, 'docID': doc.docID, 'source': target, 'filetype': doc.filetype, 'timestamps': doc.timestamps, 'versions': doc.versions }) if not dry: process_raw(raw) process_normalized(normalize(raw, raw['source']), raw) logger.info('Processed document from {} with id {}'.format(source, raw['docID'])) except Exception as e: logger.exception(e) exceptions.append(e) else: if not dry: # doc.delete() es.delete(index=settings.ELASTIC_INDEX, doc_type=source, id=raw['docID'], ignore=[404]) es.delete(index='share_v1', doc_type=source, id=raw['docID'], ignore=[404]) logger.info('Deleted document from {} with id {}'.format(source, raw['docID'])) if dry: logger.info('Dry run complete') for ex in exceptions: logger.exception(e) logger.info('{} documents processed, with {} exceptions'.format(count, len(exceptions)))
Stop cassandra from deleting documents, delete documents from old index as well
Stop cassandra from deleting documents, delete documents from old index as well
Python
apache-2.0
erinspace/scrapi,mehanig/scrapi,alexgarciac/scrapi,felliott/scrapi,fabianvf/scrapi,icereval/scrapi,jeffreyliu3230/scrapi,CenterForOpenScience/scrapi,erinspace/scrapi,mehanig/scrapi,CenterForOpenScience/scrapi,ostwald/scrapi,fabianvf/scrapi,felliott/scrapi
<INSERT> # <INSERT_END> <INSERT> doc_type=source, id=raw['docID'], ignore=[404]) es.delete(index='share_v1', <INSERT_END> <|endoftext|> import logging from scripts.util import documents from scrapi import settings from scrapi.linter import RawDocument from scrapi.processing.elasticsearch import es from scrapi.tasks import normalize, process_normalized, process_raw logger = logging.getLogger(__name__) def rename(source, target, dry=True): assert source != target, "Can't rename {} to {}, names are the same".format(source, target) count = 0 exceptions = [] for doc in documents(source): count += 1 try: raw = RawDocument({ 'doc': doc.doc, 'docID': doc.docID, 'source': target, 'filetype': doc.filetype, 'timestamps': doc.timestamps, 'versions': doc.versions }) if not dry: process_raw(raw) process_normalized(normalize(raw, raw['source']), raw) logger.info('Processed document from {} with id {}'.format(source, raw['docID'])) except Exception as e: logger.exception(e) exceptions.append(e) else: if not dry: # doc.delete() es.delete(index=settings.ELASTIC_INDEX, doc_type=source, id=raw['docID'], ignore=[404]) es.delete(index='share_v1', doc_type=source, id=raw['docID'], ignore=[404]) logger.info('Deleted document from {} with id {}'.format(source, raw['docID'])) if dry: logger.info('Dry run complete') for ex in exceptions: logger.exception(e) logger.info('{} documents processed, with {} exceptions'.format(count, len(exceptions)))
Stop cassandra from deleting documents, delete documents from old index as well import logging from scripts.util import documents from scrapi import settings from scrapi.linter import RawDocument from scrapi.processing.elasticsearch import es from scrapi.tasks import normalize, process_normalized, process_raw logger = logging.getLogger(__name__) def rename(source, target, dry=True): assert source != target, "Can't rename {} to {}, names are the same".format(source, target) count = 0 exceptions = [] for doc in documents(source): count += 1 try: raw = RawDocument({ 'doc': doc.doc, 'docID': doc.docID, 'source': target, 'filetype': doc.filetype, 'timestamps': doc.timestamps, 'versions': doc.versions }) if not dry: process_raw(raw) process_normalized(normalize(raw, raw['source']), raw) logger.info('Processed document from {} with id {}'.format(source, raw['docID'])) except Exception as e: logger.exception(e) exceptions.append(e) else: if not dry: doc.delete() es.delete(index=settings.ELASTIC_INDEX, doc_type=source, id=raw['docID'], ignore=[404]) logger.info('Deleted document from {} with id {}'.format(source, raw['docID'])) if dry: logger.info('Dry run complete') for ex in exceptions: logger.exception(e) logger.info('{} documents processed, with {} exceptions'.format(count, len(exceptions)))
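The change above makes the cleanup idempotent: the document is deleted from both the current and the legacy index, and ignore=[404] tells the elasticsearch-py client to treat an already-missing document as success (this matches the 2.x-era client the record targets; doc_type has since been removed from modern clients). A hedged sketch with invented index names:

from elasticsearch import Elasticsearch

es = Elasticsearch()

def purge(source, doc_id, indices=('share', 'share_v1')):
    for index in indices:
        # ignore=[404] suppresses the error for documents already deleted,
        # so re-running the migration does not abort halfway through.
        es.delete(index=index, doc_type=source, id=doc_id, ignore=[404])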
7f1f001802ffdf4a53e17b120e65af3ef9d1d2da
openfisca_france/conf/cache_blacklist.py
openfisca_france/conf/cache_blacklist.py
# When using openfisca for a large population, having too many variables in cache make openfisca performances drop. # The following variables are intermadiate results and do not need to be cached in those usecases. cache_blacklist = set([ 'aide_logement_loyer_retenu', 'aide_logement_charges', 'aide_logement_R0', 'aide_logement_taux_famille', 'aide_logement_taux_loyer', 'aide_logement_participation_personnelle', 'aide_logement_loyer_seuil_degressivite', 'aide_logement_loyer_seuil_suppression', 'aide_logement_montant_brut_avant_degressivite', ])
# When using openfisca for a large population, having too many variables in cache make openfisca performances drop. # The following variables are intermediate results and do not need to be cached in those usecases. cache_blacklist = set([ 'aide_logement_loyer_retenu', 'aide_logement_charges', 'aide_logement_R0', 'aide_logement_taux_famille', 'aide_logement_taux_loyer', 'aide_logement_participation_personnelle', 'aide_logement_loyer_seuil_degressivite', 'aide_logement_loyer_seuil_suppression', 'aide_logement_montant_brut_avant_degressivite', 'aides_logement_primo_accedant', 'aides_logement_primo_accedant_k', 'aides_logement_primo_accedant_nb_part', 'aides_logement_primo_accedant_loyer_minimal', 'aides_logement_primo_accedant_plafond_mensualite', 'aides_logement_primo_accedant_ressources', ])
Add new variable to cache blacklist
Add new variable to cache blacklist
Python
agpl-3.0
sgmap/openfisca-france,antoinearnoud/openfisca-france,antoinearnoud/openfisca-france,sgmap/openfisca-france
<REPLACE_OLD> intermadiate <REPLACE_NEW> intermediate <REPLACE_END> <REPLACE_OLD> 'aide_logement_montant_brut_avant_degressivite', ]) <REPLACE_NEW> 'aide_logement_montant_brut_avant_degressivite', 'aides_logement_primo_accedant', 'aides_logement_primo_accedant_k', 'aides_logement_primo_accedant_nb_part', 'aides_logement_primo_accedant_loyer_minimal', 'aides_logement_primo_accedant_plafond_mensualite', 'aides_logement_primo_accedant_ressources', ]) <REPLACE_END> <|endoftext|> # When using openfisca for a large population, having too many variables in cache make openfisca performances drop. # The following variables are intermediate results and do not need to be cached in those usecases. cache_blacklist = set([ 'aide_logement_loyer_retenu', 'aide_logement_charges', 'aide_logement_R0', 'aide_logement_taux_famille', 'aide_logement_taux_loyer', 'aide_logement_participation_personnelle', 'aide_logement_loyer_seuil_degressivite', 'aide_logement_loyer_seuil_suppression', 'aide_logement_montant_brut_avant_degressivite', 'aides_logement_primo_accedant', 'aides_logement_primo_accedant_k', 'aides_logement_primo_accedant_nb_part', 'aides_logement_primo_accedant_loyer_minimal', 'aides_logement_primo_accedant_plafond_mensualite', 'aides_logement_primo_accedant_ressources', ])
Add new variable to cache blacklist # When using openfisca for a large population, having too many variables in cache make openfisca performances drop. # The following variables are intermadiate results and do not need to be cached in those usecases. cache_blacklist = set([ 'aide_logement_loyer_retenu', 'aide_logement_charges', 'aide_logement_R0', 'aide_logement_taux_famille', 'aide_logement_taux_loyer', 'aide_logement_participation_personnelle', 'aide_logement_loyer_seuil_degressivite', 'aide_logement_loyer_seuil_suppression', 'aide_logement_montant_brut_avant_degressivite', ])
82a00e48492f2d787c980c434d58e249c210818e
ffmpeg/_probe.py
ffmpeg/_probe.py
import json import subprocess from ._run import Error from ._utils import convert_kwargs_to_cmd_line_args def probe(filename, cmd='ffprobe', **kwargs): """Run ffprobe on the specified file and return a JSON representation of the output. Raises: :class:`ffmpeg.Error`: if ffprobe returns a non-zero exit code, an :class:`Error` is returned with a generic error message. The stderr output can be retrieved by accessing the ``stderr`` property of the exception. """ args = [cmd, '-show_format', '-show_streams', '-of', 'json'] args += convert_kwargs_to_cmd_line_args(kwargs) args += [filename] p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = p.communicate() if p.returncode != 0: raise Error('ffprobe', out, err) return json.loads(out.decode('utf-8')) __all__ = ['probe']
import json import subprocess from ._run import Error from ._utils import convert_kwargs_to_cmd_line_args def probe(filename, cmd='ffprobe', timeout=None, **kwargs): """Run ffprobe on the specified file and return a JSON representation of the output. Raises: :class:`ffmpeg.Error`: if ffprobe returns a non-zero exit code, an :class:`Error` is returned with a generic error message. The stderr output can be retrieved by accessing the ``stderr`` property of the exception. """ args = [cmd, '-show_format', '-show_streams', '-of', 'json'] args += convert_kwargs_to_cmd_line_args(kwargs) args += [filename] p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = p.communicate(timeout=timeout) if p.returncode != 0: raise Error('ffprobe', out, err) return json.loads(out.decode('utf-8')) __all__ = ['probe']
Add optional timeout argument to probe
Add optional timeout argument to probe Popen.communicate() supports a timeout argument which is useful in case there is a risk that the probe hangs.
Python
apache-2.0
kkroening/ffmpeg-python
<INSERT> timeout=None, <INSERT_END> <REPLACE_OLD> p.communicate() <REPLACE_NEW> p.communicate(timeout=timeout) <REPLACE_END> <|endoftext|> import json import subprocess from ._run import Error from ._utils import convert_kwargs_to_cmd_line_args def probe(filename, cmd='ffprobe', timeout=None, **kwargs): """Run ffprobe on the specified file and return a JSON representation of the output. Raises: :class:`ffmpeg.Error`: if ffprobe returns a non-zero exit code, an :class:`Error` is returned with a generic error message. The stderr output can be retrieved by accessing the ``stderr`` property of the exception. """ args = [cmd, '-show_format', '-show_streams', '-of', 'json'] args += convert_kwargs_to_cmd_line_args(kwargs) args += [filename] p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = p.communicate(timeout=timeout) if p.returncode != 0: raise Error('ffprobe', out, err) return json.loads(out.decode('utf-8')) __all__ = ['probe']
Add optional timeout argument to probe Popen.communicate() supports a timeout argument which is useful in case there is a risk that the probe hangs. import json import subprocess from ._run import Error from ._utils import convert_kwargs_to_cmd_line_args def probe(filename, cmd='ffprobe', **kwargs): """Run ffprobe on the specified file and return a JSON representation of the output. Raises: :class:`ffmpeg.Error`: if ffprobe returns a non-zero exit code, an :class:`Error` is returned with a generic error message. The stderr output can be retrieved by accessing the ``stderr`` property of the exception. """ args = [cmd, '-show_format', '-show_streams', '-of', 'json'] args += convert_kwargs_to_cmd_line_args(kwargs) args += [filename] p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = p.communicate() if p.returncode != 0: raise Error('ffprobe', out, err) return json.loads(out.decode('utf-8')) __all__ = ['probe']
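Popen.communicate(timeout=...) raises subprocess.TimeoutExpired instead of returning, and it leaves the child process running, so callers usually pair it with kill(). A sketch of the full pattern the new probe argument enables; run_with_timeout is an invented helper:

import subprocess
import sys

def run_with_timeout(args, timeout):
    p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    try:
        out, err = p.communicate(timeout=timeout)
    except subprocess.TimeoutExpired:
        p.kill()         # communicate() does not stop the child on timeout
        p.communicate()  # reap it and drain the pipes, per the stdlib docs
        raise
    return p.returncode, out, err

print(run_with_timeout([sys.executable, '-c', 'print("ok")'], timeout=5))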
e11b3c344b52c84b5e86bdc381df2f359fe63dae
fparser/setup.py
fparser/setup.py
def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('fparser',parent_package,top_path) return config
def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('fparser',parent_package,top_path) config.add_data_files('log.config') return config
Add log.config to data files to fix installed fparser.
Add log.config to data files to fix installed fparser.
Python
bsd-3-clause
dagss/f2py-g3,dagss/f2py-g3
<INSERT> config.add_data_files('log.config') <INSERT_END> <|endoftext|> def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('fparser',parent_package,top_path) config.add_data_files('log.config') return config
Add log.config to data files to fix installed fparser. def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('fparser',parent_package,top_path) return config
af4c6d9747197b23014ba71803da792f9e612a12
django_mailbox/migrations/0004_bytestring_to_unicode.py
django_mailbox/migrations/0004_bytestring_to_unicode.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('django_mailbox', '0003_auto_20150409_0316'),
    ]

    operations = [
        migrations.AlterField(
            model_name='message',
            name='eml',
            field=models.FileField(verbose_name='Raw message contents', upload_to='messages', null=True, help_text='Original full content of message'),
        ),
        migrations.AlterField(
            model_name='messageattachment',
            name='document',
            field=models.FileField(verbose_name='Document', upload_to='mailbox_attachments/%Y/%m/%d/'),
        ),
    ]
Add migration to resolve inconsistency between python2 and python3 strings
Add migration to resolve inconsistency between python2 and python3 strings
Python
mit
Shekharrajak/django-mailbox,coddingtonbear/django-mailbox,ad-m/django-mailbox,leifurhauks/django-mailbox
<INSERT> # -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):
 <INSERT_END>
<INSERT> dependencies = [
        ('django_mailbox', '0003_auto_20150409_0316'),
    ]

    operations = [
        migrations.AlterField(
            model_name='message',
            name='eml',
            field=models.FileField(verbose_name='Raw message contents', upload_to='messages', null=True, help_text='Original full content of message'),
        ),
        migrations.AlterField(
            model_name='messageattachment',
            name='document',
            field=models.FileField(verbose_name='Document', upload_to='mailbox_attachments/%Y/%m/%d/'),
        ),
    ] <INSERT_END>
<|endoftext|>
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('django_mailbox', '0003_auto_20150409_0316'),
    ]

    operations = [
        migrations.AlterField(
            model_name='message',
            name='eml',
            field=models.FileField(verbose_name='Raw message contents', upload_to='messages', null=True, help_text='Original full content of message'),
        ),
        migrations.AlterField(
            model_name='messageattachment',
            name='document',
            field=models.FileField(verbose_name='Document', upload_to='mailbox_attachments/%Y/%m/%d/'),
        ),
    ]
Add migration to resolve inconsistency between python2 and python3 strings
810961f65c37d27c5e2d99cf102064d0b4e300f3
project/apiv2/views.py
project/apiv2/views.py
from django.db.models import Q
from django.shortcuts import render
from rest_framework.filters import OrderingFilter, SearchFilter
from rest_framework.generics import ListAPIView
from rest_framework_json_api.renderers import JSONRenderer
from rest_framework.generics import RetrieveUpdateDestroyAPIView

from bookmarks.models import Bookmark
from bookmarks.serializers import BookmarkSerializer


class BookmarkListCreateAPIView(ListAPIView):
    queryset = Bookmark.objects.all()
    serializer_class = BookmarkSerializer
    resource_name = 'bookmark'
    action = 'list'
    renderer_classes = (JSONRenderer,)
    filter_backends = (SearchFilter, OrderingFilter)
    search_fields = ('url', 'title')
    ordering_fields = ('id', 'url', 'title', 'bookmarked_at')


class BookmarkRetrieveUpdateDestroyAPIView(RetrieveUpdateDestroyAPIView):
    queryset = Bookmark.objects.all()
    serializer_class = BookmarkSerializer
    lookup_field = 'bookmark_id'
from django.db.models import Q
from django.shortcuts import render
from rest_framework.filters import OrderingFilter, SearchFilter
from rest_framework_json_api.renderers import JSONRenderer
from rest_framework.generics import ListCreateAPIView, RetrieveUpdateDestroyAPIView

from bookmarks.models import Bookmark
from bookmarks.serializers import BookmarkSerializer


class BookmarkListCreateAPIView(ListCreateAPIView):
    queryset = Bookmark.objects.all()
    serializer_class = BookmarkSerializer
    resource_name = 'bookmark'
    action = 'list'
    renderer_classes = (JSONRenderer,)
    filter_backends = (SearchFilter, OrderingFilter)
    search_fields = ('url', 'title')
    ordering_fields = ('id', 'url', 'title', 'bookmarked_at')


class BookmarkRetrieveUpdateDestroyAPIView(RetrieveUpdateDestroyAPIView):
    queryset = Bookmark.objects.all()
    serializer_class = BookmarkSerializer
    lookup_field = 'bookmark_id'
Use ListCreateAPIView as base class to support bookmark creation
Use ListCreateAPIView as base class to support bookmark creation
Python
mit
hnakamur/django-bootstrap-table-example,hnakamur/django-bootstrap-table-example,hnakamur/django-bootstrap-table-example
<DELETE> rest_framework.generics import ListAPIView
from <DELETE_END>
<INSERT> ListCreateAPIView, <INSERT_END>
<REPLACE_OLD> BookmarkListCreateAPIView(ListAPIView): <REPLACE_NEW> BookmarkListCreateAPIView(ListCreateAPIView): <REPLACE_END>
<|endoftext|>
from django.db.models import Q
from django.shortcuts import render
from rest_framework.filters import OrderingFilter, SearchFilter
from rest_framework_json_api.renderers import JSONRenderer
from rest_framework.generics import ListCreateAPIView, RetrieveUpdateDestroyAPIView

from bookmarks.models import Bookmark
from bookmarks.serializers import BookmarkSerializer


class BookmarkListCreateAPIView(ListCreateAPIView):
    queryset = Bookmark.objects.all()
    serializer_class = BookmarkSerializer
    resource_name = 'bookmark'
    action = 'list'
    renderer_classes = (JSONRenderer,)
    filter_backends = (SearchFilter, OrderingFilter)
    search_fields = ('url', 'title')
    ordering_fields = ('id', 'url', 'title', 'bookmarked_at')


class BookmarkRetrieveUpdateDestroyAPIView(RetrieveUpdateDestroyAPIView):
    queryset = Bookmark.objects.all()
    serializer_class = BookmarkSerializer
    lookup_field = 'bookmark_id'
Use ListCreateAPIView as base class to support bookmark creation

from django.db.models import Q
from django.shortcuts import render
from rest_framework.filters import OrderingFilter, SearchFilter
from rest_framework.generics import ListAPIView
from rest_framework_json_api.renderers import JSONRenderer
from rest_framework.generics import RetrieveUpdateDestroyAPIView

from bookmarks.models import Bookmark
from bookmarks.serializers import BookmarkSerializer


class BookmarkListCreateAPIView(ListAPIView):
    queryset = Bookmark.objects.all()
    serializer_class = BookmarkSerializer
    resource_name = 'bookmark'
    action = 'list'
    renderer_classes = (JSONRenderer,)
    filter_backends = (SearchFilter, OrderingFilter)
    search_fields = ('url', 'title')
    ordering_fields = ('id', 'url', 'title', 'bookmarked_at')


class BookmarkRetrieveUpdateDestroyAPIView(RetrieveUpdateDestroyAPIView):
    queryset = Bookmark.objects.all()
    serializer_class = BookmarkSerializer
    lookup_field = 'bookmark_id'
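With ListCreateAPIView in place, the endpoint in the record above accepts POST as well as GET. A minimal sketch of creating a bookmark over HTTP; the route is an assumption (only the view class appears in the diff), and the payload fields are inferred from the serializer's visible search_fields:

import requests

# The URL path below is hypothetical; wire it to wherever this view is routed.
resp = requests.post(
    'http://localhost:8000/apiv2/bookmarks/',
    json={'url': 'https://example.com', 'title': 'Example'},
)
resp.raise_for_status()
print(resp.json())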
29061254e99f8e02e8285c3ebc965866c8c9d378
testing/chess_engine_fight.py
testing/chess_engine_fight.py
#!/usr/bin/python

import subprocess, os, sys

if len(sys.argv) < 2:
    print('Must specify file names of 2 chess engines')
    for i in range(len(sys.argv)):
        print(str(i) + ': ' + sys.argv[i])
    sys.exit(1)

generator = './' + sys.argv[-2]
checker = './' + sys.argv[-1]
game_file = 'game.pgn'

count = 0
while True:
    try:
        os.remove(game_file)
    except OSError:
        pass

    count += 1
    print('Game #' + str(count))
    out = subprocess.run([generator, '-random', '-random'])
    if not os.path.isfile(game_file):
        print('Game file not produced: ' + game_file)
        print('generator = ' + generator)
        print(out.returncode)
        print(out.stdout)
        print(out.stderr)
        sys.exit()

    result = subprocess.run([checker, '-confirm', game_file])
    if result.returncode != 0:
        print('Found discrepancy. See ' + game_file)
        print('generator = ' + generator)
        print('checker = ' + checker)
        sys.exit()

    generator, checker = checker, generator
#!/usr/bin/python

import subprocess, os, sys

if len(sys.argv) < 2:
    print('Must specify file names of 2 chess engines')
    for i in range(len(sys.argv)):
        print(str(i) + ': ' + sys.argv[i])
    sys.exit(1)

generator = './' + sys.argv[-2]
checker = './' + sys.argv[-1]
game_file = 'game.pgn'

count = 0
while True:
    try:
        os.remove(game_file)
    except OSError:
        pass
    if os.path.isfile(game_file):
        print('Could not delete output file:', game_file)

    count += 1
    print('Game #' + str(count))
    out = subprocess.run([generator, '-random', '-random'])
    if not os.path.isfile(game_file):
        print('Game file not produced: ' + game_file)
        print('generator = ' + generator)
        print(out.returncode)
        print(out.stdout)
        print(out.stderr)
        sys.exit()

    result = subprocess.run([checker, '-confirm', game_file])
    if result.returncode != 0:
        print('Found discrepancy. See ' + game_file)
        print('generator = ' + generator)
        print('checker = ' + checker)
        sys.exit()

    generator, checker = checker, generator
Check that engine fight files are deleted before test
Check that engine fight files are deleted before test
Python
mit
MarkZH/Genetic_Chess,MarkZH/Genetic_Chess,MarkZH/Genetic_Chess,MarkZH/Genetic_Chess,MarkZH/Genetic_Chess
<INSERT> if os.path.isfile(game_file):
        print('Could not delete output file:', game_file)
 <INSERT_END>
<|endoftext|>
#!/usr/bin/python

import subprocess, os, sys

if len(sys.argv) < 2:
    print('Must specify file names of 2 chess engines')
    for i in range(len(sys.argv)):
        print(str(i) + ': ' + sys.argv[i])
    sys.exit(1)

generator = './' + sys.argv[-2]
checker = './' + sys.argv[-1]
game_file = 'game.pgn'

count = 0
while True:
    try:
        os.remove(game_file)
    except OSError:
        pass
    if os.path.isfile(game_file):
        print('Could not delete output file:', game_file)

    count += 1
    print('Game #' + str(count))
    out = subprocess.run([generator, '-random', '-random'])
    if not os.path.isfile(game_file):
        print('Game file not produced: ' + game_file)
        print('generator = ' + generator)
        print(out.returncode)
        print(out.stdout)
        print(out.stderr)
        sys.exit()

    result = subprocess.run([checker, '-confirm', game_file])
    if result.returncode != 0:
        print('Found discrepancy. See ' + game_file)
        print('generator = ' + generator)
        print('checker = ' + checker)
        sys.exit()

    generator, checker = checker, generator
Check that engine fight files are deleted before test

#!/usr/bin/python

import subprocess, os, sys

if len(sys.argv) < 2:
    print('Must specify file names of 2 chess engines')
    for i in range(len(sys.argv)):
        print(str(i) + ': ' + sys.argv[i])
    sys.exit(1)

generator = './' + sys.argv[-2]
checker = './' + sys.argv[-1]
game_file = 'game.pgn'

count = 0
while True:
    try:
        os.remove(game_file)
    except OSError:
        pass

    count += 1
    print('Game #' + str(count))
    out = subprocess.run([generator, '-random', '-random'])
    if not os.path.isfile(game_file):
        print('Game file not produced: ' + game_file)
        print('generator = ' + generator)
        print(out.returncode)
        print(out.stdout)
        print(out.stderr)
        sys.exit()

    result = subprocess.run([checker, '-confirm', game_file])
    if result.returncode != 0:
        print('Found discrepancy. See ' + game_file)
        print('generator = ' + generator)
        print('checker = ' + checker)
        sys.exit()

    generator, checker = checker, generator
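One observation on the guard added in the record above: it reports a stale game file but lets the loop continue, so a later "discrepancy" could be judged against a leftover game. A stricter variant of the same guard (a deviation from the committed code, shown only for contrast; sys is already imported in the script):

    if os.path.isfile(game_file):
        print('Could not delete output file:', game_file)
        sys.exit(1)  # stop before generating a game against a stale file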
7741968b9d48afc7ac135742774ae911e2611c83
tests/test_util.py
tests/test_util.py
from grazer.util import time_convert, grouper


class TestTimeConvert(object):
    def test_seconds(self):
        assert time_convert("10s") == 10

    def test_minutes(self):
        assert time_convert("2m") == 120

    def test_hours(self):
        assert time_convert("3h") == 3 * 60 * 60


class TestGrouper(object):
    def test_simple_seq(self):
        seq = range(0, 10)
        result = list(grouper(2, seq))
        assert len(result) == 5
from grazer.util import time_convert, grouper


class TestTimeConvert(object):
    def test_seconds(self):
        assert time_convert("10s") == 10

    def test_minutes(self):
        assert time_convert("2m") == 120

    def test_hours(self):
        assert time_convert("3h") == 3 * 60 * 60


class TestGrouper(object):
    def test_simple_seq(self):
        seq = range(0, 10)
        result = list(grouper(2, seq))
        assert len(result) == 5

    def test_odd_seq(self):
        seq = range(0, 10)
        result = list(grouper(3, seq))
        assert len(result) == 4
        assert result[-1] == (9, None, None)
Cover case when seq is uneven
Cover case when seq is uneven
Python
mit
CodersOfTheNight/verata
<REPLACE_OLD> 5 <REPLACE_NEW> 5

    def test_odd_seq(self):
        seq = range(0, 10)
        result = list(grouper(3, seq))
        assert len(result) == 4
        assert result[-1] == (9, None, None) <REPLACE_END>
<|endoftext|>
from grazer.util import time_convert, grouper


class TestTimeConvert(object):
    def test_seconds(self):
        assert time_convert("10s") == 10

    def test_minutes(self):
        assert time_convert("2m") == 120

    def test_hours(self):
        assert time_convert("3h") == 3 * 60 * 60


class TestGrouper(object):
    def test_simple_seq(self):
        seq = range(0, 10)
        result = list(grouper(2, seq))
        assert len(result) == 5

    def test_odd_seq(self):
        seq = range(0, 10)
        result = list(grouper(3, seq))
        assert len(result) == 4
        assert result[-1] == (9, None, None)
Cover case when seq is uneven

from grazer.util import time_convert, grouper


class TestTimeConvert(object):
    def test_seconds(self):
        assert time_convert("10s") == 10

    def test_minutes(self):
        assert time_convert("2m") == 120

    def test_hours(self):
        assert time_convert("3h") == 3 * 60 * 60


class TestGrouper(object):
    def test_simple_seq(self):
        seq = range(0, 10)
        result = list(grouper(2, seq))
        assert len(result) == 5
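For reference, both assertions in the record above — five pairs from grouper(2, range(10)) and a final (9, None, None) from grouper(3, range(10)) — match the classic fill-value grouper recipe from the itertools documentation. A sketch consistent with these tests (the actual grazer.util implementation may differ):

from itertools import zip_longest  # izip_longest on Python 2

def grouper(n, iterable, fillvalue=None):
    # grouper(3, range(10)) -> (0, 1, 2), (3, 4, 5), (6, 7, 8), (9, None, None)
    args = [iter(iterable)] * n
    return zip_longest(*args, fillvalue=fillvalue)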
5cdc5755b1a687c9b34bfd575163ac367816f12a
migrations/versions/3961ccb5d884_increase_artifact_name_length.py
migrations/versions/3961ccb5d884_increase_artifact_name_length.py
"""increase artifact name length Revision ID: 3961ccb5d884 Revises: 1b229c83511d Create Date: 2015-11-05 15:34:28.189700 """ # revision identifiers, used by Alembic. revision = '3961ccb5d884' down_revision = '1b229c83511d' from alembic import op import sqlalchemy as sa def upgrade(): op.alter_column('artifact', 'name', sa.VARCHAR(1024)) def downgrade(): op.alter_column('artifact', 'name', sa.VARCHAR(128))
"""increase artifact name length Revision ID: 3961ccb5d884 Revises: 1b229c83511d Create Date: 2015-11-05 15:34:28.189700 """ # revision identifiers, used by Alembic. revision = '3961ccb5d884' down_revision = '1b229c83511d' from alembic import op import sqlalchemy as sa def upgrade(): op.alter_column('artifact', 'name', type_=sa.VARCHAR(1024)) def downgrade(): op.alter_column('artifact', 'name', type_=sa.VARCHAR(128))
Fix extend artifact name migration script.
Fix extend artifact name migration script.

Test Plan: ran migration locally and checked table schema

Reviewers: anupc, kylec

Reviewed By: kylec

Subscribers: changesbot

Differential Revision: https://tails.corp.dropbox.com/D151824
Python
apache-2.0
dropbox/changes,dropbox/changes,dropbox/changes,dropbox/changes
<REPLACE_OLD> sa.VARCHAR(1024)) def <REPLACE_NEW> type_=sa.VARCHAR(1024)) def <REPLACE_END>
<REPLACE_OLD> sa.VARCHAR(128)) <REPLACE_NEW> type_=sa.VARCHAR(128)) <REPLACE_END>
<|endoftext|>
"""increase artifact name length

Revision ID: 3961ccb5d884
Revises: 1b229c83511d
Create Date: 2015-11-05 15:34:28.189700

"""

# revision identifiers, used by Alembic.
revision = '3961ccb5d884'
down_revision = '1b229c83511d'

from alembic import op
import sqlalchemy as sa


def upgrade():
    op.alter_column('artifact', 'name', type_=sa.VARCHAR(1024))


def downgrade():
    op.alter_column('artifact', 'name', type_=sa.VARCHAR(128))
Fix extend artifact name migration script.

Test Plan: ran migration locally and checked table schema

Reviewers: anupc, kylec

Reviewed By: kylec

Subscribers: changesbot

Differential Revision: https://tails.corp.dropbox.com/D151824

"""increase artifact name length

Revision ID: 3961ccb5d884
Revises: 1b229c83511d
Create Date: 2015-11-05 15:34:28.189700

"""

# revision identifiers, used by Alembic.
revision = '3961ccb5d884'
down_revision = '1b229c83511d'

from alembic import op
import sqlalchemy as sa


def upgrade():
    op.alter_column('artifact', 'name', sa.VARCHAR(1024))


def downgrade():
    op.alter_column('artifact', 'name', sa.VARCHAR(128))
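Why the original migration silently failed to change the type: in Alembic releases of that era, alter_column's first positional parameter after column_name was nullable, not the type, so the VARCHAR object bound to the wrong argument; the column type must go through the type_ keyword (trailing underscore because type is a Python builtin). The parameter-binding detail is recalled from Alembic's old signature, so treat it as an assumption:

# Sketch of the binding problem (old-signature detail is an assumption):
# op.alter_column('artifact', 'name', sa.VARCHAR(1024))
#   binds roughly as alter_column(table_name='artifact', column_name='name',
#   nullable=VARCHAR(1024)) -- the length change never reaches the schema.
op.alter_column('artifact', 'name', type_=sa.VARCHAR(1024))  # corrected form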
8cac0c660eee774c32b87d2511e4d2eeddf0ffe8
scripts/slave/chromium/dart_buildbot_run.py
scripts/slave/chromium/dart_buildbot_run.py
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Entry point for the dartium buildbots.

This script is called from buildbot and reports results using the buildbot
annotation scheme.
"""

import sys

from common import chromium_utils


def main():
  return chromium_utils.RunCommand(
      [sys.executable,
       'src/build/buildbot_annotated_steps.py',
      ])


if __name__ == '__main__':
  sys.exit(main())
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Entry point for the dartium buildbots.

This script is called from buildbot and reports results using the buildbot
annotation scheme.
"""

import os
import sys

from common import chromium_utils


def main():
  builder_name = os.getenv('BUILDBOT_BUILDERNAME', default='')
  is_release_bot = builder_name.startswith('release')
  script = ''
  if is_release_bot:
    script = 'src/dartium_tools/buildbot_release_annotated_steps.py'
  else:
    script = 'src/dartium_tools/buildbot_annotated_steps.py'
  return chromium_utils.RunCommand([sys.executable, script])


if __name__ == '__main__':
  sys.exit(main())
Call dartium_tools/buildbot_annotated_steps.py directly; there is no need to move this as part of the dartium patching process.
Call dartium_tools/buildbot_annotated_steps.py directly; there is no need to move this as part of the dartium patching process.

Additionally, start calling a new script for release builds (there are none yet, but this is what will be used to build the sdk and editor)

TBR=foo

Review URL: https://chromiumcodereview.appspot.com/11466003

git-svn-id: 239fca9b83025a0b6f823aeeca02ba5be3d9fd76@171512 0039d316-1c4b-4281-b951-d872f2087c98
Python
bsd-3-clause
eunchong/build,eunchong/build,eunchong/build,eunchong/build
<INSERT> os
import <INSERT_END>
<REPLACE_OLD> chromium_utils def <REPLACE_NEW> chromium_utils def <REPLACE_END>
<REPLACE_OLD> return chromium_utils.RunCommand( <REPLACE_NEW> builder_name = os.getenv('BUILDBOT_BUILDERNAME', default='')
  is_release_bot = builder_name.startswith('release')
  script = ''
  if is_release_bot: <REPLACE_END>
<INSERT> script = 'src/dartium_tools/buildbot_release_annotated_steps.py' <INSERT_END>
<REPLACE_OLD> [sys.executable, <REPLACE_NEW> else: <REPLACE_END>
<INSERT> script = 'src/dartium_tools/buildbot_annotated_steps.py' <INSERT_END>
<REPLACE_OLD> 'src/build/buildbot_annotated_steps.py', ]) if <REPLACE_NEW> return chromium_utils.RunCommand([sys.executable, script]) if <REPLACE_END>
<|endoftext|>
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Entry point for the dartium buildbots.

This script is called from buildbot and reports results using the buildbot
annotation scheme.
"""

import os
import sys

from common import chromium_utils


def main():
  builder_name = os.getenv('BUILDBOT_BUILDERNAME', default='')
  is_release_bot = builder_name.startswith('release')
  script = ''
  if is_release_bot:
    script = 'src/dartium_tools/buildbot_release_annotated_steps.py'
  else:
    script = 'src/dartium_tools/buildbot_annotated_steps.py'
  return chromium_utils.RunCommand([sys.executable, script])


if __name__ == '__main__':
  sys.exit(main())
Call dartium_tools/buildbot_annotated_steps.py directly; there is no need to move this as part of the dartium patching process.

Additionally, start calling a new script for release builds (there are none yet, but this is what will be used to build the sdk and editor)

TBR=foo

Review URL: https://chromiumcodereview.appspot.com/11466003

git-svn-id: 239fca9b83025a0b6f823aeeca02ba5be3d9fd76@171512 0039d316-1c4b-4281-b951-d872f2087c98

#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Entry point for the dartium buildbots.

This script is called from buildbot and reports results using the buildbot
annotation scheme.
"""

import sys

from common import chromium_utils


def main():
  return chromium_utils.RunCommand(
      [sys.executable,
       'src/build/buildbot_annotated_steps.py',
      ])


if __name__ == '__main__':
  sys.exit(main())
67fcadfa8fd3e6c4161ca4756cc65f0db1386c06
usercustomize.py
usercustomize.py
""" Customize Python Interpreter. Link your user customizing file to this file. For more info see: https://docs.python.org/3/library/site.html "Default value is ~/.local/lib/pythonX.Y/site-packages for UNIX and non-framework Mac OS X builds, ~/Library/Python/X.Y/lib/python/site-packages for Mac framework builds, and %APPDATA%\Python\PythonXY\site-packages on Windows." Sun May 4 18:06:08 CST 2014 """ import cgitb cgitb.enable(format='text')
""" Customize Python Interpreter. Link your user customizing file to this file. For more info see: https://docs.python.org/3/library/site.html "Default value is ~/.local/lib/pythonX.Y/site-packages for UNIX and non-framework Mac OS X builds, ~/Library/Python/X.Y/lib/python/site-packages for Mac framework builds, and %APPDATA%\Python\PythonXY\site-packages on Windows." Sun May 4 18:06:08 CST 2014 """ import cgitb import sys import os import os.path cgitb.enable(format='text') sys.path.insert(0, os.path.join(os.environ['HOME'], 'gtk/inst/lib/python2.7/site-packages'))
Add OS X GTK to Python path.
Add OS X GTK to Python path.
Python
mit
fossilet/dotfiles,fossilet/dotfiles,fossilet/dotfiles
<REPLACE_OLD> cgitb

cgitb.enable(format='text') <REPLACE_NEW> cgitb
import sys
import os
import os.path

cgitb.enable(format='text')

sys.path.insert(0, os.path.join(os.environ['HOME'], 'gtk/inst/lib/python2.7/site-packages')) <REPLACE_END>
<|endoftext|>
"""
Customize Python Interpreter.

Link your user customizing file to this file.

For more info see: https://docs.python.org/3/library/site.html

"Default value is ~/.local/lib/pythonX.Y/site-packages for UNIX and
non-framework Mac OS X builds, ~/Library/Python/X.Y/lib/python/site-packages
for Mac framework builds, and %APPDATA%\Python\PythonXY\site-packages on
Windows."

Sun May 4 18:06:08 CST 2014
"""

import cgitb
import sys
import os
import os.path

cgitb.enable(format='text')

sys.path.insert(0, os.path.join(os.environ['HOME'], 'gtk/inst/lib/python2.7/site-packages'))
Add OS X GTK to Python path.

"""
Customize Python Interpreter.

Link your user customizing file to this file.

For more info see: https://docs.python.org/3/library/site.html

"Default value is ~/.local/lib/pythonX.Y/site-packages for UNIX and
non-framework Mac OS X builds, ~/Library/Python/X.Y/lib/python/site-packages
for Mac framework builds, and %APPDATA%\Python\PythonXY\site-packages on
Windows."

Sun May 4 18:06:08 CST 2014
"""

import cgitb

cgitb.enable(format='text')
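A side note on the record above: the hard-coded python2.7 path segment silently points at the wrong site-packages if this file is shared across interpreter versions. A sketch of a version-agnostic alternative (not what the commit does, just an illustration):

import os
import sys

# Build the GTK site-packages path from the interpreter actually running.
gtk_site = os.path.join(
    os.environ['HOME'],
    'gtk/inst/lib/python%d.%d/site-packages' % sys.version_info[:2],
)
sys.path.insert(0, gtk_site)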