commit | old_file | new_file | old_contents | new_contents | subject | message | lang | license | repos | diff
---|---|---|---|---|---|---|---|---|---|---|
891a85fc427b16295c6f792d7311eca1e497332e | api/__init__.py | api/__init__.py | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from os import getenv
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = getenv('DATABASE_URL',
default='postgresql://postgres@localhost:5432/loadstone')
db = SQLAlchemy(app)
import api.views
| from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from os import getenv
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = getenv('DATABASE_URL', default='sqlite://')
db = SQLAlchemy(app)
import api.views
| Set default to sqlite memory | Set default to sqlite memory
| Python | mit | Demotivated/loadstone | ---
+++
@@ -5,8 +5,7 @@
app = Flask(__name__)
-app.config['SQLALCHEMY_DATABASE_URI'] = getenv('DATABASE_URL',
- default='postgresql://postgres@localhost:5432/loadstone')
+app.config['SQLALCHEMY_DATABASE_URI'] = getenv('DATABASE_URL', default='sqlite://')
db = SQLAlchemy(app)
import api.views |
27ffe13842cfd346c568a51299b8f2349daf32c0 | app/__init__.py | app/__init__.py | import logging
logging.basicConfig(
format='%(asctime)s %(levelname)-8s %(message)s',
level=logging.INFO,
datefmt='%Y-%m-%d %H:%M:%S')
| Add proper formatting of logs in app init | Add proper formatting of logs in app init
| Python | mit | futuresimple/triggear | ---
+++
@@ -0,0 +1,6 @@
+import logging
+
+logging.basicConfig(
+ format='%(asctime)s %(levelname)-8s %(message)s',
+ level=logging.INFO,
+ datefmt='%Y-%m-%d %H:%M:%S') |
|
9c877984d1f9175660911d9cac457d6ff87b2754 | troposphere/validators.py | troposphere/validators.py | # Copyright (c) 2012-2013, Mark Peek <[email protected]>
# All rights reserved.
#
# See LICENSE file for full license.
def boolean(x):
if x in [True, 1, '1', 'true', 'True']:
return "true"
if x in [False, 0, '0', 'false', 'False']:
return "false"
raise ValueError
def integer(x):
if isinstance(x, int):
return x
if isinstance(x, basestring):
int(x)
return x
def positive_integer(x):
p = integer(x)
if p < 0:
raise ValueError
return x
def integer_range(minimum_val, maximum_val):
def integer_range_checker(x):
i = int(x)
if i < minimum_val or i > maximum_val:
raise ValueError('Integer must be between %d and %d' % (
minimum_val, maximum_val))
return x
return integer_range_checker
def network_port(x):
from . import AWSHelperFn
# Network ports can be Ref items
if isinstance(x, AWSHelperFn):
return x
i = int(x)
if i < -1 or i > 65535:
raise ValueError
return x
| # Copyright (c) 2012-2013, Mark Peek <[email protected]>
# All rights reserved.
#
# See LICENSE file for full license.
def boolean(x):
if x in [True, 1, '1', 'true', 'True']:
return "true"
if x in [False, 0, '0', 'false', 'False']:
return "false"
raise ValueError
def integer(x):
if isinstance(x, int):
return x
if isinstance(x, basestring):
int(x)
return x
def positive_integer(x):
p = integer(x)
if int(p) < 0:
raise ValueError
return x
def integer_range(minimum_val, maximum_val):
def integer_range_checker(x):
i = int(x)
if i < minimum_val or i > maximum_val:
raise ValueError('Integer must be between %d and %d' % (
minimum_val, maximum_val))
return x
return integer_range_checker
def network_port(x):
from . import AWSHelperFn
# Network ports can be Ref items
if isinstance(x, AWSHelperFn):
return x
i = int(x)
if i < -1 or i > 65535:
raise ValueError
return x
| Make sure we're comparing integers in positive_integer | Make sure we're comparing integers in positive_integer
| Python | bsd-2-clause | jantman/troposphere,amosshapira/troposphere,ikben/troposphere,samcrang/troposphere,pas256/troposphere,ptoraskar/troposphere,cryptickp/troposphere,xxxVxxx/troposphere,horacio3/troposphere,alonsodomin/troposphere,alonsodomin/troposphere,pas256/troposphere,DualSpark/troposphere,7digital/troposphere,jdc0589/troposphere,iblazevic/troposphere,ccortezb/troposphere,johnctitus/troposphere,yxd-hde/troposphere,mannytoledo/troposphere,wangqiang8511/troposphere,micahhausler/troposphere,kid/troposphere,7digital/troposphere,WeAreCloudar/troposphere,dmm92/troposphere,dmm92/troposphere,inetCatapult/troposphere,johnctitus/troposphere,nicolaka/troposphere,horacio3/troposphere,ikben/troposphere,craigbruce/troposphere,Hons/troposphere,garnaat/troposphere,cloudtools/troposphere,unravelin/troposphere,LouTheBrew/troposphere,mhahn/troposphere,cloudtools/troposphere,Yipit/troposphere | ---
+++
@@ -22,7 +22,7 @@
def positive_integer(x):
p = integer(x)
- if p < 0:
+ if int(p) < 0:
raise ValueError
return x
|
8c8eb5207fd34ba381b89cb147dd3c38b68cf3ad | stocks.py | stocks.py | #!/usr/bin/env python
def find_points(prices, window):
pivot = None
next_pivot = None
profit = 0
for i, price in enumerate(prices):
if pivot is None or price < prices[pivot]:
pivot = i
next_pivot = max(next_pivot, pivot + 1)
if pivot != i and (next_pivot is None or price < prices[next_pivot]):
next_pivot = i
if i - pivot == window:
pivot = next_pivot
next_pivot = pivot + 1
profit = max(profit, price - prices[pivot])
return profit
def main():
print find_points([1.0, 2.0, 3.0, 1.0, 3.0, 4.0], 5)
print find_points([7.0, 5.0, 6.0, 4.0, 5.0, 3.0, 4.0, 2.0, 3.0, 1.0], 5)
print find_points([4.0, 3.0, 2.0, 4.0, 3.0, 1.0, 1.1, 1.2, 1.3, 3.4], 5)
if __name__ == "__main__":
main()
| #!/usr/bin/env python
def find_profit(prices, window):
pivot = None
next_pivot = None
profit = 0
for i, price in enumerate(prices):
if pivot is None or price < prices[pivot]:
pivot = i
next_pivot = max(next_pivot, pivot + 1)
if pivot != i and (next_pivot is None or price < prices[next_pivot]):
next_pivot = i
if i - pivot == window:
pivot = next_pivot
next_pivot += 1
profit = max(profit, price - prices[pivot])
return profit
def main():
print find_profit([1.0, 2.0, 3.0, 1.0, 3.0, 4.0], 5)
print find_profit([7.0, 5.0, 6.0, 4.0, 5.0, 3.0, 4.0, 2.0, 3.0, 1.0], 5)
print find_profit([4.0, 3.0, 2.0, 4.0, 3.0, 1.0, 1.1, 1.2, 1.3, 3.4], 5)
if __name__ == "__main__":
main()
| Change the name of the function | Change the name of the function
| Python | mit | jrasky/planetlabs-challenge | ---
+++
@@ -1,7 +1,7 @@
#!/usr/bin/env python
-def find_points(prices, window):
+def find_profit(prices, window):
pivot = None
next_pivot = None
profit = 0
@@ -16,7 +16,7 @@
if i - pivot == window:
pivot = next_pivot
- next_pivot = pivot + 1
+ next_pivot += 1
profit = max(profit, price - prices[pivot])
@@ -24,11 +24,11 @@
def main():
- print find_points([1.0, 2.0, 3.0, 1.0, 3.0, 4.0], 5)
+ print find_profit([1.0, 2.0, 3.0, 1.0, 3.0, 4.0], 5)
- print find_points([7.0, 5.0, 6.0, 4.0, 5.0, 3.0, 4.0, 2.0, 3.0, 1.0], 5)
+ print find_profit([7.0, 5.0, 6.0, 4.0, 5.0, 3.0, 4.0, 2.0, 3.0, 1.0], 5)
- print find_points([4.0, 3.0, 2.0, 4.0, 3.0, 1.0, 1.1, 1.2, 1.3, 3.4], 5)
+ print find_profit([4.0, 3.0, 2.0, 4.0, 3.0, 1.0, 1.1, 1.2, 1.3, 3.4], 5)
if __name__ == "__main__":
main() |
e4abd01162bfd4af8cd1f2e657e8a6343b766e8c | anchor/names.py | anchor/names.py | """
Names of the modalities
"""
# Set constants of the names of the models so they can always be referenced
# as variables rather than strings
# Most of the density is at 0
NEAR_ZERO = '~0'
# Old "middle" modality - most of the density is at 0.5
NEAR_HALF = 'concurrent'
# Most of the density is at 1
NEAR_ONE = '~1'
# The density is split between 0 and 1
BOTH_ONE_ZERO = 'bimodal'
# Cannot decide on one of the above models (the null model fits better) so use
# this model instead
NULL_MODEL = 'mixed'
| """
Names of the modalities
"""
# Set constants of the names of the models so they can always be referenced
# as variables rather than strings
# Most of the density is at 0
NEAR_ZERO = '~0'
# Old "middle" modality - most of the density is at 0.5
NEAR_HALF = 'concurrent'
# Most of the density is at 1
NEAR_ONE = '~1'
# The density is split between 0 and 1
BOTH_ONE_ZERO = 'bimodal'
# Cannot decide on one of the above models (the null model fits better) so use
# this model instead
NULL_MODEL = 'ambivalent'
| Change null model name to ambivalent | Change null model name to ambivalent
| Python | bsd-3-clause | YeoLab/anchor | ---
+++
@@ -19,4 +19,4 @@
# Cannot decide on one of the above models (the null model fits better) so use
# this model instead
-NULL_MODEL = 'mixed'
+NULL_MODEL = 'ambivalent' |
15a7ced2d0da014e5d5508ed50c045de3cc9e9d2 | _lib/wordpress_faq_processor.py | _lib/wordpress_faq_processor.py | import sys
import json
import os.path
import requests
def posts_at_url(url):
current_page = 1
max_page = sys.maxint
while current_page <= max_page:
url = os.path.expandvars(url)
resp = requests.get(url, params={'page': current_page, 'count': '-1'})
results = json.loads(resp.content)
current_page += 1
max_page = results['pages']
for p in results['posts']:
yield p
def documents(name, url, **kwargs):
for post in posts_at_url(url):
yield process_post(post)
def process_post(post):
post['_id'] = post['slug']
names = ['og_title', 'og_image', 'og_desc', 'twtr_text', 'twtr_lang',
'twtr_rel', 'twtr_hash', 'utm_campaign', 'utm_term',
'utm_content', 'faq']
for name in names:
if name in post['custom_fields']:
post[name] = post['custom_fields'][name]
if 'taxonomy_fj_tag' in post:
post['tags'] = [tag['title'] for tag in post['taxonomy_fj_tag']]
del post['custom_fields']
return post
| import sys
import json
import os.path
import requests
def posts_at_url(url):
current_page = 1
max_page = sys.maxint
while current_page <= max_page:
url = os.path.expandvars(url)
resp = requests.get(url, params={'page': current_page, 'count': '-1'})
results = json.loads(resp.content)
current_page += 1
max_page = results['pages']
for p in results['posts']:
yield p
def documents(name, url, **kwargs):
for post in posts_at_url(url):
yield process_post(post)
def process_post(post):
post['_id'] = post['slug']
names = ['og_title', 'og_image', 'og_desc', 'twtr_text', 'twtr_lang',
'twtr_rel', 'twtr_hash', 'utm_campaign', 'utm_term',
'utm_content', 'faq']
for name in names:
if name in post['custom_fields']:
post[name] = post['custom_fields'][name]
if 'taxonomy_fj_tag' in post:
post['tags'] = [tag['title'] for tag in post['taxonomy_fj_tag']]
del post['custom_fields']
return {'_index': 'content',
'_type': 'faq',
'_id': post['slug'],
'_source': post}
| Change faq processor to bulk index | Change faq processor to bulk index
| Python | cc0-1.0 | imuchnik/cfgov-refresh,imuchnik/cfgov-refresh,imuchnik/cfgov-refresh,imuchnik/cfgov-refresh | ---
+++
@@ -41,4 +41,7 @@
del post['custom_fields']
- return post
+ return {'_index': 'content',
+ '_type': 'faq',
+ '_id': post['slug'],
+ '_source': post} |
f45e182ec206ab08b1bea699033938b562558670 | test/test_compression.py | test/test_compression.py | import unittest
import bmemcached
import bz2
class MemcachedTests(unittest.TestCase):
def setUp(self):
self.server = '127.0.0.1:11211'
self.client = bmemcached.Client(self.server, 'user', 'password')
self.bzclient = bmemcached.Client(self.server, 'user', 'password', bz2)
self.data = b'this is test data. ' * 32
def tearDown(self):
self.client.delete(b'test_key')
self.client.delete(b'test_key2')
self.client.disconnect_all()
self.bzclient.disconnect_all()
def testCompressedData(self):
self.client.set(b'test_key', self.data)
self.assertEqual(self.data, self.client.get(b'test_key'))
def testBZ2CompressedData(self):
self.bzclient.set(b'test_key', self.data)
self.assertEqual(self.data, self.bzclient.get(b'test_key'))
def testCompressionMissmatch(self):
self.client.set(b'test_key', self.data)
self.bzclient.set(b'test_key2', self.data)
self.assertEqual(self.client.get(b'test_key'),
self.bzclient.get(b'test_key2'))
self.assertRaises(IOError, self.bzclient.get, b'test_key')
| import unittest
import bz2
import bmemcached
class MemcachedTests(unittest.TestCase):
def setUp(self):
self.server = '127.0.0.1:11211'
self.client = bmemcached.Client(self.server, 'user', 'password')
self.bzclient = bmemcached.Client(self.server, 'user', 'password',
compression=bz2)
self.data = b'this is test data. ' * 32
def tearDown(self):
self.client.delete(b'test_key')
self.client.delete(b'test_key2')
self.client.disconnect_all()
self.bzclient.disconnect_all()
def testCompressedData(self):
self.client.set(b'test_key', self.data)
self.assertEqual(self.data, self.client.get(b'test_key'))
def testBZ2CompressedData(self):
self.bzclient.set(b'test_key', self.data)
self.assertEqual(self.data, self.bzclient.get(b'test_key'))
def testCompressionMissmatch(self):
self.client.set(b'test_key', self.data)
self.bzclient.set(b'test_key2', self.data)
self.assertEqual(self.client.get(b'test_key'),
self.bzclient.get(b'test_key2'))
self.assertRaises(IOError, self.bzclient.get, b'test_key')
| Use keyword arguments to avoid accidentally setting timeout | Use keyword arguments to avoid accidentally setting timeout
| Python | mit | xmonster-tech/python-binary-memcached,jaysonsantos/python-binary-memcached,xmonster-tech/python-binary-memcached,jaysonsantos/python-binary-memcached | ---
+++
@@ -1,12 +1,13 @@
import unittest
+import bz2
import bmemcached
-import bz2
class MemcachedTests(unittest.TestCase):
def setUp(self):
self.server = '127.0.0.1:11211'
self.client = bmemcached.Client(self.server, 'user', 'password')
- self.bzclient = bmemcached.Client(self.server, 'user', 'password', bz2)
+ self.bzclient = bmemcached.Client(self.server, 'user', 'password',
+ compression=bz2)
self.data = b'this is test data. ' * 32
def tearDown(self):
@@ -29,4 +30,3 @@
self.assertEqual(self.client.get(b'test_key'),
self.bzclient.get(b'test_key2'))
self.assertRaises(IOError, self.bzclient.get, b'test_key')
- |
6bbd81efbd4821a3963a021d8456531f01edfd6c | tests/test_rover_instance.py | tests/test_rover_instance.py |
from unittest import TestCase
from rover import Rover
class TestRover(TestCase):
def setUp(self):
self.rover = Rover()
def test_rover_compass(self):
assert self.rover.compass == ['N', 'E', 'S', 'W']
def test_rover_position(self):
assert self.rover.position == (self.rover.x, self.rover.y, self.rover.direction)
|
from unittest import TestCase
from rover import Rover
class TestRover(TestCase):
def setUp(self):
self.rover = Rover()
def test_rover_compass(self):
assert self.rover.compass == ['N', 'E', 'S', 'W']
def test_rover_position(self):
assert self.rover.position == (self.rover.x, self.rover.y, self.rover.direction)
def test_rover_set_position(self):
self.rover.set_position(4, 9, 'W')
assert self.rover.position == (4, 9, 'W')
| Add failing test for set position method | Add failing test for set position method
| Python | mit | authentik8/rover | ---
+++
@@ -12,3 +12,7 @@
def test_rover_position(self):
assert self.rover.position == (self.rover.x, self.rover.y, self.rover.direction)
+
+ def test_rover_set_position(self):
+ self.rover.set_position(4, 9, 'W')
+ assert self.rover.position == (4, 9, 'W') |
31eae0aee3a6ae9fa7abea312ff1ea843a98e853 | graphene/contrib/django/tests/models.py | graphene/contrib/django/tests/models.py | from __future__ import absolute_import
from django.db import models
class Pet(models.Model):
name = models.CharField(max_length=30)
class Film(models.Model):
reporters = models.ManyToManyField('Reporter',
related_name='films')
class Reporter(models.Model):
first_name = models.CharField(max_length=30)
last_name = models.CharField(max_length=30)
email = models.EmailField()
pets = models.ManyToManyField('self')
def __str__(self): # __unicode__ on Python 2
return "%s %s" % (self.first_name, self.last_name)
class Article(models.Model):
headline = models.CharField(max_length=100)
pub_date = models.DateField()
reporter = models.ForeignKey(Reporter, related_name='articles')
def __str__(self): # __unicode__ on Python 2
return self.headline
class Meta:
ordering = ('headline',)
| from __future__ import absolute_import
from django.db import models
class Pet(models.Model):
name = models.CharField(max_length=30)
class Film(models.Model):
reporters = models.ManyToManyField('Reporter',
related_name='films')
class Reporter(models.Model):
first_name = models.CharField(max_length=30)
last_name = models.CharField(max_length=30)
email = models.EmailField()
pets = models.ManyToManyField('self')
def __str__(self): # __unicode__ on Python 2
return "%s %s" % (self.first_name, self.last_name)
class Article(models.Model):
headline = models.CharField(max_length=100)
pub_date = models.DateField()
reporter = models.ForeignKey(Reporter, related_name='articles')
lang = models.CharField(max_length=2, help_text='Language', choices=[
('es', 'Spanish'),
('en', 'English')
], default='es')
def __str__(self): # __unicode__ on Python 2
return self.headline
class Meta:
ordering = ('headline',)
| Improve Django field conversion real-life tests | Improve Django field conversion real-life tests
| Python | mit | graphql-python/graphene,sjhewitt/graphene,Globegitter/graphene,sjhewitt/graphene,Globegitter/graphene,graphql-python/graphene | ---
+++
@@ -26,6 +26,10 @@
headline = models.CharField(max_length=100)
pub_date = models.DateField()
reporter = models.ForeignKey(Reporter, related_name='articles')
+ lang = models.CharField(max_length=2, help_text='Language', choices=[
+ ('es', 'Spanish'),
+ ('en', 'English')
+ ], default='es')
def __str__(self): # __unicode__ on Python 2
return self.headline |
6d1117fbba83b258162cc0f397573e21cd31543e | batch_effect.py | batch_effect.py | #!/usr/bin/env python
import argparse
import csv
import shutil
import subprocess
import sys
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Chain together Inkscape extensions")
parser.add_argument('--id', type=str, action='append', dest='ids', default=[],
help="ID attributes of objects to manipulate. Passed to all extensions.")
parser.add_argument('--csvpath', type=str, required=True,
help="Path to .csv file containing command lines")
parser.add_argument('svgpath', type=str, nargs='?', default='',
help="Path to temporary SVG file to use for input to the first extension")
args = parser.parse_args()
with open(args.csvpath, 'rb') as f:
# Make an argument list of the ids
id_args = []
for id in args.ids:
id_args.extend(('--id', id))
# Take input for the first call from temporary file or stdin
if args.svgpath:
stream = open(args.svgpath)
else:
stream = sys.stdin
# Execute all the calls
for row in csv.reader(f):
# Insert the ids into the call
call = row[:1] + id_args + row[1:]
# Make the call
p = subprocess.Popen(call, stdin=stream, stdout=subprocess.PIPE)
# Close our handle to the input pipe because we no longer need it
stream.close()
# Grab the output pipe for input into the next call
stream = p.stdout
# Send output from last call on stdout
shutil.copyfileobj(stream, sys.stdout)
| #!/usr/bin/env python
import csv
import optparse
import shutil
import subprocess
import sys
if __name__ == '__main__':
parser = optparse.OptionParser(description="Chain together Inkscape extensions",
usage="%prog [options] svgpath")
parser.add_option('--id', dest='ids', action='append', type=str, default=[],
help="ID attributes of objects to manipulate. Passed to all extensions.")
parser.add_option('--csvpath', dest='csvpath', type=str,
help="Path to .csv file containing command lines")
options, args = parser.parse_args()
with open(options.csvpath, 'rb') as f:
# Make an argument list of the ids
id_args = []
for id in options.ids:
id_args.extend(('--id', id))
# Take input for the first call from temporary file or stdin
if args:
stream = open(args[0])
else:
stream = sys.stdin
# Execute all the calls
for row in csv.reader(f):
# Insert the ids into the call
call = row[:1] + id_args + row[1:]
# Make the call
p = subprocess.Popen(call, stdin=stream, stdout=subprocess.PIPE)
# Close our handle to the input pipe because we no longer need it
stream.close()
# Grab the output pipe for input into the next call
stream = p.stdout
# Send output from last call on stdout
shutil.copyfileobj(stream, sys.stdout)
| Make compatible with Python <2.7 | Make compatible with Python <2.7
The argparse module was added in Python 2.7, but the Python bundled
with Inkscape is 2.6. Switching to optparse makes this extension
compatible with the Python bundled with Inkscape.
| Python | mit | jturner314/inkscape-batch-effect | ---
+++
@@ -1,29 +1,28 @@
#!/usr/bin/env python
-import argparse
import csv
+import optparse
import shutil
import subprocess
import sys
if __name__ == '__main__':
- parser = argparse.ArgumentParser(description="Chain together Inkscape extensions")
- parser.add_argument('--id', type=str, action='append', dest='ids', default=[],
- help="ID attributes of objects to manipulate. Passed to all extensions.")
- parser.add_argument('--csvpath', type=str, required=True,
- help="Path to .csv file containing command lines")
- parser.add_argument('svgpath', type=str, nargs='?', default='',
- help="Path to temporary SVG file to use for input to the first extension")
- args = parser.parse_args()
+ parser = optparse.OptionParser(description="Chain together Inkscape extensions",
+ usage="%prog [options] svgpath")
+ parser.add_option('--id', dest='ids', action='append', type=str, default=[],
+ help="ID attributes of objects to manipulate. Passed to all extensions.")
+ parser.add_option('--csvpath', dest='csvpath', type=str,
+ help="Path to .csv file containing command lines")
+ options, args = parser.parse_args()
- with open(args.csvpath, 'rb') as f:
+ with open(options.csvpath, 'rb') as f:
# Make an argument list of the ids
id_args = []
- for id in args.ids:
+ for id in options.ids:
id_args.extend(('--id', id))
# Take input for the first call from temporary file or stdin
- if args.svgpath:
- stream = open(args.svgpath)
+ if args:
+ stream = open(args[0])
else:
stream = sys.stdin
# Execute all the calls |
2b80ef0b732403dea1af72693ebb2adc19863cac | test_hash.py | test_hash.py | from hash_table import HashTable
import io
import pytest
words = []
with io.open('/usr/share/dict/words', 'r') as word_file:
words = word_file.readlines()
def test_init():
ht = HashTable()
assert len(ht.table) == 1024
ht2 = HashTable(10000)
assert len(ht2.table) == 10000
def test_hash():
ht = HashTable()
ht.set('coffee', 'coffee')
assert ht.get('coffee') == 'coffee'
def test_duplicate_hash_val():
ht = HashTable()
ht.set('bob', 'bob')
ht.set('obb', 'obb')
assert ht.get('bob') == 'bob'
assert ht.get('obb') == 'obb'
def test_word_file():
ht = HashTable()
for word in words:
ht.set(word, word)
assert ht.get(words[654]) == words[654]
assert ht.get(words[3541]) == words[3541]
assert ht.get(words[6541]) == words[6541]
def test_non_item():
ht = HashTable()
ht.set('coffee', 'coffee')
with pytest.raises(IndexError):
ht.get('milk')
def test_non_bucket():
ht = HashTable()
with pytest.raises(IndexError):
ht.table[1025]
| from hash_table import HashTable
import io
import pytest
words = []
with io.open('/usr/share/dict/words', 'r') as word_file:
words = word_file.readlines()
def test_init():
ht = HashTable()
assert len(ht.table) == 1024
ht2 = HashTable(10000)
assert len(ht2.table) == 10000
def test_hash():
ht = HashTable()
ht.set('coffee', 'coffee')
assert ht.get('coffee') == 'coffee'
def test_duplicate_hash_val():
ht = HashTable()
ht.set('bob', 'bob')
ht.set('obb', 'obb')
assert ht.get('bob') == 'bob'
assert ht.get('obb') == 'obb'
def test_word_file():
ht = HashTable()
for word in words:
ht.set(word, word)
assert ht.get(words[654]) == words[654]
assert ht.get(words[3541]) == words[3541]
assert ht.get(words[6541]) == words[6541]
def test_non_item():
ht = HashTable()
ht.set('coffee', 'coffee')
with pytest.raises(KeyError):
ht.get('milk')
def test_non_bucket():
ht = HashTable()
with pytest.raises(IndexError):
ht.table[1025]
| Change test to check for KeyError instead of IndexError for test_non_item | Change test to check for KeyError instead of IndexError for test_non_item
| Python | mit | jwarren116/data-structures-deux | ---
+++
@@ -42,7 +42,7 @@
def test_non_item():
ht = HashTable()
ht.set('coffee', 'coffee')
- with pytest.raises(IndexError):
+ with pytest.raises(KeyError):
ht.get('milk')
|
25ebc324c0af6e1ce74535cc75227071637a7a18 | areaScraper.py | areaScraper.py | # Craigslist City Scraper
# By Marshall Ehlinger
# For sp2015 Systems Analysis and Design
from bs4 import BeautifulSoup
import re
fh = open("sites.htm", "r")
soup = BeautifulSoup(fh, "html.parser")
for columnDiv in soup.h1.next_sibling.next_sibling:
for state in columnDiv:
for city in state:
print(city)
#print(soup.text)
print("\n----Done----\n\n")
| #!/usr/bin/python3.4
# Craigslist City Scraper
# By Marshall Ehlinger
# For sp2015 Systems Analysis and Design
# Returns dictionary of 'city name string' : 'site url'
# for all American cities in states/territories @ CL
from bs4 import BeautifulSoup
import re
def getCities():
fh = open("sites.htm", "r")
soup = BeautifulSoup(fh, "html.parser")
placesDict = {}
for columnDiv in soup.h1.next_sibling.next_sibling:
for state in columnDiv:
for city in state:
m = (re.search('<li><a href="(.+)">(.+)</a>', str(city)))
if m:
placesDict[m.group(2)] = m.group(1)
return(placesDict)
getCities()
| Complete site scraper for all American cities | Complete site scraper for all American cities
areaScraper.py contains the getCities() function, which will
return a dictionary of 'city name string' : 'url string'
for each Craigslist "site", corresponding to American cities,
regions, etc.
| Python | mit | MuSystemsAnalysis/craigslist_area_search,MuSystemsAnalysis/craigslist_area_search | ---
+++
@@ -1,17 +1,28 @@
+#!/usr/bin/python3.4
+
# Craigslist City Scraper
# By Marshall Ehlinger
# For sp2015 Systems Analysis and Design
+# Returns dictionary of 'city name string' : 'site url'
+# for all American cities in states/territories @ CL
+
from bs4 import BeautifulSoup
import re
-fh = open("sites.htm", "r")
-soup = BeautifulSoup(fh, "html.parser")
+def getCities():
-for columnDiv in soup.h1.next_sibling.next_sibling:
- for state in columnDiv:
- for city in state:
- print(city)
+ fh = open("sites.htm", "r")
+ soup = BeautifulSoup(fh, "html.parser")
+ placesDict = {}
-#print(soup.text)
-print("\n----Done----\n\n")
+ for columnDiv in soup.h1.next_sibling.next_sibling:
+ for state in columnDiv:
+ for city in state:
+ m = (re.search('<li><a href="(.+)">(.+)</a>', str(city)))
+ if m:
+ placesDict[m.group(2)] = m.group(1)
+
+ return(placesDict)
+
+getCities() |
e8d9dae51dc812e94236d1cc45cf1479d88f02f6 | grokapi/queries.py | grokapi/queries.py | # -*- coding: utf-8 -*-
class Grok:
"""stats.grok.se article statistics."""
def __init__(self, title, site):
self.site = site
self.title = title
def _make_url(self, year, month):
"""Make the URL to the JSON output of stats.grok.se service."""
base_url = "http://stats.grok.se/json/"
return base_url + "{0:s}/{1:d}{2:02d}/{3:s}".format(self.site, year, month, self.title)
| # -*- coding: utf-8 -*-
class Grok(object):
"""stats.grok.se article statistics."""
def __init__(self, title, site):
self.site = site
self.title = title
def _make_url(self, year, month):
"""Make the URL to the JSON output of stats.grok.se service."""
base_url = "http://stats.grok.se/json/"
return base_url + "{0:s}/{1:d}{2:02d}/{3:s}".format(self.site, year, month, self.title)
| Make Grok a new-style class inheriting from object | Make Grok a new-style class inheriting from object
| Python | mit | Commonists/Grokapi | ---
+++
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
-class Grok:
+class Grok(object):
"""stats.grok.se article statistics."""
|
49a7968e51ce850428936fb2fc66c905ce8b8998 | head1stpython/Chapter3/sketch.py | head1stpython/Chapter3/sketch.py | #Import dependencies
#Load OS functions from the standard library
import os
os.chdir('/home/israel/Development/Python_Exercises/python-octo-wookie/head1stpython/Chapter3')
#Change path for the current directory
data = open('sketch.txt')
#Start iteration over the text file
for each_line in data:
try:
(role, line_spoken) = each_line.split(':', 1)
print(role, end = '')
print(' said: ', end = '')
print(line_spoken, end = '')
except:
pass
data.close()
| #Import dependencies
#Load OS functions from the standard library
import os
#Change path for the current directory
os.chdir('/home/israel/Development/Python_Exercises/python-octo-wookie/head1stpython/Chapter3')
#Check if file exists
if os.path.exists('sketch.txt'):
#Load the text file into 'data' variable
data = open('sketch.txt')
#Start iteration over the text file
for each_line in data:
#We use try/except to handle errors that can occur with bad input
try:
(role, line_spoken) = each_line.split(':', 1)
print(role, end = '')
print(' said: ', end = '')
print(line_spoken, end = '')
except:
pass
#After all the iteration and printing, we close the file
data.close()
#If the file does not exist, we simply quit and display an error for the user/dev
else:
print('The data file is missing!')
| Validate if the file exists (if/else) | Validate if the file exists (if/else)
| Python | unlicense | israelzuniga/python-octo-wookie | ---
+++
@@ -2,21 +2,30 @@
#Load OS functions from the standard library
import os
+#Change path for the current directory
os.chdir('/home/israel/Development/Python_Exercises/python-octo-wookie/head1stpython/Chapter3')
-#Change path for the current directory
-data = open('sketch.txt')
+#Check if file exists
+if os.path.exists('sketch.txt'):
+
+ #Load the text file into 'data' variable
+ data = open('sketch.txt')
+ #Start iteration over the text file
+ for each_line in data:
+ #We use try/except to handle errors that can occur with bad input
+ try:
+ (role, line_spoken) = each_line.split(':', 1)
+ print(role, end = '')
+ print(' said: ', end = '')
+ print(line_spoken, end = '')
+ except:
+ pass
-#Start iteration over the text file
-for each_line in data:
- try:
- (role, line_spoken) = each_line.split(':', 1)
- print(role, end = '')
- print(' said: ', end = '')
- print(line_spoken, end = '')
- except:
- pass
+ #After all the iteration and printing, we close the file
+ data.close()
-data.close()
+#If the file does not exist, we simply quit and display an error for the user/dev
+else:
+ print('The data file is missing!')
|
a15d1df33fece7ddeefcbeb5a8094df2ebccd7c6 | tests/test_dict_utils.py | tests/test_dict_utils.py | import unittest
from dict_utils import dict_utils
class DictUtilsTestCase(unittest.TestCase):
def test_dict_search(self):
pass
| import unittest
from dict_utils import dict_utils
class DictUtilsTestCase(unittest.TestCase):
def test_dict_search_found(self):
dict_1 = {'first_level': {'second_level': {'name': 'Joe', 'Age': 30}}}
found_value = dict_utils.dict_search_value(dict_1, 'name')
self.assertEqual(found_value, 'Joe', 'Key not found in the given dict')
def test_dict_search_not_found(self):
dict_1 = {'first_level': {'second_level': {'name': 'Joe', 'Age': 30}}}
found_value = dict_utils.dict_search_value(dict_1, 'address')
self.assertNotEquals(found_value, 'London (UK)', 'Key not found in the given dict')
def test_dict_search_different_value(self):
dict_1 = {'first_level': {'second_level': {'name': 'Joe', 'Age': 30}}}
found_value = dict_utils.dict_search_value(dict_1, 'name')
self.assertNotEquals(found_value, 'Paul', 'Found value is not different')
def test_compare_assert_dicts_identical(self):
dict_1 = {'first_level': {'second_level': {'name': 'Joe', 'Age': 30}}}
dict_2 = {'first_level': {'second_level': {'name': 'Joe', 'Age': 30}}}
dict_utils.compare_assert_dicts(self, ['name', 'age'], dict_1, dict_2)
def test_compare_assert_dicts_different_same_values(self):
dict_1 = {'first_level': {'second_level': {'name': 'Joe', 'Age': 30}}}
dict_2 = {'level_1': {'level_2': {'name': 'Joe', 'Age': 30}}}
dict_utils.compare_assert_dicts(self, ['name', 'age'], dict_1, dict_2)
def test_compare_assert_dicts_different_keys_structure_same_values(self):
dict_1 = {'first_level': {'second_level': {'name': 'Joe', 'Age': 30}}}
dict_2 = {'level_1': {'name': 'Joe', 'Age': 30}}
dict_utils.compare_assert_dicts(self, ['name', 'age'], dict_1, dict_2)
| Add some tests for the implemented methods | Add some tests for the implemented methods
| Python | mit | glowdigitalmedia/dict-utils | ---
+++
@@ -4,5 +4,32 @@
class DictUtilsTestCase(unittest.TestCase):
- def test_dict_search(self):
- pass
+ def test_dict_search_found(self):
+ dict_1 = {'first_level': {'second_level': {'name': 'Joe', 'Age': 30}}}
+ found_value = dict_utils.dict_search_value(dict_1, 'name')
+ self.assertEqual(found_value, 'Joe', 'Key not found in the given dict')
+
+ def test_dict_search_not_found(self):
+ dict_1 = {'first_level': {'second_level': {'name': 'Joe', 'Age': 30}}}
+ found_value = dict_utils.dict_search_value(dict_1, 'address')
+ self.assertNotEquals(found_value, 'London (UK)', 'Key not found in the given dict')
+
+ def test_dict_search_different_value(self):
+ dict_1 = {'first_level': {'second_level': {'name': 'Joe', 'Age': 30}}}
+ found_value = dict_utils.dict_search_value(dict_1, 'name')
+ self.assertNotEquals(found_value, 'Paul', 'Found value is not different')
+
+ def test_compare_assert_dicts_identical(self):
+ dict_1 = {'first_level': {'second_level': {'name': 'Joe', 'Age': 30}}}
+ dict_2 = {'first_level': {'second_level': {'name': 'Joe', 'Age': 30}}}
+ dict_utils.compare_assert_dicts(self, ['name', 'age'], dict_1, dict_2)
+
+ def test_compare_assert_dicts_different_same_values(self):
+ dict_1 = {'first_level': {'second_level': {'name': 'Joe', 'Age': 30}}}
+ dict_2 = {'level_1': {'level_2': {'name': 'Joe', 'Age': 30}}}
+ dict_utils.compare_assert_dicts(self, ['name', 'age'], dict_1, dict_2)
+
+ def test_compare_assert_dicts_different_keys_structure_same_values(self):
+ dict_1 = {'first_level': {'second_level': {'name': 'Joe', 'Age': 30}}}
+ dict_2 = {'level_1': {'name': 'Joe', 'Age': 30}}
+ dict_utils.compare_assert_dicts(self, ['name', 'age'], dict_1, dict_2) |
48be23ec035daa041edc77f5478a2405a8311428 | tests/test_request_id.py | tests/test_request_id.py | from unittest import mock
from werkzeug.test import EnvironBuilder
from notifications_utils import request_helper
from notifications_utils.request_helper import CustomRequest
def test_get_request_id_from_request_id_header():
builder = EnvironBuilder()
builder.headers['NotifyRequestID'] = 'from-header'
builder.headers['NotifyDownstreamNotifyRequestID'] = 'from-downstream'
request = CustomRequest(builder.get_environ())
request_id = request._get_request_id('NotifyRequestID',
'NotifyDownstreamRequestID')
assert request_id == 'from-header'
def test_request_id_is_set_on_response(app):
request_helper.init_app(app)
client = app.test_client()
with app.app_context():
response = client.get('/', headers={'NotifyRequestID': 'generated'})
assert response.headers['NotifyRequestID'] == 'generated'
def test_request_id_is_set_on_error_response(app):
request_helper.init_app(app)
client = app.test_client()
# turn off DEBUG so that the flask default error handler gets triggered
app.config['DEBUG'] = False
@app.route('/')
def error_route():
raise Exception()
with app.app_context():
response = client.get('/', headers={'NotifyRequestID': 'generated'})
assert response.status_code == 500
assert response.headers['NotifyRequestID'] == 'generated'
| from unittest import mock
from werkzeug.test import EnvironBuilder
from notifications_utils import request_helper
from notifications_utils.request_helper import CustomRequest
def test_get_request_id_from_request_id_header():
builder = EnvironBuilder()
builder.headers['X-B3-TraceId'] = 'from-header'
request = CustomRequest(builder.get_environ())
request_id = request.request_id()
assert request_id == 'from-header'
def test_request_id_is_set_on_response(app):
request_helper.init_app(app)
client = app.test_client()
with app.app_context():
response = client.get('/', headers={
'X-B3-TraceId': 'generated',
'X-B3-SpanId': 'generated'
})
assert response.headers['X-B3-TraceId'] == 'generated'
assert response.headers['X-B3-SpanId'] == 'generated'
def test_request_id_is_set_on_error_response(app):
request_helper.init_app(app)
client = app.test_client()
# turn off DEBUG so that the flask default error handler gets triggered
app.config['DEBUG'] = False
@app.route('/')
def error_route():
raise Exception()
with app.app_context():
response = client.get('/', headers={
'X-B3-TraceId': 'generated',
'X-B3-SpanId': 'generated'
})
assert response.status_code == 500
assert response.headers['X-B3-TraceId'] == 'generated'
assert response.headers['X-B3-SpanId'] == 'generated'
| Refactor tests to check for the new headers | Refactor tests to check for the new headers
| Python | mit | alphagov/notifications-utils | ---
+++
@@ -7,12 +7,10 @@
def test_get_request_id_from_request_id_header():
builder = EnvironBuilder()
- builder.headers['NotifyRequestID'] = 'from-header'
- builder.headers['NotifyDownstreamNotifyRequestID'] = 'from-downstream'
+ builder.headers['X-B3-TraceId'] = 'from-header'
request = CustomRequest(builder.get_environ())
- request_id = request._get_request_id('NotifyRequestID',
- 'NotifyDownstreamRequestID')
+ request_id = request.request_id()
assert request_id == 'from-header'
@@ -22,8 +20,12 @@
client = app.test_client()
with app.app_context():
- response = client.get('/', headers={'NotifyRequestID': 'generated'})
- assert response.headers['NotifyRequestID'] == 'generated'
+ response = client.get('/', headers={
+ 'X-B3-TraceId': 'generated',
+ 'X-B3-SpanId': 'generated'
+ })
+ assert response.headers['X-B3-TraceId'] == 'generated'
+ assert response.headers['X-B3-SpanId'] == 'generated'
def test_request_id_is_set_on_error_response(app):
@@ -37,6 +39,10 @@
raise Exception()
with app.app_context():
- response = client.get('/', headers={'NotifyRequestID': 'generated'})
+ response = client.get('/', headers={
+ 'X-B3-TraceId': 'generated',
+ 'X-B3-SpanId': 'generated'
+ })
assert response.status_code == 500
- assert response.headers['NotifyRequestID'] == 'generated'
+ assert response.headers['X-B3-TraceId'] == 'generated'
+ assert response.headers['X-B3-SpanId'] == 'generated' |
07a8ca051b46a04df806647202144bd563d5dc5a | tests/locale_utils.py | tests/locale_utils.py |
import subprocess
"""Helper functions, decorators,... for working with locales"""
def get_avail_locales():
return {loc.strip() for loc in subprocess.check_output(["locale", "-a"]).split()}
def requires_locales(locales):
"""A decorator factory to skip tests that require unavailable locales
:param set locales: set of required locales
**Requires the test to have the set of available locales defined as its
``avail_locales`` attribute.**
"""
canon_locales = {loc.replace("UTF-8", "utf8") for loc in locales}
def decorator(test_method):
def decorated(test, *args):
missing = canon_locales - set(test.avail_locales)
if missing:
test.skipTest("requires missing locales: %s" % missing)
else:
return test_method(test, *args)
return decorated
return decorator
|
import subprocess
"""Helper functions, decorators,... for working with locales"""
def get_avail_locales():
return {loc.decode(errors="replace").strip() for loc in subprocess.check_output(["locale", "-a"]).split()}
def requires_locales(locales):
"""A decorator factory to skip tests that require unavailable locales
:param set locales: set of required locales
**Requires the test to have the set of available locales defined as its
``avail_locales`` attribute.**
"""
canon_locales = {loc.replace("UTF-8", "utf8") for loc in locales}
def decorator(test_method):
def decorated(test, *args):
missing = canon_locales - set(test.avail_locales)
if missing:
test.skipTest("requires missing locales: %s" % missing)
else:
return test_method(test, *args)
return decorated
return decorator
| Fix checking for available locales | Fix checking for available locales
"subprocess.check" returns bytes, so we need to decode the lang
codes before comparing them with required languages.
| Python | lgpl-2.1 | rhinstaller/libbytesize,rhinstaller/libbytesize,rhinstaller/libbytesize | ---
+++
@@ -4,7 +4,7 @@
"""Helper functions, decorators,... for working with locales"""
def get_avail_locales():
- return {loc.strip() for loc in subprocess.check_output(["locale", "-a"]).split()}
+ return {loc.decode(errors="replace").strip() for loc in subprocess.check_output(["locale", "-a"]).split()}
def requires_locales(locales):
"""A decorator factory to skip tests that require unavailable locales |
80aff93d3f0040f5886e983a6ce781717f7703a4 | sites/www/conf.py | sites/www/conf.py | # Obtain shared config values
import sys
import os
from os.path import abspath, join, dirname
sys.path.append(abspath(join(dirname(__file__), '..')))
from shared_conf import *
# Local blog extension
sys.path.append(abspath('.'))
extensions.append('blog')
rss_link = 'http://paramiko.org'
rss_description = 'Paramiko project news'
# Releases changelog extension
extensions.append('releases')
releases_release_uri = "https://github.com/paramiko/paramiko/tree/%s"
releases_issue_uri = "https://github.com/paramiko/paramiko/issues/%s"
# Intersphinx for referencing API/usage docs
extensions.append('sphinx.ext.intersphinx')
# Default is 'local' building, but reference the public docs site when building
# under RTD.
target = join(dirname(__file__), '..', 'docs', '_build')
if os.environ.get('READTHEDOCS') == 'True':
# TODO: switch to docs.paramiko.org post go-live of sphinx API docs
target = 'http://docs.paramiko.org/en/latest/'
intersphinx_mapping = {
'docs': (target, None),
}
# Sister-site links to API docs
html_theme_options['extra_nav_links'] = {
"API Docs": 'http://docs.paramiko.org',
}
| # Obtain shared config values
import sys
import os
from os.path import abspath, join, dirname
sys.path.append(abspath(join(dirname(__file__), '..')))
from shared_conf import *
# Local blog extension
sys.path.append(abspath('.'))
extensions.append('blog')
rss_link = 'http://paramiko.org'
rss_description = 'Paramiko project news'
# Releases changelog extension
extensions.append('releases')
# Paramiko 1.x tags start with 'v'. Meh.
releases_release_uri = "https://github.com/paramiko/paramiko/tree/v%s"
releases_issue_uri = "https://github.com/paramiko/paramiko/issues/%s"
# Intersphinx for referencing API/usage docs
extensions.append('sphinx.ext.intersphinx')
# Default is 'local' building, but reference the public docs site when building
# under RTD.
target = join(dirname(__file__), '..', 'docs', '_build')
if os.environ.get('READTHEDOCS') == 'True':
# TODO: switch to docs.paramiko.org post go-live of sphinx API docs
target = 'http://docs.paramiko.org/en/latest/'
intersphinx_mapping = {
'docs': (target, None),
}
# Sister-site links to API docs
html_theme_options['extra_nav_links'] = {
"API Docs": 'http://docs.paramiko.org',
}
| Fix broken tag-tree links in changelog | Fix broken tag-tree links in changelog
| Python | lgpl-2.1 | SebastianDeiss/paramiko,zarr12steven/paramiko,thisch/paramiko,thusoy/paramiko,toby82/paramiko,dorianpula/paramiko,jorik041/paramiko,zpzgone/paramiko,paramiko/paramiko,mhdaimi/paramiko,dlitz/paramiko,Automatic/paramiko,varunarya10/paramiko,ameily/paramiko,fvicente/paramiko,digitalquacks/paramiko,selboo/paramiko,anadigi/paramiko,CptLemming/paramiko,jaraco/paramiko,davidbistolas/paramiko,mirrorcoder/paramiko,redixin/paramiko,torkil/paramiko,remram44/paramiko,reaperhulk/paramiko,esc/paramiko,rcorrieri/paramiko | ---
+++
@@ -14,7 +14,8 @@
# Releases changelog extension
extensions.append('releases')
-releases_release_uri = "https://github.com/paramiko/paramiko/tree/%s"
+# Paramiko 1.x tags start with 'v'. Meh.
+releases_release_uri = "https://github.com/paramiko/paramiko/tree/v%s"
releases_issue_uri = "https://github.com/paramiko/paramiko/issues/%s"
# Intersphinx for referencing API/usage docs |
e366f6da5673a4c92ffcf65492951e0c6fc886ed | tests/test_element.py | tests/test_element.py | import rml.element
def test_create_element():
e = rml.element.Element('BPM', 6.0)
assert e.get_type() == 'BPM'
assert e.get_length() == 6.0
def test_add_element_to_family():
e = rml.element.Element('dummy', 0.0)
e.add_to_family('fam')
assert 'fam' in e.get_families()
| import pkg_resources
pkg_resources.require('cothread')
import cothread
import rml.element
def test_create_element():
e = rml.element.Element('BPM', 6.0)
assert e.get_type() == 'BPM'
assert e.get_length() == 6.0
def test_add_element_to_family():
e = rml.element.Element('dummy', 0.0)
e.add_to_family('fam')
assert 'fam' in e.get_families()
def test_get_pv_value():
PV = 'SR22C-DI-EBPM-04:SA:X'
e = rml.element.Element('dummy', 0.0, pv=PV)
result = e.get_pv('x')
assert isinstance(result, float)
| Test before creating the get_pv() method | Test before creating the get_pv() method
| Python | apache-2.0 | razvanvasile/RML,willrogers/pml,willrogers/pml | ---
+++
@@ -1,3 +1,6 @@
+import pkg_resources
+pkg_resources.require('cothread')
+import cothread
import rml.element
@@ -11,3 +14,10 @@
e = rml.element.Element('dummy', 0.0)
e.add_to_family('fam')
assert 'fam' in e.get_families()
+
+
+def test_get_pv_value():
+ PV = 'SR22C-DI-EBPM-04:SA:X'
+ e = rml.element.Element('dummy', 0.0, pv=PV)
+ result = e.get_pv('x')
+ assert isinstance(result, float) |
0b884ed68f2c4b482f9eadbf38adc01f7d869f1a | tests/test_exports.py | tests/test_exports.py | import unittest
import websockets
import websockets.client
import websockets.exceptions
import websockets.legacy.auth
import websockets.legacy.client
import websockets.legacy.protocol
import websockets.legacy.server
import websockets.server
import websockets.typing
import websockets.uri
combined_exports = (
websockets.legacy.auth.__all__
+ websockets.legacy.client.__all__
+ websockets.legacy.protocol.__all__
+ websockets.legacy.server.__all__
+ websockets.client.__all__
+ websockets.exceptions.__all__
+ websockets.server.__all__
+ websockets.typing.__all__
+ websockets.uri.__all__
)
class TestExportsAllSubmodules(unittest.TestCase):
def test_top_level_module_reexports_all_submodule_exports(self):
self.assertEqual(set(combined_exports), set(websockets.__all__))
def test_submodule_exports_are_globally_unique(self):
self.assertEqual(len(set(combined_exports)), len(combined_exports))
| import unittest
import websockets
import websockets.client
import websockets.exceptions
import websockets.legacy.auth
import websockets.legacy.client
import websockets.legacy.protocol
import websockets.legacy.server
import websockets.server
import websockets.typing
import websockets.uri
combined_exports = (
websockets.legacy.auth.__all__
+ websockets.legacy.client.__all__
+ websockets.legacy.protocol.__all__
+ websockets.legacy.server.__all__
+ websockets.client.__all__
+ websockets.exceptions.__all__
+ websockets.server.__all__
+ websockets.typing.__all__
+ websockets.uri.__all__
)
class ExportsTests(unittest.TestCase):
def test_top_level_module_reexports_all_submodule_exports(self):
self.assertEqual(set(combined_exports), set(websockets.__all__))
def test_submodule_exports_are_globally_unique(self):
self.assertEqual(len(set(combined_exports)), len(combined_exports))
| Rename test class consistently with others. | Rename test class consistently with others.
| Python | bsd-3-clause | aaugustin/websockets,aaugustin/websockets,aaugustin/websockets,aaugustin/websockets | ---
+++
@@ -25,7 +25,7 @@
)
-class TestExportsAllSubmodules(unittest.TestCase):
+class ExportsTests(unittest.TestCase):
def test_top_level_module_reexports_all_submodule_exports(self):
self.assertEqual(set(combined_exports), set(websockets.__all__))
|
3bbe539f387697137040f665958e0e0e27e6a420 | tests/test_session.py | tests/test_session.py | # Local imports
from uplink import session
def test_base_url(uplink_builder_mock):
# Setup
uplink_builder_mock.base_url = "https://api.github.com"
sess = session.Session(uplink_builder_mock)
# Run & Verify
assert uplink_builder_mock.base_url == sess.base_url
def test_headers(uplink_builder_mock):
# Setup
sess = session.Session(uplink_builder_mock)
# Run
sess.headers["key"] = "value"
# Verify
assert uplink_builder_mock.add_hook.called
assert sess.headers == {"key": "value"}
def test_params(uplink_builder_mock):
# Setup
sess = session.Session(uplink_builder_mock)
# Run
sess.params["key"] = "value"
# Verify
uplink_builder_mock.add_hook.assert_called()
assert sess.params == {"key": "value"}
def test_auth(uplink_builder_mock):
# Setup
uplink_builder_mock.auth = ("username", "password")
sess = session.Session(uplink_builder_mock)
# Run & Verify
assert uplink_builder_mock.auth == sess.auth
def test_auth_set(uplink_builder_mock):
# Setup
sess = session.Session(uplink_builder_mock)
# Run
sess.auth = ("username", "password")
# Verify
assert ("username", "password") == uplink_builder_mock.auth
| # Local imports
from uplink import session
def test_base_url(uplink_builder_mock):
# Setup
uplink_builder_mock.base_url = "https://api.github.com"
sess = session.Session(uplink_builder_mock)
# Run & Verify
assert uplink_builder_mock.base_url == sess.base_url
def test_headers(uplink_builder_mock):
# Setup
sess = session.Session(uplink_builder_mock)
# Run
sess.headers["key"] = "value"
# Verify
assert uplink_builder_mock.add_hook.called
assert sess.headers == {"key": "value"}
def test_params(uplink_builder_mock):
# Setup
sess = session.Session(uplink_builder_mock)
# Run
sess.params["key"] = "value"
# Verify
assert uplink_builder_mock.add_hook.called
assert sess.params == {"key": "value"}
def test_auth(uplink_builder_mock):
# Setup
uplink_builder_mock.auth = ("username", "password")
sess = session.Session(uplink_builder_mock)
# Run & Verify
assert uplink_builder_mock.auth == sess.auth
def test_auth_set(uplink_builder_mock):
# Setup
sess = session.Session(uplink_builder_mock)
# Run
sess.auth = ("username", "password")
# Verify
assert ("username", "password") == uplink_builder_mock.auth
| Fix `assert_called` usage for Python 3.5 build | Fix `assert_called` usage for Python 3.5 build
The `assert_called` method seems to invoke a bug caused by a typo in the
unittest mock module. (The bug was ultimately tracked and fixed here:
https://bugs.python.org/issue24656)
| Python | mit | prkumar/uplink | ---
+++
@@ -31,7 +31,7 @@
sess.params["key"] = "value"
# Verify
- uplink_builder_mock.add_hook.assert_called()
+ assert uplink_builder_mock.add_hook.called
assert sess.params == {"key": "value"}
|
72f3a5d1bc4c69cb0641ea5529655d5b68d156c1 | fluentcms_googlemaps/views.py | fluentcms_googlemaps/views.py | import json
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpResponse, Http404, HttpResponseBadRequest
from django.views.generic.detail import BaseDetailView
from .models import Marker
class MarkerDetailView(BaseDetailView):
"""
Simple view for fetching marker details.
"""
# TODO: support different object types. Perhaps through django-polymorphic?
model = Marker
pk_url_kwarg = 'id'
def get_object(self, queryset=None):
"""
Returns the object the view is displaying.
"""
if queryset is None:
queryset = self.get_queryset()
# Take a GET parameter instead of URLConf variable.
try:
pk = long(self.request.GET[self.pk_url_kwarg])
except (KeyError, ValueError):
return HttpResponseBadRequest("Invalid Parameters")
queryset = queryset.filter(pk=pk)
try:
# Get the single item from the filtered queryset
obj = queryset.get()
except ObjectDoesNotExist as e:
raise Http404(e)
return obj
def render_to_response(self, context):
return HttpResponse(json.dumps(self.get_json_data(context)), content_type='application/json; charset=utf-8')
def get_json_data(self, context):
"""
Generate the JSON data to send back to the client.
:rtype: dict
"""
return self.object.to_dict(detailed=True)
| import json
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpResponse, Http404
from django.views.generic.detail import BaseDetailView
from .models import Marker
class MarkerDetailView(BaseDetailView):
"""
Simple view for fetching marker details.
"""
# TODO: support different object types. Perhaps through django-polymorphic?
model = Marker
pk_url_kwarg = 'id'
def get_object(self, queryset=None):
"""
Returns the object the view is displaying.
"""
if queryset is None:
queryset = self.get_queryset()
# Take a GET parameter instead of URLConf variable.
try:
pk = long(self.request.GET[self.pk_url_kwarg])
except (KeyError, ValueError):
raise Http404("Invalid Parameters")
queryset = queryset.filter(pk=pk)
try:
# Get the single item from the filtered queryset
obj = queryset.get()
except ObjectDoesNotExist as e:
raise Http404(e)
return obj
def render_to_response(self, context):
return HttpResponse(json.dumps(self.get_json_data(context)), content_type='application/json; charset=utf-8')
def get_json_data(self, context):
"""
Generate the JSON data to send back to the client.
:rtype: dict
"""
return self.object.to_dict(detailed=True)
| Fix 500 error when no ID parameter is passed | Fix 500 error when no ID parameter is passed
| Python | apache-2.0 | edoburu/fluentcms-googlemaps,edoburu/fluentcms-googlemaps,edoburu/fluentcms-googlemaps | ---
+++
@@ -1,6 +1,6 @@
import json
from django.core.exceptions import ObjectDoesNotExist
-from django.http import HttpResponse, Http404, HttpResponseBadRequest
+from django.http import HttpResponse, Http404
from django.views.generic.detail import BaseDetailView
from .models import Marker
@@ -24,7 +24,7 @@
try:
pk = long(self.request.GET[self.pk_url_kwarg])
except (KeyError, ValueError):
- return HttpResponseBadRequest("Invalid Parameters")
+ raise Http404("Invalid Parameters")
queryset = queryset.filter(pk=pk)
try: |
cdf60bc0b07c282e75fba747c8adedd165aa0abd | index.py | index.py | #!/usr/bin/env python2.7
from werkzeug.wrappers import Request, Response
from get_html import get_html, choose_lang
@Request.application
def run(request):
lang = choose_lang(request)
if request.url.startswith("https://") or request.args.get("forcenossl") == "true":
html = get_html("launch", lang)
else:
html = get_html("nossl", lang)
return Response(html, mimetype="text/html")
if __name__ == "__main__":
import CGI
CGI.app = run
CGI.run()
| #!/usr/bin/env python2.7
from werkzeug.wrappers import Request, Response
from get_html import get_html, choose_lang
@Request.application
def run(request):
lang = request.args.get("lang") if request.args.get("lang") else choose_lang(request)
if request.url.startswith("https://") or request.args.get("forcenossl") == "true":
html = get_html("launch", lang)
else:
html = get_html("nossl", lang)
return Response(html, mimetype="text/html")
if __name__ == "__main__":
import CGI
CGI.app = run
CGI.run()
| Make the language changeable via a GET parameter. | Make the language changeable via a GET parameter.
| Python | mit | YtvwlD/dyluna,YtvwlD/dyluna,YtvwlD/dyluna | ---
+++
@@ -5,7 +5,7 @@
@Request.application
def run(request):
- lang = choose_lang(request)
+ lang = request.args.get("lang") if request.args.get("lang") else choose_lang(request)
if request.url.startswith("https://") or request.args.get("forcenossl") == "true":
html = get_html("launch", lang)
else: |
8a9f707960c3b39488c9bbee6ce7f22c6fbfc853 | web/config/local_settings.py | web/config/local_settings.py | import os
from datetime import datetime
LOG_DIR = '/var/log/graphite'
if os.getenv("CARBONLINK_HOSTS"):
CARBONLINK_HOSTS = os.getenv("CARBONLINK_HOSTS").split(',')
if os.getenv("CLUSTER_SERVERS"):
CLUSTER_SERVERS = os.getenv("CLUSTER_SERVERS").split(',')
if os.getenv("MEMCACHE_HOSTS"):
CLUSTER_SERVERS = os.getenv("MEMCACHE_HOSTS").split(',')
if os.getenv("WHISPER_DIR"):
WHISPER_DIR = os.getenv("WHISPER_DIR")
SECRET_KEY = str(datetime.now())
| import os
from datetime import datetime
LOG_DIR = '/var/log/graphite'
if os.getenv("CARBONLINK_HOSTS"):
CARBONLINK_HOSTS = os.getenv("CARBONLINK_HOSTS").split(',')
if os.getenv("CLUSTER_SERVERS"):
CLUSTER_SERVERS = os.getenv("CLUSTER_SERVERS").split(',')
if os.getenv("MEMCACHE_HOSTS"):
MEMCACHE_HOSTS = os.getenv("MEMCACHE_HOSTS").split(',')
if os.getenv("WHISPER_DIR"):
WHISPER_DIR = os.getenv("WHISPER_DIR")
SECRET_KEY = str(datetime.now())
| Fix memcache hosts setting from env | Fix memcache hosts setting from env
Before this fix, if one had set OS env vars for both CLUSTER_SERVERS and
MEMCACHE_HOSTS, the value of the latter would override the former and the
graphite web application would fail to show any metrics.
| Python | apache-2.0 | Banno/graphite-setup,Banno/graphite-setup,Banno/graphite-setup | ---
+++
@@ -9,7 +9,7 @@
CLUSTER_SERVERS = os.getenv("CLUSTER_SERVERS").split(',')
if os.getenv("MEMCACHE_HOSTS"):
- CLUSTER_SERVERS = os.getenv("MEMCACHE_HOSTS").split(',')
+ MEMCACHE_HOSTS = os.getenv("MEMCACHE_HOSTS").split(',')
if os.getenv("WHISPER_DIR"):
WHISPER_DIR = os.getenv("WHISPER_DIR") |
31f3a4a5c388c7c021c103687fbc8c8a8a0be005 | mythril/support/support_utils.py | mythril/support/support_utils.py | """This module contains utility functions for the Mythril support package."""
from typing import Dict
import logging
import _pysha3 as sha3
log = logging.getLogger(__name__)
class Singleton(type):
"""A metaclass type implementing the singleton pattern."""
_instances = {} # type: Dict
def __call__(cls, *args, **kwargs):
"""Delegate the call to an existing resource or a a new one.
This is not thread- or process-safe by default. It must be protected with
a lock.
:param args:
:param kwargs:
:return:
"""
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
def get_code_hash(code):
"""
:param code: bytecode
:return: Returns hash of the given bytecode
"""
try:
keccak = sha3.keccak_256()
keccak.update(bytes.fromhex(code[2:]))
return "0x" + keccak.hexdigest()
except ValueError:
log.debug("Unable to change the bytecode to bytes. Bytecode: {}".format(code))
return ""
| """This module contains utility functions for the Mythril support package."""
from typing import Dict
import logging
import _pysha3 as sha3
log = logging.getLogger(__name__)
class Singleton(type):
"""A metaclass type implementing the singleton pattern."""
_instances = {} # type: Dict
def __call__(cls, *args, **kwargs):
"""Delegate the call to an existing resource or a a new one.
This is not thread- or process-safe by default. It must be protected with
a lock.
:param args:
:param kwargs:
:return:
"""
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
def get_code_hash(code):
"""
:param code: bytecode
:return: Returns hash of the given bytecode
"""
code = code[2:] if code[:2] == "0x" else code
try:
keccak = sha3.keccak_256()
keccak.update(bytes.fromhex(code))
return "0x" + keccak.hexdigest()
except ValueError:
log.debug("Unable to change the bytecode to bytes. Bytecode: {}".format(code))
return ""
 | Make the hash function handle the generic case | Make the hash function handle the generic case
| Python | mit | b-mueller/mythril,b-mueller/mythril,b-mueller/mythril,b-mueller/mythril | ---
+++
@@ -31,9 +31,10 @@
:param code: bytecode
:return: Returns hash of the given bytecode
"""
+ code = code[2:] if code[:2] == "0x" else code
try:
keccak = sha3.keccak_256()
- keccak.update(bytes.fromhex(code[2:]))
+ keccak.update(bytes.fromhex(code))
return "0x" + keccak.hexdigest()
except ValueError:
log.debug("Unable to change the bytecode to bytes. Bytecode: {}".format(code)) |
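The change above normalizes an optional "0x" prefix before hex-decoding. A standalone sketch of the same normalization, using the standard library's hashlib.sha3_256 purely as a stand-in for _pysha3's keccak (the two algorithms pad differently, so digests will not match Mythril's):

    import hashlib

    def code_hash(code):
        code = code[2:] if code[:2] == "0x" else code  # accept both forms
        try:
            return "0x" + hashlib.sha3_256(bytes.fromhex(code)).hexdigest()
        except ValueError:  # odd length or non-hex input
            return ""

    assert code_hash("0x6001") == code_hash("6001")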
17224d7db16865bc735f27b1f919c6146089d4fd | vumi/dispatchers/__init__.py | vumi/dispatchers/__init__.py | """The vumi.dispatchers API."""
__all__ = ["BaseDispatchWorker", "BaseDispatchRouter", "SimpleDispatchRouter",
"TransportToTransportRouter", "ToAddrRouter",
"FromAddrMultiplexRouter", "UserGroupingRouter"]
from vumi.dispatchers.base import (BaseDispatchWorker, BaseDispatchRouter,
SimpleDispatchRouter,
TransportToTransportRouter, ToAddrRouter,
FromAddrMultiplexRouter,
UserGroupingRouter)
| """The vumi.dispatchers API."""
__all__ = ["BaseDispatchWorker", "BaseDispatchRouter", "SimpleDispatchRouter",
"TransportToTransportRouter", "ToAddrRouter",
"FromAddrMultiplexRouter", "UserGroupingRouter",
"ContentKeywordRouter"]
from vumi.dispatchers.base import (BaseDispatchWorker, BaseDispatchRouter,
SimpleDispatchRouter,
TransportToTransportRouter, ToAddrRouter,
FromAddrMultiplexRouter,
UserGroupingRouter, ContentKeywordRouter)
| Add ContentKeywordRouter to vumi.dispatchers API. | Add ContentKeywordRouter to vumi.dispatchers API.
| Python | bsd-3-clause | TouK/vumi,vishwaprakashmishra/xmatrix,harrissoerja/vumi,vishwaprakashmishra/xmatrix,TouK/vumi,harrissoerja/vumi,harrissoerja/vumi,vishwaprakashmishra/xmatrix,TouK/vumi | ---
+++
@@ -2,10 +2,11 @@
__all__ = ["BaseDispatchWorker", "BaseDispatchRouter", "SimpleDispatchRouter",
"TransportToTransportRouter", "ToAddrRouter",
- "FromAddrMultiplexRouter", "UserGroupingRouter"]
+ "FromAddrMultiplexRouter", "UserGroupingRouter",
+ "ContentKeywordRouter"]
from vumi.dispatchers.base import (BaseDispatchWorker, BaseDispatchRouter,
SimpleDispatchRouter,
TransportToTransportRouter, ToAddrRouter,
FromAddrMultiplexRouter,
- UserGroupingRouter)
+ UserGroupingRouter, ContentKeywordRouter) |
18a669f3fc9ebd5c1604d1f43fa7b93a2513a250 | Rubik/GearRatios/gear_ratios.py | Rubik/GearRatios/gear_ratios.py | import threading
# Sounds note: Could use http://simpleaudio.readthedocs.io/en/latest/installation.html
class GearRatio (threading.Thread):
PIN1 = 5
PIN2 = 6
STATE_IDLE = 1
STATE_START = 2
STATE_COUNTING = 3
STATE_RESULT = 4
RESULT_SOUNDS = [] #TODO add filenames for different result sounds
mState = STATE_IDLE
mResult = 0
def waitForFirstClick(self):
#TODO wait for PIN1 to change then we'll start the music
self.mState = self.STATE_START
return
def waitForSecondClick(self):
#TODO wait for PIN2 to change, then we'll start counting revolutions
#TODO if timer expires reset
self.mState = self.STATE_COUNTING
def countClicks(self):
#TODO count the ratio of PIN2 to PIN1 to check if the ratio is correct.
self.mResult = 4 #set to ratio
def playResult(self):
#TODO play the sound file that is closest to the result
return
def run(self):
print("Running gear ratios!")
#TODO switch statement for state changes
return
| import threading
# Sounds note: Could use http://simpleaudio.readthedocs.io/en/latest/installation.html
class GearRatio (threading.Thread):
PIN1 = 5
PIN2 = 6
STATE_QUIT = -1
STATE_IDLE = 1
STATE_START = 2
STATE_COUNTING = 3
STATE_RESULT = 4
RESULT_SOUNDS = [] #TODO add filenames for different result sounds
_state = STATE_IDLE
_result = 0
def wait_for_first_click(self):
#TODO wait for PIN1 to change then we'll start the music
self._state = self.STATE_START
return
def wait_for_second_click(self):
#TODO wait for PIN2 to change, then we'll start counting revolutions
#TODO if timer expires reset
self._state = self.STATE_COUNTING
def count_clicks(self):
#TODO count the ratio of PIN2 to PIN1 to check if the ratio is correct.
self._result = 4 #set to ratio
self._state = self.STATE_RESULT
def play_result(self):
#TODO play the sound file that is closest to the result
self._state = self.STATE_QUIT
def error(self):
print("Unknown error in gear ratios!")
def state_to_strings(self):
switcher = {
self.STATE_IDLE: self.wait_for_first_click,
self.STATE_START: self.wait_for_second_click,
self.STATE_COUNTING: self.count_clicks,
self.STATE_RESULT: self.play_result
}
return switcher.get(self._state, self.error)
def run(self):
print("Running gear ratios!")
while True:
if self._state == self.STATE_QUIT:
break
print("Entering state " + str(self._state))
f_state = self.state_to_strings()
f_state()
#TODO switch statement for state changes
return
| Add basic state machine for gear ratios | Add basic state machine for gear ratios
| Python | apache-2.0 | RoboErik/RUBIK,RoboErik/RUBIK,RoboErik/RUBIK | ---
+++
@@ -5,6 +5,7 @@
PIN1 = 5
PIN2 = 6
+ STATE_QUIT = -1
STATE_IDLE = 1
STATE_START = 2
STATE_COUNTING = 3
@@ -12,28 +13,47 @@
RESULT_SOUNDS = [] #TODO add filenames for different result sounds
- mState = STATE_IDLE
- mResult = 0
+ _state = STATE_IDLE
+ _result = 0
- def waitForFirstClick(self):
+ def wait_for_first_click(self):
#TODO wait for PIN1 to change then we'll start the music
- self.mState = self.STATE_START
+ self._state = self.STATE_START
return
- def waitForSecondClick(self):
+ def wait_for_second_click(self):
#TODO wait for PIN2 to change, then we'll start counting revolutions
#TODO if timer expires reset
- self.mState = self.STATE_COUNTING
+ self._state = self.STATE_COUNTING
- def countClicks(self):
+ def count_clicks(self):
#TODO count the ratio of PIN2 to PIN1 to check if the ratio is correct.
- self.mResult = 4 #set to ratio
+ self._result = 4 #set to ratio
+ self._state = self.STATE_RESULT
- def playResult(self):
+ def play_result(self):
#TODO play the sound file that is closest to the result
- return
+ self._state = self.STATE_QUIT
+
+ def error(self):
+ print("Unknown error in gear ratios!")
+
+ def state_to_strings(self):
+ switcher = {
+ self.STATE_IDLE: self.wait_for_first_click,
+ self.STATE_START: self.wait_for_second_click,
+ self.STATE_COUNTING: self.count_clicks,
+ self.STATE_RESULT: self.play_result
+ }
+ return switcher.get(self._state, self.error)
def run(self):
print("Running gear ratios!")
+ while True:
+ if self._state == self.STATE_QUIT:
+ break
+ print("Entering state " + str(self._state))
+ f_state = self.state_to_strings()
+ f_state()
#TODO switch statement for state changes
return |
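The state_to_strings helper in this record is the usual dict-dispatch idiom: the dict maps states to bound methods and dict.get supplies self.error as the default handler, so f_state() can be called unconditionally. A stripped-down illustration of the same idiom outside the class:

    def start(): print("starting")
    def stop(): print("stopping")
    def unknown(): print("unknown command")

    def dispatch(command):
        # .get() never raises; unmatched commands fall through to the default.
        handler = {"start": start, "stop": stop}.get(command, unknown)
        handler()

    dispatch("start")   # starting
    dispatch("reboot")  # unknown command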
c7f1759ef02c0fa12ca408dfac9d25227fbceba7 | nova/policies/server_password.py | nova/policies/server_password.py | # Copyright 2016 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from nova.policies import base
BASE_POLICY_NAME = 'os_compute_api:os-server-password'
server_password_policies = [
policy.DocumentedRuleDefault(
BASE_POLICY_NAME,
base.RULE_ADMIN_OR_OWNER,
"Show and clear the encrypted administrative password of a server",
[
{
'method': 'GET',
'path': '/servers/{server_id}/os-server-password'
},
{
'method': 'DELETE',
'path': '/servers/{server_id}/os-server-password'
}
]),
]
def list_rules():
return server_password_policies
| # Copyright 2016 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from nova.policies import base
BASE_POLICY_NAME = 'os_compute_api:os-server-password'
server_password_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME,
check_str=base.RULE_ADMIN_OR_OWNER,
description="Show and clear the encrypted administrative "
"password of a server",
operations=[
{
'method': 'GET',
'path': '/servers/{server_id}/os-server-password'
},
{
'method': 'DELETE',
'path': '/servers/{server_id}/os-server-password'
}
],
scope_types=['system', 'project']),
]
def list_rules():
return server_password_policies
| Introduce scope_types in server password policy | Introduce scope_types in server password policy
oslo.policy introduced the scope_type feature which can
control the access level at system-level and project-level.
- https://docs.openstack.org/oslo.policy/latest/user/usage.html#setting-scope
- http://specs.openstack.org/openstack/keystone-specs/specs/keystone/queens/system-scope.html
Appropriate scope_type for nova case:
- https://specs.openstack.org/openstack/nova-specs/specs/ussuri/approved/policy-defaults-refresh.html#scope
This commit introduces scope_type for server password API policies
as ['system', 'project'].
Also adds the test case with scope_type enabled and verifies we
pass and fail the policy check with the expected context.
Partial implement blueprint policy-defaults-refresh
Change-Id: I8f5e66810c68a871e57a5362a931545bccded608
| Python | apache-2.0 | klmitch/nova,openstack/nova,klmitch/nova,openstack/nova,mahak/nova,mahak/nova,mahak/nova,klmitch/nova,klmitch/nova,openstack/nova | ---
+++
@@ -23,10 +23,11 @@
server_password_policies = [
policy.DocumentedRuleDefault(
- BASE_POLICY_NAME,
- base.RULE_ADMIN_OR_OWNER,
- "Show and clear the encrypted administrative password of a server",
- [
+ name=BASE_POLICY_NAME,
+ check_str=base.RULE_ADMIN_OR_OWNER,
+ description="Show and clear the encrypted administrative "
+ "password of a server",
+ operations=[
{
'method': 'GET',
'path': '/servers/{server_id}/os-server-password'
@@ -35,7 +36,8 @@
'method': 'DELETE',
'path': '/servers/{server_id}/os-server-password'
}
- ]),
+ ],
+ scope_types=['system', 'project']),
]
|
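Background for the scope_types line in this record: with enforce_scope = true in the [oslo_policy] section, the enforcer checks a token's scope against this list before check_str is evaluated at all. A minimal standalone sketch (the rule name and strings here are invented; only the API shape mirrors the diff):

    from oslo_policy import policy

    rule = policy.DocumentedRuleDefault(
        name='example:os-server-password',
        check_str='rule:admin_or_owner',
        description='Example rule carrying scope metadata',
        operations=[{'method': 'GET', 'path': '/example'}],
        scope_types=['system', 'project'],
    )
    print(rule.scope_types)  # ['system', 'project']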
0aba3f8d1131b502beff1c249e55af88115950ae | migrations/versions/20140430220209_4093ccb6d914.py | migrations/versions/20140430220209_4093ccb6d914.py | """empty message
Revision ID: 4093ccb6d914
Revises: None
Create Date: 2014-04-30 22:02:09.991428
"""
# revision identifiers, used by Alembic.
revision = '4093ccb6d914'
down_revision = None
from alembic import op
import sqlalchemy as sa
from datetime import datetime
def upgrade():
op.create_table('gallery',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.Text(), nullable=False),
sa.Column('folder', sa.Text(length=255), nullable=False),
sa.Column('share_code', sa.Text(), nullable=False),
sa.Column('modified', sa.DateTime(timezone=True), default=datetime.utcnow),
sa.Column('created', sa.DateTime(timezone=True), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('folder')
)
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.Text(length=255), nullable=False),
sa.Column('password', sa.Text(), nullable=False),
sa.Column('role', sa.Text(), nullable=False, server_default="user"),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
def downgrade():
op.drop_table('user')
op.drop_table('gallery')
| """empty message
Revision ID: 4093ccb6d914
Revises: None
Create Date: 2014-04-30 22:02:09.991428
"""
# revision identifiers, used by Alembic.
revision = '4093ccb6d914'
down_revision = None
from alembic import op
import sqlalchemy as sa
from datetime import datetime
def upgrade():
op.create_table('gallery',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.Text(), nullable=False),
sa.Column('folder', sa.types.VARCHAR(length=255), nullable=False),
sa.Column('share_code', sa.Text(), nullable=False),
sa.Column('modified', sa.DateTime(timezone=True), default=datetime.utcnow),
sa.Column('created', sa.DateTime(timezone=True), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('folder')
)
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.types.VARCHAR(length=255), nullable=False),
sa.Column('password', sa.Text(), nullable=False),
sa.Column('role', sa.Text(), nullable=False, server_default="user"),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
def downgrade():
op.drop_table('user')
op.drop_table('gallery')
| Convert text columns to varchar for mysql | Convert text columns to varchar for mysql
| Python | mit | taeram/ineffable,taeram/ineffable,taeram/ineffable | ---
+++
@@ -19,7 +19,7 @@
op.create_table('gallery',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.Text(), nullable=False),
- sa.Column('folder', sa.Text(length=255), nullable=False),
+ sa.Column('folder', sa.types.VARCHAR(length=255), nullable=False),
sa.Column('share_code', sa.Text(), nullable=False),
sa.Column('modified', sa.DateTime(timezone=True), default=datetime.utcnow),
sa.Column('created', sa.DateTime(timezone=True), nullable=True),
@@ -28,7 +28,7 @@
)
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('name', sa.Text(length=255), nullable=False),
+ sa.Column('name', sa.types.VARCHAR(length=255), nullable=False),
sa.Column('password', sa.Text(), nullable=False),
sa.Column('role', sa.Text(), nullable=False, server_default="user"),
sa.PrimaryKeyConstraint('id'), |
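The motivation behind this migration, not stated in the message: MySQL cannot build the UniqueConstraint on an unbounded TEXT column, because unique indexes there require a bounded key length, while VARCHAR(255) is indexable as-is. A minimal sketch of the fixed column shape outside Alembic:

    import sqlalchemy as sa

    metadata = sa.MetaData()
    # On MySQL, unique=True on sa.Text fails to create its index without a
    # key length; a bounded VARCHAR avoids the problem on all backends.
    gallery = sa.Table(
        'gallery_demo', metadata,
        sa.Column('id', sa.Integer, primary_key=True),
        sa.Column('folder', sa.types.VARCHAR(length=255), unique=True),
    )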
13544ca42db9947eaed7e82c4733683f1fc7c381 | cobe/control.py | cobe/control.py | import argparse
import logging
import sys
from . import commands
parser = argparse.ArgumentParser(description="Cobe control")
parser.add_argument("-b", "--brain", default="cobe.brain")
parser.add_argument("--debug", action="store_true", help=argparse.SUPPRESS)
subparsers = parser.add_subparsers(title="Commands")
commands.ConsoleCommand.add_subparser(subparsers)
commands.InitCommand.add_subparser(subparsers)
commands.LearnCommand.add_subparser(subparsers)
commands.LearnIrcLogCommand.add_subparser(subparsers)
def main():
args = parser.parse_args()
formatter = logging.Formatter("%(levelname)s: %(message)s")
console = logging.StreamHandler()
console.setFormatter(formatter)
logging.root.addHandler(console)
if args.debug:
logging.root.setLevel(logging.DEBUG)
else:
logging.root.setLevel(logging.INFO)
try:
args.run(args)
except KeyboardInterrupt:
print
sys.exit(1)
if __name__ == "__main__":
main()
| import argparse
import logging
import sys
from . import commands
parser = argparse.ArgumentParser(description="Cobe control")
parser.add_argument("-b", "--brain", default="cobe.brain")
parser.add_argument("--debug", action="store_true", help=argparse.SUPPRESS)
parser.add_argument("--instatrace", metavar="FILE",
help="log performance statistics to FILE")
subparsers = parser.add_subparsers(title="Commands")
commands.ConsoleCommand.add_subparser(subparsers)
commands.InitCommand.add_subparser(subparsers)
commands.LearnCommand.add_subparser(subparsers)
commands.LearnIrcLogCommand.add_subparser(subparsers)
def main():
args = parser.parse_args()
formatter = logging.Formatter("%(levelname)s: %(message)s")
console = logging.StreamHandler()
console.setFormatter(formatter)
logging.root.addHandler(console)
if args.debug:
logging.root.setLevel(logging.DEBUG)
else:
logging.root.setLevel(logging.INFO)
if args.instatrace:
instatrace.Instatrace().init(args.instatrace)
try:
args.run(args)
except KeyboardInterrupt:
print
sys.exit(1)
if __name__ == "__main__":
main()
| Add a command line argument for enabling instatrace globally | Add a command line argument for enabling instatrace globally
| Python | mit | LeMagnesium/cobe,meska/cobe,LeMagnesium/cobe,pteichman/cobe,wodim/cobe-ng,DarkMio/cobe,tiagochiavericosta/cobe,wodim/cobe-ng,tiagochiavericosta/cobe,pteichman/cobe,DarkMio/cobe,meska/cobe | ---
+++
@@ -7,6 +7,8 @@
parser = argparse.ArgumentParser(description="Cobe control")
parser.add_argument("-b", "--brain", default="cobe.brain")
parser.add_argument("--debug", action="store_true", help=argparse.SUPPRESS)
+parser.add_argument("--instatrace", metavar="FILE",
+ help="log performance statistics to FILE")
subparsers = parser.add_subparsers(title="Commands")
commands.ConsoleCommand.add_subparser(subparsers)
@@ -27,6 +29,9 @@
else:
logging.root.setLevel(logging.INFO)
+ if args.instatrace:
+ instatrace.Instatrace().init(args.instatrace)
+
try:
args.run(args)
except KeyboardInterrupt: |
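One caveat with this record: the added instatrace.Instatrace().init(...) call has no matching import in the shown file, so main() would raise NameError as soon as the flag is used. Assuming the package ships an instatrace module alongside commands (an assumption; the module path is not confirmed by the record), the fix is one line:

    # Hypothetical follow-up import, mirroring the existing `from . import commands`.
    from . import commands, instatrace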
58d04f636a01cb1d2cbf75414edcd819029058e4 | packages/python-windows/setup.py | packages/python-windows/setup.py | #!/usr/bin/env python
# ====================================================================
# Copyright (c) 2006 CollabNet. All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://subversion.tigris.org/license-1.html.
# If newer versions of this license are posted there, you may use a
# newer version instead, at your option.
#
# This software consists of voluntary contributions made by many
# individuals. For exact contribution history, see the revision
# history and logs, available at http://subversion.tigris.org/.
# ====================================================================
from distutils.core import setup
setup (name = "svn-python",
description = "Subversion Python Bindings",
maintainer = "Subversion Developers <[email protected]>",
url = "http://subversion.tigris.org",
version = "1.4.0",
packages = ["libsvn", "svn"],
package_data = {"libsvn": ["*.pyd"]})
| #!/usr/bin/env python
# ====================================================================
# Copyright (c) 2006 CollabNet. All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://subversion.tigris.org/license-1.html.
# If newer versions of this license are posted there, you may use a
# newer version instead, at your option.
#
# This software consists of voluntary contributions made by many
# individuals. For exact contribution history, see the revision
# history and logs, available at http://subversion.tigris.org/.
# ====================================================================
from distutils.core import setup
setup (name = "svn-python",
description = "Subversion Python Bindings",
maintainer = "Subversion Developers <[email protected]>",
url = "http://subversion.tigris.org",
version = "1.4.0",
packages = ["libsvn", "svn"],
package_data = {"libsvn": ["*.dll", "*.pyd"]})
| Fix the python-windows installer generator by making it include the .dll files in the installer. That list originally consisted only of "*.dll". When the build system was modified to generate .pyd files for the binary modules, it was changed to "*.pyd". The Subversion libraries and the dependencies are still .dll files, though, so "*.dll" needs to be brought back. | Fix the python-windows installer generator by making it include the .dll
files in the installer. That list originally consisted only of "*.dll".
When the build system was modified to generate .pyd files for the binary
modules, it was changed to "*.pyd". The Subversion libraries and the
dependencies are still .dll files, though, so "*.dll" needs to be brought
back.
* packages/python-windows/setup.py: Add *.dll to the list of package data.
Patch by: <[email protected]>
git-svn-id: f8a4e5e023278da1e04e203c7fe051e3c4285d88@878116 13f79535-47bb-0310-9956-ffa450edef68
| Python | apache-2.0 | YueLinHo/Subversion,wbond/subversion,wbond/subversion,wbond/subversion,wbond/subversion,YueLinHo/Subversion,YueLinHo/Subversion,YueLinHo/Subversion,YueLinHo/Subversion,YueLinHo/Subversion,wbond/subversion,YueLinHo/Subversion,wbond/subversion,wbond/subversion,YueLinHo/Subversion | ---
+++
@@ -22,4 +22,4 @@
url = "http://subversion.tigris.org",
version = "1.4.0",
packages = ["libsvn", "svn"],
- package_data = {"libsvn": ["*.pyd"]})
+ package_data = {"libsvn": ["*.dll", "*.pyd"]}) |
93d2e33795e240407ab7e18aec67514124ff6713 | app/__init__.py | app/__init__.py | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from instance.config import app_config
app = Flask(__name__)
def EnvironmentName(environ):
app.config.from_object(app_config[environ])
EnvironmentName('TestingConfig')
databases = SQLAlchemy(app)
from app.v1 import bucketlist
| from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from instance.config import app_config
app = Flask(__name__)
def EnvironmentName(environ):
app.config.from_object(app_config[environ])
EnvironmentName('DevelopmentEnviron')
databases = SQLAlchemy(app)
from app.v1 import bucketlist
| Change postman testing environment to development | Change postman testing environment to development
| Python | mit | paulupendo/CP-2-Bucketlist-Application | ---
+++
@@ -11,6 +11,6 @@
app.config.from_object(app_config[environ])
-EnvironmentName('TestingConfig')
+EnvironmentName('DevelopmentEnviron')
databases = SQLAlchemy(app)
from app.v1 import bucketlist |
cabe0f3659f210f07e84db11fe30a0d848b2a92b | partner_person/__openerp__.py | partner_person/__openerp__.py | # -*- coding: utf-8 -*-
{
'name': 'Partners Persons Management',
'version': '1.0',
'category': 'Tools',
'sequence': 14,
'summary': '',
'description': """
Partners Persons Management
===========================
Openerp considers a person to be any partner that does not have "is_company" set to true; those partners can have:
----------------------------------------------------------------------------------------------------------
* First Name and Last Name
* Birthdate
* Sex
* Mother and Father
* Childs
* Age (functional field)
* Nationality
* Husband/Wife
* National Identity
* Passport
* Marital Status
It also adds a configuration menu for choosing which fields you want to see.
""",
'author': 'Ingenieria ADHOC',
'website': 'www.ingadhoc.com',
'images': [
],
'depends': [
'base',
],
'data': [
'res_partner_view.xml',
'res_config_view.xml',
'security/partner_person_security.xml',
],
'demo': [
],
'test': [
],
'installable': True,
'auto_install': False,
'application': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | # -*- coding: utf-8 -*-
{
'name': 'Partners Persons Management',
'version': '1.0',
'category': 'Tools',
'sequence': 14,
'summary': '',
'description': """
Partners Persons Management
===========================
Openerp considers a person to be any partner that does not have "is_company" set to true; those partners can have:
----------------------------------------------------------------------------------------------------------
* First Name and Last Name
* Birthdate
* Sex
* Mother and Father
* Childs
* Age (functional field)
* Nationality
* Husband/Wife
* National Identity
* Passport
* Marital Status
It also adds a configuration menu for choosing which fields you want to see.
""",
'author': 'Ingenieria ADHOC',
'website': 'www.ingadhoc.com',
'images': [
],
'depends': [
'base',
],
'data': [
'res_partner_view.xml',
'res_config_view.xml',
'security/partner_person_security.xml',
],
'demo': [
],
'test': [
],
'installable': False,
'auto_install': False,
'application': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | FIX partner person not installable | FIX partner person not installable
| Python | agpl-3.0 | syci/ingadhoc-odoo-addons,ingadhoc/sale,adhoc-dev/account-financial-tools,ingadhoc/odoo-addons,adhoc-dev/account-financial-tools,adhoc-dev/odoo-addons,bmya/odoo-addons,ingadhoc/account-financial-tools,ClearCorp/account-financial-tools,dvitme/odoo-addons,maljac/odoo-addons,jorsea/odoo-addons,syci/ingadhoc-odoo-addons,adhoc-dev/odoo-addons,ingadhoc/account-invoicing,ingadhoc/stock,jorsea/odoo-addons,ingadhoc/odoo-addons,maljac/odoo-addons,dvitme/odoo-addons,sysadminmatmoz/ingadhoc,sysadminmatmoz/ingadhoc,bmya/odoo-addons,ingadhoc/sale,ingadhoc/product,jorsea/odoo-addons,ingadhoc/sale,adhoc-dev/odoo-addons,syci/ingadhoc-odoo-addons,HBEE/odoo-addons,ingadhoc/odoo-addons,maljac/odoo-addons,ingadhoc/sale,ingadhoc/account-analytic,bmya/odoo-addons,ingadhoc/partner,ingadhoc/account-payment,sysadminmatmoz/ingadhoc,HBEE/odoo-addons,HBEE/odoo-addons,dvitme/odoo-addons,ClearCorp/account-financial-tools,ingadhoc/product | ---
+++
@@ -44,7 +44,7 @@
],
'test': [
],
- 'installable': True,
+ 'installable': False,
'auto_install': False,
'application': True,
} |
0469c2aba43b20b76482eb9c42c7be19eb39b2a4 | benchctl/benchctl.py | benchctl/benchctl.py | import click
import repl as benchrepl
@click.group()
@click.option('--debug/--no-debug', default=False)
def cli(debug):
"""Benchctl is a utility for interacting with benchd and benchd-aggregator
servers to fetch data, program experiments, and control instruments.
It is additionally used to configure and administrate benchd instances.
"""
@cli.command(short_help='start a repl to interact with benchd servers')
def repl():
"""Starts a REPL which can be used to interact with and administrate
both benchd and benchd-aggregator servers."""
benchrepl.run() | import click
import repl as benchrepl
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
@click.group(context_settings=CONTEXT_SETTINGS)
@click.option('--debug/--no-debug', default=False)
def cli(debug):
"""Benchctl is a utility for interacting with benchd and benchd-aggregator
servers to fetch data, program experiments, and control instruments.
It is additionally used to configure and administrate benchd instances.
"""
@cli.command(short_help='start a repl to interact with benchd servers')
def repl():
"""Starts a REPL which can be used to interact with and administrate
both benchd and benchd-aggregator servers."""
benchrepl.run() | Add -h as an alternative help flag | Add -h as an alternative help flag
| Python | bsd-3-clause | openlabequipment/benchd | ---
+++
@@ -2,7 +2,11 @@
import repl as benchrepl
[email protected]()
+
+CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
+
+
[email protected](context_settings=CONTEXT_SETTINGS)
@click.option('--debug/--no-debug', default=False)
def cli(debug):
"""Benchctl is a utility for interacting with benchd and benchd-aggregator |
5f1ccd3845e198495e33748b460ef6fa9858e925 | app/settings.py | app/settings.py | import os
EQ_RABBITMQ_URL = os.getenv('EQ_RABBITMQ_URL', 'amqp://admin:admin@localhost:5672/%2F')
EQ_RABBITMQ_QUEUE_NAME = os.getenv('EQ_RABBITMQ_QUEUE_NAME', 'eq-submissions')
EQ_RABBITMQ_TEST_QUEUE_NAME = os.getenv('EQ_RABBITMQ_TEST_QUEUE_NAME', 'eq-test')
EQ_PRODUCTION = os.getenv("EQ_PRODUCTION", 'True')
EQ_RRM_PUBLIC_KEY = os.getenv('EQ_RRM_PUBLIC_KEY')
EQ_SR_PRIVATE_KEY = os.getenv('EQ_SR_PRIVATE_KEY')
EQ_GIT_REF = os.getenv('EQ_GIT_REF', None)
EQ_NEW_RELIC_CONFIG_FILE = os.getenv('EQ_NEW_RELIC_CONFIG_FILE', './newrelic.ini')
EQ_SR_LOG_GROUP = os.getenv('EQ_SR_LOG_GROUP', os.getenv('USER') + '-local')
EQ_LOG_LEVEL = os.getenv('EQ_LOG_LEVEL', 'INFO')
| import os
EQ_RABBITMQ_URL = os.getenv('EQ_RABBITMQ_URL', 'amqp://admin:admin@localhost:5672/%2F')
EQ_RABBITMQ_QUEUE_NAME = os.getenv('EQ_RABBITMQ_QUEUE_NAME', 'eq-submissions')
EQ_RABBITMQ_TEST_QUEUE_NAME = os.getenv('EQ_RABBITMQ_TEST_QUEUE_NAME', 'eq-test')
EQ_PRODUCTION = os.getenv("EQ_PRODUCTION", 'True')
EQ_RRM_PUBLIC_KEY = os.getenv('EQ_RRM_PUBLIC_KEY')
EQ_SR_PRIVATE_KEY = os.getenv('EQ_SR_PRIVATE_KEY')
EQ_GIT_REF = os.getenv('EQ_GIT_REF', None)
EQ_NEW_RELIC_CONFIG_FILE = os.getenv('EQ_NEW_RELIC_CONFIG_FILE', './newrelic.ini')
EQ_SR_LOG_GROUP = os.getenv('EQ_SR_LOG_GROUP', os.getenv('USER', 'UNKNOWN') + '-local')
EQ_LOG_LEVEL = os.getenv('EQ_LOG_LEVEL', 'INFO')
| Make sure there is a default for LOG Group | Make sure there is a default for LOG Group
| Python | mit | ONSdigital/eq-survey-runner,ONSdigital/eq-survey-runner,ONSdigital/eq-survey-runner,ONSdigital/eq-survey-runner | ---
+++
@@ -9,5 +9,5 @@
EQ_SR_PRIVATE_KEY = os.getenv('EQ_SR_PRIVATE_KEY')
EQ_GIT_REF = os.getenv('EQ_GIT_REF', None)
EQ_NEW_RELIC_CONFIG_FILE = os.getenv('EQ_NEW_RELIC_CONFIG_FILE', './newrelic.ini')
-EQ_SR_LOG_GROUP = os.getenv('EQ_SR_LOG_GROUP', os.getenv('USER') + '-local')
+EQ_SR_LOG_GROUP = os.getenv('EQ_SR_LOG_GROUP', os.getenv('USER', 'UNKNOWN') + '-local')
EQ_LOG_LEVEL = os.getenv('EQ_LOG_LEVEL', 'INFO') |
292a5e5abd0cd3f6d1b30b4513a0bd1f22cefa1b | nova/version.py | nova/version.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
NOVA_VERSION = ['2012', '2', None]
YEAR, COUNT, REVISION = NOVA_VERSION
FINAL = False # This becomes true at Release Candidate time
def canonical_version_string():
return '.'.join(filter(None, NOVA_VERSION))
def version_string():
if FINAL:
return canonical_version_string()
else:
return '%s-dev' % (canonical_version_string(),)
def vcs_version_string():
return 'LOCALBRANCH:LOCALREVISION'
def version_string_with_vcs():
return '%s-%s' % (canonical_version_string(), vcs_version_string())
| # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
NOVA_VERSION = ['2012', '2', None]
YEAR, COUNT, REVISION = NOVA_VERSION
FINAL = True # This becomes true at Release Candidate time
def canonical_version_string():
return '.'.join(filter(None, NOVA_VERSION))
def version_string():
if FINAL:
return canonical_version_string()
else:
return '%s-dev' % (canonical_version_string(),)
def vcs_version_string():
return 'LOCALBRANCH:LOCALREVISION'
def version_string_with_vcs():
return '%s-%s' % (canonical_version_string(), vcs_version_string())
| Mark 2012.2 final in prep for RC1 | Mark 2012.2 final in prep for RC1
Mark 2012.2 Final=True as we prepare to publish Nova Folsom RC1
Change-Id: I72731bded164aeec3c7e47f6bfe44fb219a9ea56
| Python | apache-2.0 | paulmathews/nova,NewpTone/stacklab-nova,paulmathews/nova,NewpTone/stacklab-nova,savi-dev/nova,NewpTone/stacklab-nova,savi-dev/nova,savi-dev/nova,paulmathews/nova | ---
+++
@@ -16,7 +16,7 @@
NOVA_VERSION = ['2012', '2', None]
YEAR, COUNT, REVISION = NOVA_VERSION
-FINAL = False # This becomes true at Release Candidate time
+FINAL = True # This becomes true at Release Candidate time
def canonical_version_string(): |
257d3bf6cee059de50872cd02b682e1a05d467e9 | phylocommons/get_treestore.py | phylocommons/get_treestore.py | from treestore import Treestore
import settings
def get_treestore():
return Treestore(**settings.TREESTORE_KWARGS)
def uri_from_tree_id(tree_id):
return (settings.TREE_URI + tree_id)
def tree_id_from_uri(uri):
if uri.startswith(settings.TREE_URI):
uri = uri.replace(settings.TREE_URI, '', 1)
return uri | from treestore import Treestore
import settings
def get_treestore():
return Treestore(**settings.TREESTORE_KWARGS)
def uri_from_tree_id(tree_id):
return Treestore.uri_from_id(tree_id, base_uri=settings.TREE_URI)
def tree_id_from_uri(uri):
if uri.startswith(settings.TREE_URI):
uri = uri.replace(settings.TREE_URI, '', 1)
if uri.endswith('/'): uri = uri.rstrip('/')
return uri | Use treestore to get URIs from IDs. | Use treestore to get URIs from IDs.
| Python | mit | NESCent/phylocommons,NESCent/phylocommons | ---
+++
@@ -6,9 +6,10 @@
return Treestore(**settings.TREESTORE_KWARGS)
def uri_from_tree_id(tree_id):
- return (settings.TREE_URI + tree_id)
+ return Treestore.uri_from_id(tree_id, base_uri=settings.TREE_URI)
def tree_id_from_uri(uri):
if uri.startswith(settings.TREE_URI):
uri = uri.replace(settings.TREE_URI, '', 1)
+ if uri.endswith('/'): uri = uri.rstrip('/')
return uri |
25e694675d3d2ef8a24f6d0cdd978f42465ae2dc | xdocker/clean_logs.py | xdocker/clean_logs.py | #!/usr/bin/env python
# encoding: utf-8
import os
import datetime
from config import USER_DIRECTORY, LOG_DIRECTORY_NAME, STORE_LOGS
NOW = datetime.datetime.now()
def clean_log(filepath):
delete = False
with open(filepath) as fp:
line = fp.readline()
try:
date_str = ' '.join(line.split()[:1])
log_start = datetime.datetime.strptime(date_str,
'%Y-%m-%d %H:%M:%S,%f')
except StandardError:
delete = True
else:
log_age = NOW - log_start
if log_age.hours >= STORE_LOGS:
delete = True
if delete:
print "Deleting {}".format(filepath)
os.remove(filepath)
def main():
for username in os.listdir(USER_DIRECTORY):
log_dir = os.path.join(USER_DIRECTORY, username, LOG_DIRECTORY_NAME)
if not os.path.exists(log_dir):
continue
for log in os.listdir(log_dir):
clean_log(os.path.join(log_dir, log))
if __name__ == '__main__':
main()
| #!/usr/bin/env python
# encoding: utf-8
import os
import datetime
from config import USER_DIRECTORY, LOG_DIRECTORY_NAME, STORE_LOGS
NOW = datetime.datetime.now()
def clean_log(filepath):
delete = False
with open(filepath) as fp:
line = fp.readline()
try:
date_str = ' '.join(line.split()[:2])
log_start = datetime.datetime.strptime(date_str,
'%Y-%m-%d %H:%M:%S,%f')
except StandardError:
delete = True
else:
log_age = NOW - log_start
if log_age.hours >= STORE_LOGS:
delete = True
if delete:
print "Deleting {}".format(filepath)
os.remove(filepath)
def main():
for username in os.listdir(USER_DIRECTORY):
log_dir = os.path.join(USER_DIRECTORY, username, LOG_DIRECTORY_NAME)
if not os.path.exists(log_dir):
continue
for log in os.listdir(log_dir):
clean_log(os.path.join(log_dir, log))
if __name__ == '__main__':
main()
 | Fix wrong date parsing in clean logs script | Fix wrong date parsing in clean logs script
| Python | apache-2.0 | XDocker/Engine,XDocker/Engine | ---
+++
@@ -14,7 +14,7 @@
with open(filepath) as fp:
line = fp.readline()
try:
- date_str = ' '.join(line.split()[:1])
+ date_str = ' '.join(line.split()[:2])
log_start = datetime.datetime.strptime(date_str,
'%Y-%m-%d %H:%M:%S,%f')
except StandardError: |
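A latent issue in both versions of this script, unrelated to the fix: datetime.timedelta has no hours attribute (only days, seconds, microseconds and the total_seconds() method), so log_age.hours would raise AttributeError once the date parses. An equivalent age check that actually runs:

    import datetime

    log_start = datetime.datetime(2014, 1, 1)
    log_age = datetime.datetime.now() - log_start
    STORE_LOGS = 24  # hours, mirroring the config constant

    # timedelta exposes total_seconds(); derive hours from it.
    if log_age.total_seconds() / 3600 >= STORE_LOGS:
        delete = True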
05f95bae2c04cb07739b220df1a60577016a1f53 | yolapy/models/site.py | yolapy/models/site.py | from six import iteritems
from yolapy.services import Yola
class Site(object):
"""Represents a Site resource on the Yola API."""
def __init__(self, **kwargs):
self.fields = kwargs
for key, val in iteritems(kwargs):
setattr(self, key, val)
def __eq__(self, other):
return self.fields == other.fields
@classmethod
def get(cls, site_id):
"""Get a site from the Yola API."""
site_attributes = Yola().get_site(site_id)
return cls(**site_attributes)
@classmethod
def list(cls, **filters):
"""Get a list of sites from the Yola API."""
sites = Yola().list_sites(**filters)['results']
return [Site(**s) for s in sites]
| from six import iteritems
from yolapy.services import Yola
class Site(object):
"""Represents a Site resource on the Yola API."""
def __init__(self, **kwargs):
self._fields = kwargs
for key, val in iteritems(kwargs):
setattr(self, key, val)
def __eq__(self, other):
return self._fields == other._fields
@classmethod
def get(cls, site_id):
"""Get a site from the Yola API."""
site_attributes = Yola().get_site(site_id)
return cls(**site_attributes)
@classmethod
def list(cls, **filters):
"""Get a list of sites from the Yola API."""
sites = Yola().list_sites(**filters)['results']
return [Site(**s) for s in sites]
| Change Site.fields to protected Site._fields | Change Site.fields to protected Site._fields
| Python | mit | yola/yolapy | ---
+++
@@ -7,12 +7,12 @@
"""Represents a Site resource on the Yola API."""
def __init__(self, **kwargs):
- self.fields = kwargs
+ self._fields = kwargs
for key, val in iteritems(kwargs):
setattr(self, key, val)
def __eq__(self, other):
- return self.fields == other.fields
+ return self._fields == other._fields
@classmethod
def get(cls, site_id): |
853dc6b254c66807fd6c44b374c89b90069f55b5 | Lib/test/test_startfile.py | Lib/test/test_startfile.py | # Ridiculously simple test of the os.startfile function for Windows.
#
# empty.vbs is an empty file (except for a comment), which does
# nothing when run with cscript or wscript.
#
# A possible improvement would be to have empty.vbs do something that
# we can detect here, to make sure that not only the os.startfile()
# call succeeded, but also that the script actually has run.
import unittest
from test import test_support
import os
class TestCase(unittest.TestCase):
def test_nonexisting(self):
self.assertRaises(OSError, os.startfile, "nonexisting.vbs")
def test_nonexisting_u(self):
self.assertRaises(OSError, os.startfile, u"nonexisting.vbs")
def test_empty(self):
empty = os.path.join(os.path.dirname(__file__), "empty.vbs")
os.startfile(empty)
os.startfile(empty, "open")
def test_empty_u(self):
empty = os.path.join(os.path.dirname(__file__), "empty.vbs")
os.startfile(unicode(empty, "mbcs"))
os.startfile(unicode(empty, "mbcs"), "open")
def test_main():
test_support.run_unittest(TestCase)
if __name__=="__main__":
test_main()
| # Ridiculously simple test of the os.startfile function for Windows.
#
# empty.vbs is an empty file (except for a comment), which does
# nothing when run with cscript or wscript.
#
# A possible improvement would be to have empty.vbs do something that
# we can detect here, to make sure that not only the os.startfile()
# call succeeded, but also that the script actually has run.
import unittest
from test import test_support
# use this form so that the test is skipped when startfile is not available:
from os import startfile
class TestCase(unittest.TestCase):
def test_nonexisting(self):
self.assertRaises(OSError, startfile, "nonexisting.vbs")
def test_nonexisting_u(self):
self.assertRaises(OSError, startfile, u"nonexisting.vbs")
def test_empty(self):
empty = os.path.join(os.path.dirname(__file__), "empty.vbs")
startfile(empty)
startfile(empty, "open")
def test_empty_u(self):
empty = os.path.join(os.path.dirname(__file__), "empty.vbs")
startfile(unicode(empty, "mbcs"))
startfile(unicode(empty, "mbcs"), "open")
def test_main():
test_support.run_unittest(TestCase)
if __name__=="__main__":
test_main()
| Change the import statement so that the test is skipped when os.startfile is not present. | Change the import statement so that the test is skipped when
os.startfile is not present.
| Python | mit | sk-/python2.7-type-annotator,sk-/python2.7-type-annotator,sk-/python2.7-type-annotator | ---
+++
@@ -9,25 +9,26 @@
import unittest
from test import test_support
-import os
+# use this form so that the test is skipped when startfile is not available:
+from os import startfile
class TestCase(unittest.TestCase):
def test_nonexisting(self):
- self.assertRaises(OSError, os.startfile, "nonexisting.vbs")
+ self.assertRaises(OSError, startfile, "nonexisting.vbs")
def test_nonexisting_u(self):
- self.assertRaises(OSError, os.startfile, u"nonexisting.vbs")
+ self.assertRaises(OSError, startfile, u"nonexisting.vbs")
def test_empty(self):
empty = os.path.join(os.path.dirname(__file__), "empty.vbs")
- os.startfile(empty)
- os.startfile(empty, "open")
+ startfile(empty)
+ startfile(empty, "open")
def test_empty_u(self):
empty = os.path.join(os.path.dirname(__file__), "empty.vbs")
- os.startfile(unicode(empty, "mbcs"))
- os.startfile(unicode(empty, "mbcs"), "open")
+ startfile(unicode(empty, "mbcs"))
+ startfile(unicode(empty, "mbcs"), "open")
def test_main():
test_support.run_unittest(TestCase) |
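Two notes on this record. First, the skip works because the test runner treats an ImportError raised while importing a test module as "skipped", so `from os import startfile` fails fast off Windows. Second, the commit drops `import os` while os.path.join is still used in two tests; `from os import startfile` does not bind the name os, so those tests would raise NameError. A modern equivalent sketch using an explicit skip (unittest in 2.7+):

    import os
    import unittest

    @unittest.skipUnless(hasattr(os, 'startfile'), 'os.startfile is Windows-only')
    class TestStartfile(unittest.TestCase):
        def test_nonexisting(self):
            self.assertRaises(OSError, os.startfile, 'nonexisting.vbs')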
4a5ea880b77e44fa20129e6195cf37d5d72427f3 | webpay/model/model.py | webpay/model/model.py | import json
class Model:
def __init__(self, client, data, conversion = None):
self._client = client
self._data = data
for k, v in data.items():
if conversion is None:
self.__dict__[k] = v
else:
conv = conversion(k)
self.__dict__[k] = v if conv is None else conv(client, v)
def __str__(self):
return '<webpay.model.%s.%s> %s' % (self.object, self.object.capitalize(), json.dumps(self._data, indent = 4, sort_keys = True))
| import json
class Model:
def __init__(self, client, data, conversion = None):
self._client = client
self._data = data
for k, v in data.items():
if conversion is None:
self.__dict__[k] = v
else:
conv = conversion(k)
self.__dict__[k] = v if conv is None else conv(client, v)
def __str__(self):
t = type(self)
return '<%s.%s> %s' % (t.__module__, t.__name__, json.dumps(self._data, indent = 4, sort_keys = True))
| Use type's module and name to show full class path correctly | Use type's module and name to show full class path correctly
| Python | mit | yamaneko1212/webpay-python | ---
+++
@@ -13,4 +13,5 @@
self.__dict__[k] = v if conv is None else conv(client, v)
def __str__(self):
- return '<webpay.model.%s.%s> %s' % (self.object, self.object.capitalize(), json.dumps(self._data, indent = 4, sort_keys = True))
+ t = type(self)
+ return '<%s.%s> %s' % (t.__module__, t.__name__, json.dumps(self._data, indent = 4, sort_keys = True)) |
9e22082a280babb1e0880fe24fa17c45aac09515 | docker-nodev.py | docker-nodev.py |
from __future__ import print_function
import subprocess
import sys
DOCKER_CREATE_IN = 'docker create -it nodev {}'
DOCKER_SIMPLE_CMD_IN = 'docker {} {container_id}'
def nodev(argv=()):
container_id = subprocess.check_output(DOCKER_CREATE_IN.format(' '.join(argv)), shell=True).strip()
print('creating container: {container_id}'.format(**locals()))
try:
subprocess.check_call('docker cp . {container_id}:/src '.format(**locals()), shell=True)
subprocess.check_call('docker start -ai {container_id}'.format(**locals()), shell=True)
finally:
print('removing container: {container_id}'.format(**locals()))
subprocess.check_output(DOCKER_SIMPLE_CMD_IN.format('rm -f', **locals()), shell=True)
if __name__ == '__main__':
nodev(sys.argv)
|
from __future__ import print_function
import subprocess
import sys
DOCKER_CREATE_IN = 'docker create -it nodev {}'
DOCKER_SIMPLE_CMD_IN = 'docker {} {container_id}'
def nodev(argv=()):
container_id = subprocess.check_output(DOCKER_CREATE_IN.format(' '.join(argv)), shell=True).decode('utf-8').strip()
print('creating container: {container_id}'.format(**locals()))
try:
subprocess.check_call('docker cp . {container_id}:/src '.format(**locals()), shell=True)
subprocess.check_call('docker start -ai {container_id}'.format(**locals()), shell=True)
finally:
print('removing container: {container_id}'.format(**locals()))
subprocess.check_output(DOCKER_SIMPLE_CMD_IN.format('rm -f', **locals()), shell=True)
if __name__ == '__main__':
try:
nodev(sys.argv)
except subprocess.CalledProcessError as ex:
print(ex.args)
sys.exit(1)
 | Fix python3 crash and clean up error reporting. | Fix python3 crash and clean up error reporting.
| Python | mit | nodev-io/nodev-starter-kit,nodev-io/nodev-tutorial,nodev-io/nodev-starter-kit | ---
+++
@@ -10,7 +10,7 @@
def nodev(argv=()):
- container_id = subprocess.check_output(DOCKER_CREATE_IN.format(' '.join(argv)), shell=True).strip()
+ container_id = subprocess.check_output(DOCKER_CREATE_IN.format(' '.join(argv)), shell=True).decode('utf-8').strip()
print('creating container: {container_id}'.format(**locals()))
try:
subprocess.check_call('docker cp . {container_id}:/src '.format(**locals()), shell=True)
@@ -21,4 +21,8 @@
if __name__ == '__main__':
- nodev(sys.argv)
+ try:
+ nodev(sys.argv)
+ except subprocess.CalledProcessError as ex:
+ print(ex.args)
+ sys.exit(1) |
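Background for this fix: on Python 3, subprocess.check_output returns bytes, so the container id had to be decoded before string formatting. An alternative that keeps call sites identical on 2 and 3 is to ask for text output up front; a sketch:

    import subprocess

    # universal_newlines=True makes check_output return str on both Python 2
    # and 3 (Python 3.7+ also accepts the clearer alias text=True).
    container_id = subprocess.check_output(
        'docker create -it nodev', shell=True, universal_newlines=True).strip()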
87bb90370b8d7439989072ae17634dd30276f24c | yanico/config.py | yanico/config.py | # Copyright 2015-2016 Masayuki Yamamoto
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handle yanico configuration."""
import configparser
import os.path
CONFIG_FILENAME = '.yanico.conf'
def user_path():
"""Return user configuration filepath.
The filepath depends home directory and CONFIG_FILENAME constants.
"""
return os.path.join(os.path.expanduser('~'), CONFIG_FILENAME)
def load(*filepaths):
parser = configparser.ConfigParser()
parser.read((user_path(),) + filepaths)
return parser
| # Copyright 2015-2016 Masayuki Yamamoto
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handle yanico configuration."""
import configparser
import os.path
CONFIG_FILENAME = '.yanico.conf'
def user_path():
"""Return user configuration filepath.
The filepath depends home directory and CONFIG_FILENAME constants.
"""
return os.path.join(os.path.expanduser('~'), CONFIG_FILENAME)
def load(*filepaths):
"""Return configration object.
Object parses home directory config file.
Args:
filepaths (Tuple[str]): configuration file paths
Returns:
ConfigParser: object expects some configurations are loaded.
"""
parser = configparser.ConfigParser()
parser.read((user_path(),) + filepaths)
return parser
 | Add docstring to load function | Add docstring to load function
Describe which files are parsed at a minimum.
| Python | apache-2.0 | ma8ma/yanico | ---
+++
@@ -29,6 +29,16 @@
def load(*filepaths):
+ """Return configration object.
+
+ Object parses home directory config file.
+
+ Args:
+ filepaths (Tuple[str]): configuration file paths
+
+ Returns:
+ ConfigParser: object expects some configurations are loaded.
+ """
parser = configparser.ConfigParser()
parser.read((user_path(),) + filepaths)
return parser |
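A small extension point the new docstring hints at: ConfigParser.read() returns the list of files it could actually open and parse, so callers can detect a missing user config. Sketch, not in the commit (paths are hypothetical):

    import configparser

    parser = configparser.ConfigParser()
    # read() silently skips files that don't exist and returns only the
    # ones it parsed.
    loaded = parser.read(['/home/user/.yanico.conf', 'project.conf'])
    print('parsed:', loaded)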
3ce0aef8d546f83485c1048dac9e9524f2501552 | src/wagtail_personalisation/blocks.py | src/wagtail_personalisation/blocks.py | from __future__ import absolute_import, unicode_literals
from django.utils.translation import ugettext_lazy as _
from wagtail.core import blocks
from wagtail_personalisation.adapters import get_segment_adapter
from wagtail_personalisation.models import Segment
def list_segment_choices():
for pk, name in Segment.objects.values_list('pk', 'name'):
yield pk, name
class PersonalisedStructBlock(blocks.StructBlock):
"""Struct block that allows personalisation per block."""
segment = blocks.ChoiceBlock(
choices=list_segment_choices,
required=False, label=_("Personalisation segment"),
help_text=_("Only show this content block for users in this segment"))
def render(self, value, context=None):
"""Only render this content block for users in this segment.
:param value: The value from the block
:type value: dict
:param context: The context containing the request
:type context: dict
:returns: The provided block if matched, otherwise an empty string
:rtype: blocks.StructBlock or empty str
"""
request = context['request']
adapter = get_segment_adapter(request)
user_segments = adapter.get_segments()
if value['segment']:
for segment in user_segments:
if segment.id == int(value['segment']):
return super(PersonalisedStructBlock, self).render(
value, context)
return ""
| from __future__ import absolute_import, unicode_literals
from django.utils.translation import ugettext_lazy as _
from wagtail.core import blocks
from wagtail_personalisation.adapters import get_segment_adapter
from wagtail_personalisation.models import Segment
def list_segment_choices():
yield -1, ("Show to everyone")
for pk, name in Segment.objects.values_list('pk', 'name'):
yield pk, name
class PersonalisedStructBlock(blocks.StructBlock):
"""Struct block that allows personalisation per block."""
segment = blocks.ChoiceBlock(
choices=list_segment_choices,
required=False, label=_("Personalisation segment"),
help_text=_("Only show this content block for users in this segment"))
def render(self, value, context=None):
"""Only render this content block for users in this segment.
:param value: The value from the block
:type value: dict
:param context: The context containing the request
:type context: dict
:returns: The provided block if matched, otherwise an empty string
:rtype: blocks.StructBlock or empty str
"""
request = context['request']
adapter = get_segment_adapter(request)
user_segments = adapter.get_segments()
try:
segment_id = int(value['segment'])
except (ValueError, TypeError):
return ''
if segment_id > 0:
for segment in user_segments:
if segment.id == segment_id:
return super(PersonalisedStructBlock, self).render(
value, context)
if segment_id == -1:
return super(PersonalisedStructBlock, self).render(
value, context)
return ''
| Add an option to show a personalised block to everyone | Add an option to show a personalised block to everyone
| Python | mit | LabD/wagtail-personalisation,LabD/wagtail-personalisation,LabD/wagtail-personalisation | ---
+++
@@ -8,6 +8,7 @@
def list_segment_choices():
+ yield -1, ("Show to everyone")
for pk, name in Segment.objects.values_list('pk', 'name'):
yield pk, name
@@ -35,10 +36,19 @@
adapter = get_segment_adapter(request)
user_segments = adapter.get_segments()
- if value['segment']:
+ try:
+ segment_id = int(value['segment'])
+ except (ValueError, TypeError):
+ return ''
+
+ if segment_id > 0:
for segment in user_segments:
- if segment.id == int(value['segment']):
+ if segment.id == segment_id:
return super(PersonalisedStructBlock, self).render(
value, context)
- return ""
+ if segment_id == -1:
+ return super(PersonalisedStructBlock, self).render(
+ value, context)
+
+ return '' |
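One detail worth noting in the new list_segment_choices: ("Show to everyone") is a plain parenthesised string, not a translation call, so unlike every other label in the file the sentinel entry bypasses gettext; presumably _(...) was intended. A sketch of that form, assuming the module's existing imports (ugettext as _, Segment):

    def list_segment_choices():
        yield -1, _("Show to everyone")  # wrapped like every other label
        for pk, name in Segment.objects.values_list('pk', 'name'):
            yield pk, name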
cbeabd95e172ae213a3e95f2285b4ccc00a80254 | src/you_get/extractors/dailymotion.py | src/you_get/extractors/dailymotion.py | #!/usr/bin/env python
__all__ = ['dailymotion_download']
from ..common import *
def dailymotion_download(url, output_dir = '.', merge = True, info_only = False):
"""Downloads Dailymotion videos by URL.
"""
html = get_content(url)
info = json.loads(match1(html, r'qualities":({.+?}),"'))
title = match1(html, r'"title"\s*:\s*"(.+?)",')
for quality in ['720','480','380','240','auto']:
real_url = info[quality][0]["url"]
if real_url:
break
type, ext, size = url_info(real_url)
print_info(site_info, title, type, size)
if not info_only:
download_urls([real_url], title, ext, size, output_dir, merge = merge)
site_info = "Dailymotion.com"
download = dailymotion_download
download_playlist = playlist_not_supported('dailymotion')
| #!/usr/bin/env python
__all__ = ['dailymotion_download']
from ..common import *
def dailymotion_download(url, output_dir = '.', merge = True, info_only = False):
"""Downloads Dailymotion videos by URL.
"""
html = get_content(url)
info = json.loads(match1(html, r'qualities":({.+?}),"'))
title = match1(html, r'"title"\s*:\s*"(.+?)",')
for quality in ['720','480','380','240','auto']:
try:
real_url = info[quality][0]["url"]
if real_url:
break
except KeyError:
pass
type, ext, size = url_info(real_url)
print_info(site_info, title, type, size)
if not info_only:
download_urls([real_url], title, ext, size, output_dir, merge = merge)
site_info = "Dailymotion.com"
download = dailymotion_download
download_playlist = playlist_not_supported('dailymotion')
| Fix problems with videos that do not have 720p mode | Fix problems with videos that do not have 720p mode
| Python | mit | linhua55/you-get,jindaxia/you-get,qzane/you-get,cnbeining/you-get,zmwangx/you-get,linhua55/you-get,Red54/you-get,lilydjwg/you-get,xyuanmu/you-get,qzane/you-get,zmwangx/you-get,lilydjwg/you-get,smart-techs/you-get,specter4mjy/you-get,xyuanmu/you-get,smart-techs/you-get,cnbeining/you-get | ---
+++
@@ -13,9 +13,12 @@
title = match1(html, r'"title"\s*:\s*"(.+?)",')
for quality in ['720','480','380','240','auto']:
- real_url = info[quality][0]["url"]
- if real_url:
- break
+ try:
+ real_url = info[quality][0]["url"]
+ if real_url:
+ break
+ except KeyError:
+ pass
type, ext, size = url_info(real_url)
|
71b917eabce9b520d8f7568d1825fa451ea2b8fb | menpofit/aam/result.py | menpofit/aam/result.py | from __future__ import division
from menpofit.result import (
ParametricAlgorithmResult, MultiFitterResult, SerializableIterativeResult)
# TODO: document me!
# TODO: handle costs
class AAMAlgorithmResult(ParametricAlgorithmResult):
r"""
"""
def __init__(self, image, fitter, shape_parameters,
appearance_parameters=None, gt_shape=None):
super(AAMAlgorithmResult, self).__init__(
image, fitter, shape_parameters, gt_shape=gt_shape)
self.appearance_parameters = appearance_parameters
# TODO: document me!
class LinearAAMAlgorithmResult(AAMAlgorithmResult):
r"""
"""
def shapes(self, as_points=False):
if as_points:
return [self.fitter.transform.from_vector(p).sparse_target.points
for p in self.shape_parameters]
else:
return [self.fitter.transform.from_vector(p).sparse_target
for p in self.shape_parameters]
@property
def final_shape(self):
return self.final_transform.sparse_target
@property
def initial_shape(self):
return self.initial_transform.sparse_target
# TODO: document me!
# TODO: handle costs
class AAMFitterResult(MultiFitterResult):
r"""
"""
pass
| from __future__ import division
from menpofit.result import (
ParametricAlgorithmResult, MultiFitterResult, SerializableIterativeResult)
# TODO: document me!
# TODO: handle costs
class AAMAlgorithmResult(ParametricAlgorithmResult):
r"""
"""
def __init__(self, image, fitter, shape_parameters,
appearance_parameters=None, gt_shape=None):
super(AAMAlgorithmResult, self).__init__(
image, fitter, shape_parameters, gt_shape=gt_shape)
self.appearance_parameters = appearance_parameters
# TODO: document me!
class LinearAAMAlgorithmResult(AAMAlgorithmResult):
r"""
"""
@property
def shapes(self, as_points=False):
return [self.fitter.transform.from_vector(p).sparse_target
for p in self.shape_parameters]
@property
def final_shape(self):
return self.final_transform.sparse_target
@property
def initial_shape(self):
return self.initial_transform.sparse_target
# TODO: document me!
# TODO: handle costs
class AAMFitterResult(MultiFitterResult):
r"""
"""
pass
| Make shape a property for LinearAAMAlgorithmResult | Make shape a property for LinearAAMAlgorithmResult
| Python | bsd-3-clause | yuxiang-zhou/menpofit,grigorisg9gr/menpofit,grigorisg9gr/menpofit,yuxiang-zhou/menpofit | ---
+++
@@ -19,14 +19,10 @@
class LinearAAMAlgorithmResult(AAMAlgorithmResult):
r"""
"""
+ @property
def shapes(self, as_points=False):
- if as_points:
- return [self.fitter.transform.from_vector(p).sparse_target.points
- for p in self.shape_parameters]
-
- else:
- return [self.fitter.transform.from_vector(p).sparse_target
- for p in self.shape_parameters]
+ return [self.fitter.transform.from_vector(p).sparse_target
+ for p in self.shape_parameters]
@property
def final_shape(self): |
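One side effect of this change is worth noting: a @property is read as a plain attribute, so the leftover as_points parameter can never be supplied by callers. A minimal illustration with hypothetical names:

class Result(object):
    def __init__(self, params):
        self._params = params

    @property
    def shapes(self):
        # Read as an attribute; no argument can be passed at the access site.
        return [p * 2 for p in self._params]

result = Result([1, 2, 3])
print(result.shapes)   # [2, 4, 6] -- note: no parentheses
# result.shapes(True)  # TypeError: 'list' object is not callable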
62c51799953c1299e7c89c61a23270bf55e9cd69 | PortalEnrollment/models.py | PortalEnrollment/models.py | from django.db import models
# Create your models here.
| from django.db import models
from Portal.models import CharacterAttribute
from django.utils.translation import ugettext as _
# Create your models here.
class Enrollment(models.Model):
roles = models.ManyToManyField(_('Role'), CharacterAttribute)
open = models.BooleanField(_('Open Enrollment'), default=False)
limit = models.SmallIntegerField(_('Limit'))
background_image = models.ImageField(_('Background image'), upload_to='/enrollment/background/', blank=True)
thumbnail = models.ImageField(_('Thumbnail image'), upload_to='/enrollment/thumbnail/', blank=True)
def reach_limit(self):
pass
class Meta:
verbose_name = _('Enrollment')
verbose_name_plural = _('Enrollments') | Add first model for Enrollment application | Add first model for Enrollment application
| Python | mit | elryndir/GuildPortal,elryndir/GuildPortal | ---
+++
@@ -1,3 +1,19 @@
from django.db import models
+from Portal.models import CharacterAttribute
+from django.utils.translation import ugettext as _
# Create your models here.
+
+class Enrollment(models.Model):
+ roles = models.ManyToManyField(_('Role'), CharacterAttribute)
+ open = models.BooleanField(_('Open Enrollment'), default=False)
+ limit = models.SmallIntegerField(_('Limit'))
+ background_image = models.ImageField(_('Background image'), upload_to='/enrollment/background/', blank=True)
+ thumbnail = models.ImageField(_('Thumbnail image'), upload_to='/enrollment/thumbnail/', blank=True)
+
+ def reach_limit(self):
+ pass
+
+ class Meta:
+ verbose_name = _('Enrollment')
+ verbose_name_plural = _('Enrollments') |
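For reference, Django's ManyToManyField expects the related model (or its 'app.Model' string label) as the first positional argument, with the human-readable label passed as verbose_name; the field above hands it _('Role') instead. A sketch of the conventional form, assuming a configured Django project:

from django.db import models
from django.utils.translation import ugettext_lazy as _

class Enrollment(models.Model):
    # Related model first (a string label avoids a circular import);
    # the translated label goes in verbose_name.
    roles = models.ManyToManyField('Portal.CharacterAttribute',
                                   verbose_name=_('Role'))
    limit = models.SmallIntegerField(_('Limit'))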
e96ede7ad3753bfed461eca83b1cd77e8bccd180 | pylinks/links/search_indexes.py | pylinks/links/search_indexes.py | from haystack import indexes
from pylinks.links.models import Link
class LinkIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
def get_model(self):
return Link
| from haystack import indexes
from pylinks.links.models import Link
class LinkIndex(indexes.RealTimeSearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
def get_model(self):
return Link
| Switch to real-time index (easier for low volume) | Switch to real-time index (easier for low volume)
| Python | mit | michaelmior/pylinks,michaelmior/pylinks,michaelmior/pylinks | ---
+++
@@ -2,7 +2,7 @@
from pylinks.links.models import Link
-class LinkIndex(indexes.SearchIndex, indexes.Indexable):
+class LinkIndex(indexes.RealTimeSearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
def get_model(self): |
f189143309f41766e8e8ad24d96ee68ba6584cb9 | rcamp/rcamp/settings/logging.py | rcamp/rcamp/settings/logging.py | import os
from .toggles import *
if not DEBUG:
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = os.environ.get('RCAMP_EMAIL_HOST')
EMAIL_PORT = int(os.environ.get('RCAMP_EMAIL_PORT'))
else:
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'file': {
'level': 'WARN',
'class': 'logging.FileHandler',
'filename': '/opt/logs/rcamp.log',
},
},
'loggers': {
'django': {
'handlers': ['file'],
'level': 'DEBUG',
'propagate': True,
},
'rcamp': {
'handlers': ['file'],
'level': 'DEBUG',
'propagate': True,
},
'projects': {
'handlers': ['file'],
'level': 'DEBUG',
'propagate': True,
},
'accounts': {
'handlers': ['file'],
'level': 'DEBUG',
'propagate': True,
},
},
}
| import os
from .toggles import *
if not DEBUG:
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = os.environ.get('RCAMP_EMAIL_HOST')
EMAIL_PORT = int(os.environ.get('RCAMP_EMAIL_PORT'))
else:
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'file': {
'level': 'WARN',
'class': 'logging.FileHandler',
'filename': '/opt/logs/rcamp.log',
},
'management_commands': {
'level': "INFO",
'class': 'logging.FileHandler',
'filename': '/opt/logs/management_commands.log'
}
},
'loggers': {
'django': {
'handlers': ['file'],
'level': 'DEBUG',
'propagate': True,
},
'rcamp': {
'handlers': ['file'],
'level': 'DEBUG',
'propagate': True,
},
'projects': {
'handlers': ['file'],
'level': 'DEBUG',
'propagate': True,
},
'management_commands': {
'handlers': ['management_commands'],
'level': 'DEBUG',
'propagate': True,
},
'accounts': {
'handlers': ['file'],
'level': 'DEBUG',
'propagate': True,
},
},
}
| Add handler and logger for management commands | Add handler and logger for management commands
| Python | mit | ResearchComputing/RCAMP,ResearchComputing/RCAMP,ResearchComputing/RCAMP,ResearchComputing/RCAMP | ---
+++
@@ -17,6 +17,11 @@
'class': 'logging.FileHandler',
'filename': '/opt/logs/rcamp.log',
},
+ 'management_commands': {
+ 'level': "INFO",
+ 'class': 'logging.FileHandler',
+ 'filename': '/opt/logs/management_commands.log'
+ }
},
'loggers': {
'django': {
@@ -34,6 +39,11 @@
'level': 'DEBUG',
'propagate': True,
},
+ 'management_commands': {
+ 'handlers': ['management_commands'],
+ 'level': 'DEBUG',
+ 'propagate': True,
+ },
'accounts': {
'handlers': ['file'],
'level': 'DEBUG', |
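With this config, any code that requests the 'management_commands' logger by name is routed to the dedicated file. A self-contained sketch of the wiring (log path shortened so the example runs anywhere):

import logging
import logging.config

LOGGING = {
    'version': 1,
    'handlers': {
        'management_commands': {
            'level': 'INFO',
            'class': 'logging.FileHandler',
            'filename': 'management_commands.log',  # demo path
        },
    },
    'loggers': {
        'management_commands': {
            'handlers': ['management_commands'],
            'level': 'DEBUG',
        },
    },
}

logging.config.dictConfig(LOGGING)
# Any module can now do this and write to the dedicated file:
logging.getLogger('management_commands').info('import run started')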
2896d1d0507ac312ab6246c3ccb33bbb6bc6d331 | bluebottle/common/management/commands/makemessages.py | bluebottle/common/management/commands/makemessages.py | import json
import codecs
import tempfile
from django.core.management.commands.makemessages import Command as BaseCommand
class Command(BaseCommand):
""" Extend the makemessages to include some of the fixtures """
fixtures = [
('bb_projects', 'project_data.json'),
('bb_tasks', 'skills.json'),
('geo', 'geo_data.json'),
]
def handle(self, *args, **kwargs):
strings = []
for app, file in self.fixtures:
with open('bluebottle/{}/fixtures/{}'.format(app, file)) as fixture_file:
strings += [fixture['fields']['name'].encode('utf-8') for fixture in json.load(fixture_file)]
with tempfile.NamedTemporaryFile(dir='bluebottle', suffix='.py') as temp:
temp.write('\n'.join(['gettext("{}")'.format(string) for string in strings]))
temp.flush()
return super(Command, self).handle(*args, **kwargs)
| import json
import codecs
import tempfile
from django.core.management.commands.makemessages import Command as BaseCommand
class Command(BaseCommand):
""" Extend the makemessages to include some of the fixtures """
fixtures = [
('bb_projects', 'project_data.json'),
('bb_tasks', 'skills.json'),
('geo', 'geo_data.json'),
]
def handle(self, *args, **kwargs):
with tempfile.NamedTemporaryFile(dir='bluebottle', suffix='.py') as temp:
for app, file in self.fixtures:
with open('bluebottle/{}/fixtures/{}'.format(app, file)) as fixture_file:
for string in [
fixture['fields']['name'].encode('utf-8')
for fixture
in json.load(fixture_file)]:
temp.write('pgettext("{}-fixtures", "{}")\n'.format(app, string))
temp.flush()
return super(Command, self).handle(*args, **kwargs)
| Add a context to the fixture translations | Add a context to the fixture translations
| Python | bsd-3-clause | onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle | ---
+++
@@ -14,13 +14,15 @@
]
def handle(self, *args, **kwargs):
- strings = []
- for app, file in self.fixtures:
- with open('bluebottle/{}/fixtures/{}'.format(app, file)) as fixture_file:
- strings += [fixture['fields']['name'].encode('utf-8') for fixture in json.load(fixture_file)]
+ with tempfile.NamedTemporaryFile(dir='bluebottle', suffix='.py') as temp:
+ for app, file in self.fixtures:
+ with open('bluebottle/{}/fixtures/{}'.format(app, file)) as fixture_file:
+ for string in [
+ fixture['fields']['name'].encode('utf-8')
+ for fixture
+ in json.load(fixture_file)]:
+ temp.write('pgettext("{}-fixtures", "{}")\n'.format(app, string))
- with tempfile.NamedTemporaryFile(dir='bluebottle', suffix='.py') as temp:
- temp.write('\n'.join(['gettext("{}")'.format(string) for string in strings]))
temp.flush()
return super(Command, self).handle(*args, **kwargs) |
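The move from gettext() to pgettext() attaches a per-app context string, so identical fixture names extracted from different apps become separate catalogue entries. A runnable sketch of the lines the temp file ends up holding (fixture names invented):

# Two apps shipping a fixture with the same name stay distinguishable
# because the context string differs:
lines = []
for app, names in [('bb_projects', ['Education']), ('geo', ['Education'])]:
    for name in names:
        lines.append('pgettext("{}-fixtures", "{}")'.format(app, name))
print('\n'.join(lines))
# pgettext("bb_projects-fixtures", "Education")
# pgettext("geo-fixtures", "Education")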
cea2d67bf2f806c295a6c03894efa5c8bc0644a1 | steamplaytime/app.py | steamplaytime/app.py | class App(object):
def __init__(self, ID, name):
self.ID = ID
self.name = name
self.date = []
self.minutes = []
self.last_day = []
def id_str(self):
return str(self.ID)
def get_db_playtime(self, cursor):
query = 'SELECT time_of_record, minutes_played, appid FROM ' \
+ 'playtime_forever WHERE appid=\'' + self.id_str() \
+ '\' ORDER BY time_of_record'
cursor.execute(query)
previous = 0
for row in cursor:
self.date.append(row[0])
self.minutes.append(row[1])
self.last_day.append(row[1] - previous)
previous = row[1]
| class App(object):
def __init__(self, ID, name):
self.ID = ID
self.name = name
self.date = []
self.minutes = []
self.last_day = []
def id_str(self):
return str(self.ID)
def get_db_playtime(self, cursor):
query = 'SELECT time_of_record, minutes_played, appid FROM ' \
+ 'playtime_forever WHERE appid=\'' + self.id_str() \
+ '\' ORDER BY time_of_record'
cursor.execute(query)
previous = 0
for row in cursor:
self.date.append(row[0])
if not self.minutes:
self.last_day.append(0L)
else:
self.last_day.append(row[1] - previous)
previous = row[1]
self.minutes.append(row[1])
| Fix abnormally big spikes in graph | FIX: Fix abnormally big spikes in graph
| Python | mit | fsteffek/steamplog | ---
+++
@@ -17,6 +17,9 @@
previous = 0
for row in cursor:
self.date.append(row[0])
+ if not self.minutes:
+ self.last_day.append(0L)
+ else:
+ self.last_day.append(row[1] - previous)
+ previous = row[1]
self.minutes.append(row[1])
- self.last_day.append(row[1] - previous)
- previous = row[1] |
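The fix seeds the first sample's delta with zero rather than subtracting from an uninitialised previous total. The same per-day delta logic isolated below; 0L is written as 0 so the sketch also runs on Python 3, and the sample totals are invented:

def daily_deltas(cumulative_minutes):
    # Turn cumulative playtime into per-day increments; the first row
    # has no predecessor, so its delta is defined as zero.
    deltas, previous = [], 0
    for i, total in enumerate(cumulative_minutes):
        deltas.append(0 if i == 0 else total - previous)
        previous = total
    return deltas

print(daily_deltas([120, 150, 150, 300]))  # [0, 30, 0, 150]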
8277a92ed16516a60d7abaaee20603017a511d8e | setup.py | setup.py | #
# This file is part of gruvi. Gruvi is free software available under the terms
# of the MIT license. See the file "LICENSE" that was provided together with
# this source file for the licensing terms.
#
# Copyright (c) 2012 the gruvi authors. See the file "AUTHORS" for a complete
# list.
from setuptools import setup
version_info = {
'name': 'gruvi',
'version': '0.1',
'description': 'Synchronous evented IO',
'author': 'Geert Jansen',
'author_email': '[email protected]',
'url': 'https://github.com/geertj/gruvi',
'license': 'MIT',
'classifiers': [
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3'
]
}
setup(
package_dir = { '': 'lib' },
packages = [ 'gruvi', 'gruvi.test' ],
requires = ['greenlet'],
install_requires = ['setuptools'],
test_suite = 'nose.collector',
**version_info
)
| #
# This file is part of gruvi. Gruvi is free software available under the terms
# of the MIT license. See the file "LICENSE" that was provided together with
# this source file for the licensing terms.
#
# Copyright (c) 2012 the gruvi authors. See the file "AUTHORS" for a complete
# list.
from setuptools import setup
version_info = {
'name': 'gruvi',
'version': '0.1',
'description': 'Synchronous evented IO',
'author': 'Geert Jansen',
'author_email': '[email protected]',
'url': 'https://github.com/geertj/gruvi',
'license': 'MIT',
'classifiers': [
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3'
]
}
setup(
package_dir = { '': 'lib' },
packages = [ 'gruvi', 'gruvi.test' ],
requires = ['greenlet', 'pyuv'],
install_requires = ['setuptools'],
test_suite = 'nose.collector',
**version_info
)
| Add missing dependency for pyuv. | Add missing dependency for pyuv.
| Python | mit | geertj/gruvi,geertj/gruvi,swegener/gruvi,swegener/gruvi | ---
+++
@@ -31,7 +31,7 @@
setup(
package_dir = { '': 'lib' },
packages = [ 'gruvi', 'gruvi.test' ],
- requires = ['greenlet'],
+ requires = ['greenlet', 'pyuv'],
install_requires = ['setuptools'],
test_suite = 'nose.collector',
**version_info |
2a782a354bfc403c86b84e31c6fcce8d4135a2c9 | setup.py | setup.py | import os
from setuptools import setup
from pyspeedtest import __program__, __version__
# allow setup.py to be run from any path
os.chdir(os.path.dirname(os.path.abspath(__file__)))
setup(
name=__program__,
version=__version__,
py_modules=['pyspeedtest'],
scripts=['bin/pyspeedtest'],
license='MIT',
description='Speedtest.net python script',
url='https://github.com/fopina/pyspeedtest',
download_url='https://github.com/fopina/pyspeedtest/tarball/v' + __version__,
author='Filipe Pina',
author_email='[email protected]',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.0',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5'
],
keywords=['speed', 'test', 'bandwidth']
)
| import os
from setuptools import setup
from pyspeedtest import __program__, __version__
# allow setup.py to be run from any path
os.chdir(os.path.dirname(os.path.abspath(__file__)))
setup(
name=__program__,
version=__version__,
py_modules=['pyspeedtest'],
scripts=['bin/pyspeedtest'],
license='MIT',
description='Speedtest.net python script',
url='https://github.com/fopina/pyspeedtest',
download_url='https://github.com/fopina/pyspeedtest/tarball/v%s' % __version__,
author='Filipe Pina',
author_email='[email protected]',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.0',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5'
],
keywords=['speed', 'test', 'bandwidth']
)
| Revise download_url string for consistency with main script | Revise download_url string for consistency with main script
| Python | mit | fopina/pyspeedtest,fopina/pyspeedtest | ---
+++
@@ -13,7 +13,7 @@
license='MIT',
description='Speedtest.net python script',
url='https://github.com/fopina/pyspeedtest',
- download_url='https://github.com/fopina/pyspeedtest/tarball/v' + __version__,
+ download_url='https://github.com/fopina/pyspeedtest/tarball/v%s' % __version__,
author='Filipe Pina',
author_email='[email protected]',
classifiers=[ |
f5b0203b5651f25246b79896da2c002b8ddad4d6 | setup.py | setup.py | from setuptools import setup
from pybib import __version__
setup(name='pybib',
version=__version__,
description='Fetch citation information, given a Digital Object Identifier',
long_description=open('README.rst').read(),
url='https://github.com/jgilchrist/pybib',
author='Jonny Gilchrist',
packages=['pybib'],
install_requires=[
'requests',
'python-termstyle',
],
scripts=['bin/bib'])
| from setuptools import setup
from pybib import __version__
with open('README.rst') as f:
readme = f.read()
setup(name='pybib',
version=__version__,
description='Fetch citation information, given a Digital Object Identifier',
long_description=readme,
url='https://github.com/jgilchrist/pybib',
author='Jonny Gilchrist',
packages=['pybib'],
install_requires=[
'requests',
'python-termstyle',
],
scripts=['bin/bib'])
 | Use a context manager for reading in README | Use a context manager for reading in README
| Python | bsd-3-clause | jgilchrist/pybib | ---
+++
@@ -2,10 +2,13 @@
from pybib import __version__
+with open('README.rst') as f:
+ readme = f.read()
+
setup(name='pybib',
version=__version__,
description='Fetch citation information, given a Digital Object Identifier',
- long_description=open('README.rst').read(),
+ long_description=readme,
url='https://github.com/jgilchrist/pybib',
author='Jonny Gilchrist',
packages=['pybib'], |
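The point of the with-block is deterministic cleanup: open(...).read() leaves closing the handle to the garbage collector, while the context manager closes it as soon as the block exits, even on error. A side-by-side sketch, assuming a README.rst next to the script:

# Leaves closing to the garbage collector; works on CPython, but the
# close point is unspecified by the language:
data = open('README.rst').read()

# Closes the handle as soon as the block exits, even if read() raises:
with open('README.rst') as f:
    data = f.read()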
15f1e52deeda43d032d0d7d68ce50300715c5658 | setup.py | setup.py | from authy import __version__
from setuptools import setup, find_packages
# to install authy type the following command:
# python setup.py install
#
setup(
name="authy",
version=__version__,
description="Authy API Client",
author="Authy Inc",
author_email="[email protected]",
url="http://github.com/authy/authy-python",
keywords=["authy", "two factor", "authentication"],
install_requires=["requests>=2.2.1", "simplejson>=3.4.0", "six>=1.8.0"],
packages=find_packages(),
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.5",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Security"
],
long_description="""\
Authy API Client for Python
""" )
| from authy import __version__
from setuptools import setup, find_packages
# to install authy type the following command:
# python setup.py install
with open('README.md') as f:
long_description = f.read()
setup(
name="authy",
version=__version__,
description="Authy API Client",
author="Authy Inc",
author_email="[email protected]",
url="http://github.com/authy/authy-python",
keywords=["authy", "two factor", "authentication"],
install_requires=["requests>=2.2.1", "simplejson>=3.4.0", "six>=1.8.0"],
packages=find_packages(),
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.5",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Security"
],
long_description=long_description,
long_description_content_type='text/markdown'
)
| Update long_description for PyPI description integration | Update long_description for PyPI description integration
| Python | mit | authy/authy-python,authy/authy-python | ---
+++
@@ -3,7 +3,9 @@
# to install authy type the following command:
# python setup.py install
-#
+
+with open('README.md') as f:
+ long_description = f.read()
setup(
name="authy",
@@ -29,6 +31,6 @@
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Security"
],
- long_description="""\
- Authy API Client for Python
-""" )
+ long_description=long_description,
+ long_description_content_type='text/markdown'
+) |
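Because this README is Markdown rather than reStructuredText, PyPI needs the content type spelled out; otherwise the description is treated as reST and may render badly or fail validation. The relevant kwargs in isolation (package name is a placeholder):

from setuptools import setup

with open('README.md') as f:
    long_description = f.read()

setup(
    name='example-package',  # placeholder
    version='0.0.1',
    long_description=long_description,
    long_description_content_type='text/markdown',  # how PyPI should render it
)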
e36f55a8a39c6ee696432c2be43d6af1a57db2c2 | setup.py | setup.py | from distutils.core import setup
from setuptools import find_packages
setup(
name='mediawiki-utilities',
version="0.4.15",
author='Aaron Halfaker',
author_email='[email protected]',
packages=find_packages(),
scripts=[],
url='http://pypi.python.org/pypi/mediawiki-utilities',
license=open('LICENSE').read(),
description='A set of utilities for extracting and processing MediaWiki data.',
long_description=open('README.rst').read(),
install_requires=[
"argparse >= 1.1",
"requests >= 2.0.1",
"pymysql >= 0.6.2"],
test_suite='nose.collector',
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3 :: Only",
"Environment :: Other Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Text Processing :: Linguistic",
"Topic :: Text Processing :: General",
"Topic :: Utilities",
"Topic :: Scientific/Engineering"
],
)
| from distutils.core import setup
from setuptools import find_packages
setup(
name='mediawiki-utilities',
version="0.4.15",
author='Aaron Halfaker',
author_email='[email protected]',
packages=find_packages(),
scripts=[],
url='http://pypi.python.org/pypi/mediawiki-utilities',
license=open('LICENSE').read(),
description='A set of utilities for extracting and processing MediaWiki data.',
long_description=open('README.rst').read(),
install_requires=[
"requests>=2.4",
"pymysql>=0.6.2"],
test_suite='nose.collector',
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3 :: Only",
"Environment :: Other Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Text Processing :: Linguistic",
"Topic :: Text Processing :: General",
"Topic :: Utilities",
"Topic :: Scientific/Engineering"
],
)
| Fix syntax of dependencies to be compatible with debuild | Fix syntax of dependencies to be compatible with debuild
Also drop argparse as an explicit dependency; it's
part of the Python 3 standard library.
| Python | mit | mediawiki-utilities/python-mediawiki-utilities,halfak/Mediawiki-Utilities | ---
+++
@@ -14,9 +14,8 @@
description='A set of utilities for extracting and processing MediaWiki data.',
long_description=open('README.rst').read(),
install_requires=[
- "argparse >= 1.1",
- "requests >= 2.0.1",
- "pymysql >= 0.6.2"],
+ "requests>=2.4",
+ "pymysql>=0.6.2"],
test_suite='nose.collector',
classifiers=[
"Programming Language :: Python", |
ba500886eb0d49fc92188ec2cce7a14326d63ef1 | setup.py | setup.py | from setuptools import setup
setup(name='striketracker',
version='0.5',
description='Command line interface to the Highwinds CDN',
url='https://github.com/Highwinds/striketracker',
author='Mark Cahill',
author_email='[email protected]',
license='MIT',
packages=['striketracker'],
install_requires=[
'requests>=2.0.1',
'PyYAML>=3.10'
],
scripts=['bin/striketracker'],
test_suite='nose.collector',
tests_require=['nose', 'responses', 'coverage'],
zip_safe=False) | from setuptools import setup
setup(name='striketracker',
version='0.5.1',
description='Command line interface to the Highwinds CDN',
url='https://github.com/Highwinds/striketracker',
author='Mark Cahill',
author_email='[email protected]',
license='MIT',
packages=['striketracker'],
install_requires=[
'requests>=2.0.1',
'PyYAML>=3.10'
],
scripts=['bin/striketracker'],
test_suite='nose.collector',
tests_require=['nose', 'responses', 'coverage'],
zip_safe=False) | Bump version number for host clone functionality | Bump version number for host clone functionality
| Python | mit | Highwinds/striketracker | ---
+++
@@ -1,7 +1,7 @@
from setuptools import setup
setup(name='striketracker',
- version='0.5',
+ version='0.5.1',
description='Command line interface to the Highwinds CDN',
url='https://github.com/Highwinds/striketracker',
author='Mark Cahill', |
ae6607feb1d9c7be224bfae13319f672c47368c0 | setup.py | setup.py | from setuptools import setup
setup(
name='django-cacheops',
version='1.3.1',
author='Alexander Schepanovski',
author_email='[email protected]',
description='A slick ORM cache with automatic granular event-driven invalidation for Django.',
long_description=open('README.rst').read(),
url='http://github.com/Suor/django-cacheops',
license='BSD',
packages=[
'cacheops',
'cacheops.management',
'cacheops.management.commands',
'cacheops.templatetags'
],
install_requires=[
'django>=1.3',
'redis>=2.9.1',
'simplejson>=2.2.0',
'six>=1.4.0',
],
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Framework :: Django',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development :: Libraries :: Python Modules',
],
zip_safe=False,
include_package_data=True,
)
| from setuptools import setup
setup(
name='django-cacheops',
version='1.3.1',
author='Alexander Schepanovski',
author_email='[email protected]',
description='A slick ORM cache with automatic granular event-driven invalidation for Django.',
long_description=open('README.rst').read().replace('|Build Status|', '', 1),
url='http://github.com/Suor/django-cacheops',
license='BSD',
packages=[
'cacheops',
'cacheops.management',
'cacheops.management.commands',
'cacheops.templatetags'
],
install_requires=[
'django>=1.3',
'redis>=2.9.1',
'simplejson>=2.2.0',
'six>=1.4.0',
],
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Framework :: Django',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development :: Libraries :: Python Modules',
],
zip_safe=False,
include_package_data=True,
)
| Remove build status from rst for PyPI | Remove build status from rst for PyPI
| Python | bsd-3-clause | whyflyru/django-cacheops,andwun/django-cacheops,Suor/django-cacheops,ErwinJunge/django-cacheops,LPgenerator/django-cacheops,bourivouh/django-cacheops,rutube/django-cacheops,th13f/django-cacheops | ---
+++
@@ -7,7 +7,7 @@
author_email='[email protected]',
description='A slick ORM cache with automatic granular event-driven invalidation for Django.',
- long_description=open('README.rst').read(),
+ long_description=open('README.rst').read().replace('|Build Status|', '', 1),
url='http://github.com/Suor/django-cacheops',
license='BSD',
|
26d0961b210190f43584ddd46db42284761b50e8 | setup.py | setup.py | from setuptools import setup, find_packages
setup(
name='lmsh',
version='0.1',
description="Command line interface to Lab Manager SOAP API",
license='BSD',
author='James Saryerwinnie',
author_email='[email protected]',
packages=find_packages(),
entry_points={
'console_scripts': ['lmsh = labmanager.shell:main'],
},
classifiers=[
'Development Status :: 3 - Alpha'
'License :: OSI Approved :: BSD License',
],
install_requires=[
'suds',
'argparse',
'texttable',
]
)
| from setuptools import setup, find_packages
setup(
name='lmsh',
version='0.1',
description="Command line interface to Lab Manager SOAP API",
license='BSD',
author='James Saryerwinnie',
author_email='[email protected]',
packages=find_packages(),
entry_points={
'console_scripts': ['lmsh = labmanager.shell:main'],
},
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: BSD License',
],
install_requires=[
'suds',
'argparse',
'texttable',
]
)
| Add comma to separate classifiers | Add comma to separate classifiers
| Python | bsd-3-clause | jamesls/labmanager-shell | ---
+++
@@ -12,7 +12,7 @@
'console_scripts': ['lmsh = labmanager.shell:main'],
},
classifiers=[
- 'Development Status :: 3 - Alpha'
+ 'Development Status :: 3 - Alpha',
'License :: OSI Approved :: BSD License',
],
install_requires=[ |
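The bug behind this one-character fix is implicit string literal concatenation: without the comma, Python fuses the two adjacent classifier strings into a single list element. A tiny demonstration:

classifiers = [
    'Development Status :: 3 - Alpha'          # missing comma here...
    'License :: OSI Approved :: BSD License',  # ...fuses the two strings
]
print(len(classifiers))  # 1
print(classifiers[0])
# Development Status :: 3 - AlphaLicense :: OSI Approved :: BSD License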
675f5a269859f1e38419b23a82b732f22f858b74 | setup.py | setup.py | from distutils.core import setup
setup(
name="sqlite_object",
version="0.3.3",
author_email="[email protected]",
description="sqlite-backed collection objects",
author="Luke Hospadaruk",
url="https://github.com/hospadar/sqlite_object",
packages=["sqlite_object"],
)
| from distutils.core import setup
setup(
name="sqlite_object",
version="0.3.3",
author_email="[email protected]",
description="sqlite-backed collection objects",
author="Matt Stancliff via originally Luke Hospadaruk",
url="https://github.com/mattsta/sqlite_object",
packages=["sqlite_object"],
)
| Update package details to point to my repo | Update package details to point to my repo
Is this right? I guess it's right since I'm taking over
responsibility for this fork. Would be nice if the package
ecosystem had a full "history of ownership" feature instead
of just overwriting everything in your own name?
| Python | mit | hospadar/sqlite_object | ---
+++
@@ -3,9 +3,9 @@
setup(
name="sqlite_object",
version="0.3.3",
- author_email="[email protected]",
+ author_email="[email protected]",
description="sqlite-backed collection objects",
- author="Luke Hospadaruk",
- url="https://github.com/hospadar/sqlite_object",
+ author="Matt Stancliff via originally Luke Hospadaruk",
+ url="https://github.com/mattsta/sqlite_object",
packages=["sqlite_object"],
) |
0740d16b60a3ecc26e72c51ea85257b2c0d03d18 | setup.py | setup.py | from __future__ import absolute_import
from setuptools import setup
long_description="""TravisCI results
.. image:: https://travis-ci.org/nanonyme/simplecpreprocessor.svg
"""
setup(
name = "simplecpreprocessor",
author = "Seppo Yli-Olli",
author_email = "[email protected]",
description = "Simple C preprocessor for usage eg before CFFI",
keywords = "python c preprocessor",
license = "BSD",
url = "https://github.com/nanonyme/simplecpreprocessor",
py_modules=["simplecpreprocessor"],
long_description=long_description,
use_scm_version=True,
setup_requires=["setuptools_scm"],
classifiers=[
"Development Status :: 4 - Beta",
"Topic :: Utilities",
"License :: OSI Approved :: BSD License",
],
)
| from __future__ import absolute_import
from setuptools import setup
long_description="""http://github.com/nanonyme/simplepreprocessor"""
setup(
name = "simplecpreprocessor",
author = "Seppo Yli-Olli",
author_email = "[email protected]",
description = "Simple C preprocessor for usage eg before CFFI",
keywords = "python c preprocessor",
license = "BSD",
url = "https://github.com/nanonyme/simplecpreprocessor",
py_modules=["simplecpreprocessor"],
long_description=long_description,
use_scm_version=True,
setup_requires=["setuptools_scm"],
classifiers=[
"Development Status :: 4 - Beta",
"Topic :: Utilities",
"License :: OSI Approved :: BSD License",
],
)
| Make long description not point to Travis since can't guarantee tag | Make long description not point to Travis since can't guarantee tag
| Python | mit | nanonyme/simplecpreprocessor | ---
+++
@@ -1,10 +1,7 @@
from __future__ import absolute_import
from setuptools import setup
-long_description="""TravisCI results
- .. image:: https://travis-ci.org/nanonyme/simplecpreprocessor.svg
-"""
-
+long_description="""http://github.com/nanonyme/simplepreprocessor"""
setup(
name = "simplecpreprocessor", |
0c9daff813d81e712b92fb78a02ef8bc17cdfdc3 | setup.py | setup.py | """
Flask-Selfdoc
-------------
Flask selfdoc automatically creates online documentation for your flask app.
"""
from setuptools import setup
def readme():
with open('README.md') as f:
return f.read()
setup(
name='Flask-Selfdoc',
version='0.2',
url='http://github.com/jwg4/flask-selfdoc',
license='MIT',
author='Arnaud Coomans',
maintainer='Jack Grahl',
maintainer_email='[email protected]',
description='Documentation generator for flask',
long_description=readme(),
# py_modules=['flask_autodoc'],
# if you would be using a package instead use packages instead
# of py_modules:
packages=['flask_selfdoc'],
package_data={'flask_selfdoc': ['templates/autodoc_default.html']},
zip_safe=False,
include_package_data=True,
platforms='any',
install_requires=[
'Flask'
],
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
],
test_suite='tests',
)
| """
Flask-Selfdoc
-------------
Flask selfdoc automatically creates online documentation for your flask app.
"""
from setuptools import setup
def readme():
with open('README.md') as f:
return f.read()
setup(
name='Flask-Selfdoc',
version='1.0a1',
url='http://github.com/jwg4/flask-selfdoc',
license='MIT',
author='Arnaud Coomans',
maintainer='Jack Grahl',
maintainer_email='[email protected]',
description='Documentation generator for flask',
long_description=readme(),
# py_modules=['flask_autodoc'],
# if you would be using a package instead use packages instead
# of py_modules:
packages=['flask_selfdoc'],
package_data={'flask_selfdoc': ['templates/autodoc_default.html']},
zip_safe=False,
include_package_data=True,
platforms='any',
install_requires=[
'Flask'
],
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
],
test_suite='tests',
)
| Add a prerelease version number. | Add a prerelease version number.
| Python | mit | jwg4/flask-autodoc,jwg4/flask-autodoc | ---
+++
@@ -14,7 +14,7 @@
setup(
name='Flask-Selfdoc',
- version='0.2',
+ version='1.0a1',
url='http://github.com/jwg4/flask-selfdoc',
license='MIT',
author='Arnaud Coomans', |
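The '1.0a1' string is a PEP 440 pre-release identifier, which sorts before the final 1.0, so pip ignores it unless pre-releases are requested explicitly. A quick check, assuming the packaging library is installed:

from packaging.version import Version

assert Version('0.2') < Version('1.0a1') < Version('1.0')
print(Version('1.0a1').is_prerelease)  # True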
b0bc5c5573e33b67cf0803fb7da4bb0d6a875fc6 | setup.py | setup.py | try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import sys
data = dict(
name='python-datetime_tz',
version='0.1',
author='Tim Ansell',
author_email='[email protected]',
packages=['datetime_tz'],
install_requires=['pytz'],
)
if sys.version[:3] < '3.0':
data['install_requires'].append('python-dateutil == 1.5')
else:
data['install_requires'].append('python-dateutil == 2.0')
setup(**data)
| try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import sys
data = dict(
name='python-datetime_tz',
version='0.1',
author='Tim Ansell',
author_email='[email protected]',
packages=['datetime_tz'],
install_requires=['pytz'],
)
if sys.version[:3] < '3.0':
data['install_requires'].append('python-dateutil >= 1.4')
else:
data['install_requires'].append('python-dateutil == 2.0')
setup(**data)
| Reduce the requirements a little bit. | Reduce the requirements a little bit.
| Python | apache-2.0 | matthewhampton/python-datetime-tz,dave42/python-datetime-tz,j5int/python-datetime-tz,mithro/python-datetime-tz,davidfraser/python-datetime-tz,mithro/python-datetime-tz,davidfraser/python-datetime-tz,j5int/python-datetime-tz | ---
+++
@@ -15,7 +15,7 @@
)
if sys.version[:3] < '3.0':
- data['install_requires'].append('python-dateutil == 1.5')
+ data['install_requires'].append('python-dateutil >= 1.4')
else:
data['install_requires'].append('python-dateutil == 2.0')
|
509ca7039b6b914785f44ec7aa2de5b644075a37 | setup.py | setup.py | from setuptools import setup, find_packages
package = 'aws-utils'
version = '0.2.7'
INSTALL_REQUIRES = [
'boto>=2.38.0',
'boto3>=1.2.3',
'smart-open>=1.3.1',
'dateutils>=0.6.6',
'retrying>=1.3.3'
]
setup(
name=package,
version=version,
author="Skimlinks Ltd.",
author_email="[email protected]",
description="collection of AWS useful functions",
url='https://github.com/skimhub/aws-utils',
packages=find_packages(exclude=['test']),
install_requires=INSTALL_REQUIRES,
)
| from setuptools import setup, find_packages
package = 'aws-utils'
version = '0.2.7'
INSTALL_REQUIRES = [
'boto>=2.38.0',
'boto3>=1.2.3',
'smart_open==1.3.2', # smart open must be 1.3.2 because from 1.3.3 onward the gzip write functionality has been removed
'dateutils>=0.6.6',
'retrying>=1.3.3'
]
setup(
name=package,
version=version,
author="Skimlinks Ltd.",
author_email="[email protected]",
description="collection of AWS useful functions",
url='https://github.com/skimhub/aws-utils',
packages=find_packages(exclude=['test']),
install_requires=INSTALL_REQUIRES,
)
| Fix smart_open to version which supports gzip streaming | Fix smart_open to version which supports gzip streaming
| Python | apache-2.0 | skimhub/aws-utils | ---
+++
@@ -6,7 +6,7 @@
INSTALL_REQUIRES = [
'boto>=2.38.0',
'boto3>=1.2.3',
- 'smart-open>=1.3.1',
+ 'smart_open==1.3.2', # smart open must be 1.3.2 because from 1.3.3 onward the gzip write functionality has been removed
'dateutils>=0.6.6',
'retrying>=1.3.3'
] |
8eb9de99511a96b1966ece6892fb937add5b2295 | setup.py | setup.py | #!/usr/bin/env python
from setuptools import setup
setup(
name='aubreylib',
version='2.0.0',
description='A helper library for the Aubrey access system.',
author='University of North Texas Libraries',
author_email='[email protected]',
url='https://github.com/unt-libraries/aubreylib',
license='BSD',
packages=['aubreylib'],
install_requires=[
'lxml>=3.3.3',
'pypairtree>=1.0.0',
'pyuntl @ git+https://github.com/unt-libraries/pyuntl@master',
],
classifiers=[
'Intended Audience :: Developers',
'Natural Language :: English',
'Programming Language :: Python',
'Programming Language :: Python :: 3.7',
]
)
| #!/usr/bin/env python
from setuptools import setup
setup(
name='aubreylib',
version='2.0.0',
description='A helper library for the Aubrey access system.',
author='University of North Texas Libraries',
author_email='[email protected]',
url='https://github.com/unt-libraries/aubreylib',
license='BSD',
packages=['aubreylib'],
install_requires=[
'lxml>=3.3.3',
'pypairtree>=1.0.0',
'pyuntl @ git+https://github.com/unt-libraries/pyuntl.git@master#egg=pyuntl',
],
classifiers=[
'Intended Audience :: Developers',
'Natural Language :: English',
'Programming Language :: Python',
'Programming Language :: Python :: 3.7',
]
)
| Update pyuntl source to match aubrey's requirement | Update pyuntl source to match aubrey's requirement | Python | bsd-3-clause | unt-libraries/aubreylib | ---
+++
@@ -14,7 +14,7 @@
install_requires=[
'lxml>=3.3.3',
'pypairtree>=1.0.0',
- 'pyuntl @ git+https://github.com/unt-libraries/pyuntl@master',
+ 'pyuntl @ git+https://github.com/unt-libraries/pyuntl.git@master#egg=pyuntl',
],
classifiers=[ |
ea991176d3f0562b5000dbd03320a9145129e586 | setup.py | setup.py | import os
import setuptools
setuptools.setup(
name='gymnasium',
version='0.0.2',
packages=setuptools.find_packages(),
author='Leif Johnson',
author_email='[email protected]',
description='yet another OpenGL-with-physics simulation framework',
long_description=open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'README.md')).read(),
license='MIT',
url='http://github.com/lmjohns3/py-sim/',
keywords=('simulation '
'physics '
'ode '
'visualization '
),
install_requires=['climate', 'numpy', 'pyglet', 'Open-Dynamics-Engine'],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering :: Physics',
'Topic :: Scientific/Engineering :: Visualization',
],
)
| import os
import setuptools
setuptools.setup(
name='lmj.sim',
version='0.0.2',
packages=setuptools.find_packages(),
author='Leif Johnson',
author_email='[email protected]',
description='yet another OpenGL-with-physics simulation framework',
long_description=open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'README.md')).read(),
license='MIT',
url='http://github.com/lmjohns3/py-sim/',
keywords=('simulation '
'physics '
'ode '
'visualization '
),
install_requires=['climate', 'numpy', 'pyglet', 'Open-Dynamics-Engine'],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering :: Physics',
'Topic :: Scientific/Engineering :: Visualization',
],
)
| Fix up package naming confusion for the moment. | Fix up package naming confusion for the moment.
| Python | mit | EmbodiedCognition/pagoda,EmbodiedCognition/pagoda | ---
+++
@@ -2,7 +2,7 @@
import setuptools
setuptools.setup(
- name='gymnasium',
+ name='lmj.sim',
version='0.0.2',
packages=setuptools.find_packages(),
author='Leif Johnson', |
c1a152126aa92e7b0a1139eb9c172f2aedc4e744 | setup.py | setup.py | #!/usr/bin/env python
from setuptools import setup, find_packages
def readme():
with open('README.rst') as f:
return f.read()
setup(name='django-backupdb',
version='1.0',
description='Management commands for backing up and restoring databases in Django.',
long_description=readme(),
author='David Sanders',
author_email='[email protected]',
keywords='django database backup',
url='https://github.com/davesque/django-backupdb',
packages=find_packages(),
platforms='any',
license='MIT',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
])
| #!/usr/bin/env python
from setuptools import setup, find_packages
def readme():
with open('README.rst') as f:
return f.read()
setup(name='django-backupdb',
version='0.2',
description='Management commands for backing up and restoring databases in Django.',
long_description=readme(),
author='David Sanders',
author_email='[email protected]',
keywords='django database backup',
url='https://github.com/davesque/django-backupdb',
packages=find_packages(),
platforms='any',
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
])
| Set version number, add beta status | Set version number, add beta status
| Python | bsd-2-clause | fusionbox/django-backupdb | ---
+++
@@ -9,7 +9,7 @@
setup(name='django-backupdb',
- version='1.0',
+ version='0.2',
description='Management commands for backing up and restoring databases in Django.',
long_description=readme(),
author='David Sanders',
@@ -20,6 +20,7 @@
platforms='any',
license='MIT',
classifiers=[
+ 'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
]) |
76aaa3a07fb181a55bae6e7ea03ad04522720ea1 | setup.py | setup.py | from setuptools import setup, find_packages
import census
long_description = open('README.rst').read()
setup(
name="census",
version=census.__version__,
py_modules=['census'],
author="Jeremy Carbaugh",
author_email='[email protected]',
license="BSD",
url="http://github.com/sunlightlabs/census",
long_description=long_description,
packages=find_packages(),
description="A wrapper for the US Census Bureau's API",
platforms=["any"],
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python",
],
install_requires=['requests==1.1.0', 'us==0.7'],
)
| from setuptools import setup, find_packages
import census
long_description = open('README.rst').read()
setup(
name="census",
version=census.__version__,
py_modules=['census'],
author="Jeremy Carbaugh",
author_email='[email protected]',
license="BSD",
url="http://github.com/sunlightlabs/census",
long_description=long_description,
packages=find_packages(),
description="A wrapper for the US Census Bureau's API",
platforms=["any"],
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python",
],
install_requires=['requests>=1.1.0', 'us>=0.7'],
)
| Use >= for install requirements | Use >= for install requirements
The currently listed requirements are getting a bit old and it's hard to resolve them
with the requirements in other libraries. | Python | bsd-3-clause | sunlightlabs/census,joehand/census,dmc2015/census,UDST/census | ---
+++
@@ -23,5 +23,5 @@
"Operating System :: OS Independent",
"Programming Language :: Python",
],
- install_requires=['requests==1.1.0', 'us==0.7'],
+ install_requires=['requests>=1.1.0', 'us>=0.7'],
) |
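The reasoning here is that exact pins inside a library propagate to every downstream project and collide with other pins, while a floor leaves the resolver room. The difference shown with specifier sets, assuming the packaging library is installed:

from packaging.specifiers import SpecifierSet

exact = SpecifierSet('==1.1.0')
floor = SpecifierSet('>=1.1.0')
for candidate in ('1.1.0', '2.4.0'):
    print(candidate, candidate in exact, candidate in floor)
# 1.1.0 True True
# 2.4.0 False True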
02240275e9adba0c40f0b9a1f6d82e88272e71c8 | setup.py | setup.py | import os
from setuptools import setup
version = '0.1.0'
name = 'apns-proxy-client'
short_description = 'Client library for APNs Proxy Server.'
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
requires = [
'pyzmq',
'simplejson'
]
setup(
name=name,
version=version,
author='Takashi Nishibayashi',
author_email="[email protected]",
description=short_description,
long_description=read('README.rst'),
license="BSD",
platforms='any',
keywords="apns",
install_requires=requires,
url='https://github.com/voyagegroup/apns-proxy-client-py',
packages=['apns_proxy_client', 'tests'],
classifiers=[
"Development Status :: 4 - Beta",
"Topic :: Utilities",
"License :: OSI Approved :: BSD License",
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
],
)
| import os
from setuptools import setup
version = '0.1.0'
name = 'apns-proxy-client'
short_description = 'Client library for APNs Proxy Server.'
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
def read_README():
try:
# pip install from PyPI
return read('README.rst')
except:
# pip install from github
return read('README.md')
requires = [
'pyzmq',
'simplejson'
]
setup(
name=name,
version=version,
author='Takashi Nishibayashi',
author_email="[email protected]",
description=short_description,
long_description=read_README(),
license="BSD",
platforms='any',
keywords="apns",
install_requires=requires,
url='https://github.com/voyagegroup/apns-proxy-client-py',
packages=['apns_proxy_client', 'tests'],
classifiers=[
"Development Status :: 4 - Beta",
"Topic :: Utilities",
"License :: OSI Approved :: BSD License",
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
],
)
| Fix install error for [pip install git+git://..] | Fix install error for [pip install git+git://..]
| Python | bsd-2-clause | voyagegroup/apns-proxy-client-py | ---
+++
@@ -7,6 +7,15 @@
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
+
+def read_README():
+ try:
+ # pip install from PyPI
+ return read('README.rst')
+ except:
+ # pip install from github
+ return read('README.md')
+
requires = [
'pyzmq',
@@ -19,7 +28,7 @@
author='Takashi Nishibayashi',
author_email="[email protected]",
description=short_description,
- long_description=read('README.rst'),
+ long_description=read_README(),
license="BSD",
platforms='any',
keywords="apns", |
8a0ea95de9343b901d7d3cd8b3c5b0370d43eb0f | setup.py | setup.py | #!/usr/bin/env python
"""Setup script for GDM."""
import setuptools
from gdm import __project__, __version__, CLI, DESCRIPTION
import os
if os.path.exists('README.rst'):
README = open('README.rst').read()
else:
README = "" # a placeholder, readme is generated on release
CHANGES = open('CHANGES.md').read()
setuptools.setup(
name=__project__,
version=__version__,
description=DESCRIPTION,
url='https://github.com/jacebrowning/gdm',
author='Jace Browning',
author_email='[email protected]',
packages=setuptools.find_packages(),
entry_points={'console_scripts': [CLI + ' = gdm.cli:main']},
long_description=(README + '\n' + CHANGES),
license='MIT',
classifiers=[
'Development Status :: 1 - Planning',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
install_requires=open('requirements.txt').readlines(),
)
| #!/usr/bin/env python
"""Setup script for GDM."""
import setuptools
from gdm import __project__, __version__, CLI, DESCRIPTION
import os
if os.path.exists('README.rst'):
README = open('README.rst').read()
else:
README = "" # a placeholder, readme is generated on release
CHANGES = open('CHANGES.md').read()
setuptools.setup(
name=__project__,
version=__version__,
description=DESCRIPTION,
url='https://github.com/jacebrowning/gdm',
author='Jace Browning',
author_email='[email protected]',
packages=setuptools.find_packages(),
entry_points={'console_scripts': [CLI + ' = gdm.cli:main']},
long_description=(README + '\n' + CHANGES),
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: MacOS',
'Operating System :: POSIX',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Software Development',
'Topic :: Software Development :: Build Tools',
'Topic :: Software Development :: Version Control',
'Topic :: System :: Software Distribution',
],
install_requires=open('requirements.txt').readlines(),
)
| Update classifiers for a beta release | Update classifiers for a beta release
| Python | mit | jacebrowning/gitman,jacebrowning/gdm | ---
+++
@@ -30,11 +30,19 @@
long_description=(README + '\n' + CHANGES),
license='MIT',
classifiers=[
- 'Development Status :: 1 - Planning',
+ 'Development Status :: 4 - Beta',
+ 'Environment :: Console',
+ 'Intended Audience :: Developers',
+ 'License :: OSI Approved :: MIT License',
'Natural Language :: English',
- 'Operating System :: OS Independent',
+ 'Operating System :: MacOS',
+ 'Operating System :: POSIX',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
+ 'Topic :: Software Development',
+ 'Topic :: Software Development :: Build Tools',
+ 'Topic :: Software Development :: Version Control',
+ 'Topic :: System :: Software Distribution',
],
install_requires=open('requirements.txt').readlines(), |
6c7089177a970f4535cf1494ea17b6412fa36bf0 | clifford/_version.py | clifford/_version.py | # Package versioning solution originally found here:
# http://stackoverflow.com/q/458550
# Store the version here so:
# 1) we don't load dependencies by storing it in __init__.py
# 2) we can import it in setup.py for the same reason
# 3) we can import it into your module
__version__ = '1.3.0dev1'
| # Package versioning solution originally found here:
# http://stackoverflow.com/q/458550
# Store the version here so:
# 1) we don't load dependencies by storing it in __init__.py
# 2) we can import it in setup.py for the same reason
# 3) we can import it into your module
__version__ = '1.3.0dev2'
| Create a pre-release with the new zenodo metadata | Create a pre-release with the new zenodo metadata
A recent PR added .zenodo.json. Making a pre-release should trigger Zenodo to rebuild its info page. | Python | bsd-3-clause | arsenovic/clifford,arsenovic/clifford | ---
+++
@@ -5,4 +5,4 @@
# 1) we don't load dependencies by storing it in __init__.py
# 2) we can import it in setup.py for the same reason
# 3) we can import it into your module
-__version__ = '1.3.0dev1'
+__version__ = '1.3.0dev2' |
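The three numbered comments describe the single-source-version pattern. One common way for setup.py to consume such a file without importing the package (and thus its dependencies) is to scrape the string out directly; a sketch, run from the repository root:

import re

# Read the version string without importing the package itself:
with open('clifford/_version.py') as f:
    version = re.search(r"__version__ = '([^']+)'", f.read()).group(1)
print(version)  # e.g. 1.3.0dev2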
56f7be6efa949eba46c5ba65b8c2f614cb09be8f | setup.py | setup.py | from setuptools import setup, find_packages
import os, sys
version = '0.0.1'
here = os.path.abspath(os.path.dirname(__file__))
try:
README = open(os.path.join(here, 'README.rst')).read()
except IOError:
README = ''
TEST_DEPENDENCIES = ['mock', 'pymongo >= 2.7', 'sqlalchemy', 'pillow']
py_version = sys.version_info[:2]
if py_version[0] == 2:
TEST_DEPENDENCIES += ['boto']
setup(name='filedepot',
version=version,
description="Toolkit for storing files and attachments in web applications",
long_description=README,
# Get more strings from http://www.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
"Environment :: Web Environment",
"Topic :: Software Development :: Libraries :: Python Modules",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 2"
],
keywords='storage files s3 gridfs mongodb aws sqlalchemy wsgi',
author='Alessandro Molina',
author_email='[email protected]',
url='https://github.com/amol-/depot',
license='MIT',
packages=find_packages(exclude=['ez_setup']),
include_package_data=True,
tests_require=TEST_DEPENDENCIES,
test_suite='nose.collector',
zip_safe=False,
)
| from setuptools import setup, find_packages
import os, sys
version = '0.0.1'
here = os.path.abspath(os.path.dirname(__file__))
try:
README = open(os.path.join(here, 'README.rst')).read()
except IOError:
README = ''
TEST_DEPENDENCIES = ['mock', 'pymongo >= 2.7', 'sqlalchemy', 'pillow']
py_version = sys.version_info[:2]
if py_version[0] == 2:
TEST_DEPENDENCIES += ['boto']
INSTALL_DEPENDENCIES = []
if py_version == (2, 6):
INSTALL_DEPENDENCIES += ['importlib']
setup(name='filedepot',
version=version,
description="Toolkit for storing files and attachments in web applications",
long_description=README,
# Get more strings from http://www.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
"Environment :: Web Environment",
"Topic :: Software Development :: Libraries :: Python Modules",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 2"
],
keywords='storage files s3 gridfs mongodb aws sqlalchemy wsgi',
author='Alessandro Molina',
author_email='[email protected]',
url='https://github.com/amol-/depot',
license='MIT',
packages=find_packages(exclude=['ez_setup']),
include_package_data=True,
install_requires=INSTALL_DEPENDENCIES,
tests_require=TEST_DEPENDENCIES,
test_suite='nose.collector',
zip_safe=False,
)
| Add importlib dependency on Py2.6 | Add importlib dependency on Py2.6
| Python | mit | miraculixx/depot,rlam3/depot,amol-/depot,eprikazc/depot,miraculixx/depot | ---
+++
@@ -15,6 +15,9 @@
if py_version[0] == 2:
TEST_DEPENDENCIES += ['boto']
+INSTALL_DEPENDENCIES = []
+if py_version == (2, 6):
+ INSTALL_DEPENDENCIES += ['importlib']
setup(name='filedepot',
version=version,
@@ -34,6 +37,7 @@
license='MIT',
packages=find_packages(exclude=['ez_setup']),
include_package_data=True,
+ install_requires=INSTALL_DEPENDENCIES,
tests_require=TEST_DEPENDENCIES,
test_suite='nose.collector',
zip_safe=False, |
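The sys.version_info branch computes the dependency list when setup.py runs. For comparison, the same condition can be expressed declaratively with a PEP 508 environment marker, which pip evaluates at install time; a sketch, not filedepot's actual setup:

from setuptools import setup

setup(
    name='example',  # placeholder
    version='0.0.1',
    # Same effect as the sys.version_info check, but evaluated by pip
    # on the installing interpreter rather than at sdist-build time:
    install_requires=['importlib; python_version == "2.6"'],
)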
1709282ed45baa582f09324bd269eae3b6c40a05 | setup.py | setup.py | import multiprocessing # noqa # stop tests breaking tox
from setuptools import find_packages, setup
import tvrenamr
def long_desc():
with open('README.rst') as f:
readme = f.read()
with open('CHANGELOG.rst') as f:
changelog = f.read()
return readme + '\n\n' + changelog
setup(
name=tvrenamr.__title__,
version=tvrenamr.__version__,
description='Rename tv show files using online databases',
long_description=long_desc,
author=tvrenamr.__author__,
author_email='[email protected]',
url='http://tvrenamr.info',
license='MIT',
packages=find_packages(exclude=['docs', 'tests']),
entry_points={'console_scripts': ['tvr=tvrenamr.frontend:run']},
classifiers=[
'Development Status :: 6 - Mature',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Topic :: Utilities',
],
)
| import multiprocessing # noqa # stop tests breaking tox
from setuptools import find_packages, setup
import tvrenamr
def long_desc():
with open('README.rst') as f:
readme = f.read()
with open('CHANGELOG.rst') as f:
changelog = f.read()
return readme + '\n\n' + changelog
setup(
name=tvrenamr.__title__,
version=tvrenamr.__version__,
description='Rename tv show files using online databases',
long_description=long_desc(),
author=tvrenamr.__author__,
author_email='[email protected]',
url='http://tvrenamr.info',
license='MIT',
packages=find_packages(exclude=['docs', 'tests']),
entry_points={'console_scripts': ['tvr=tvrenamr.frontend:run']},
classifiers=[
'Development Status :: 6 - Mature',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Topic :: Utilities',
],
)
| Call long_desc() instead of passing the function object | Call long_desc() instead of passing the function object
| Python | mit | ghickman/tvrenamr,wintersandroid/tvrenamr | ---
+++
@@ -18,7 +18,7 @@
name=tvrenamr.__title__,
version=tvrenamr.__version__,
description='Rename tv show files using online databases',
- long_description=long_desc,
+ long_description=long_desc(),
author=tvrenamr.__author__,
author_email='[email protected]',
url='http://tvrenamr.info', |
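The one-character diff matters because setuptools expects `long_description` to be a string; a minimal illustration of the difference:
```python
def long_desc():
    return "readme + changelog text"

broken = long_desc    # the function object itself -- not a string
fixed = long_desc()   # the string value setuptools can render
```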
fefee2d3de81f2072f97ffb83a56049b7f81028c | setup.py | setup.py | #!/usr/bin/env python
from __future__ import print_function
from setuptools import setup
setup(name="abzer",
author="Wieland Hoffmann",
author_email="[email protected]",
packages=["abzer"],
package_dir={"abzer": "abzer"},
download_url=["https://github.com/mineo/abzer/tarball/master"],
url=["http://github.com/mineo/abzer"],
license="MIT",
classifiers=["Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.5"],
description="",
long_description=open("README.rst").read(),
setup_requires=["setuptools_scm"],
use_scm_version={"write_to": "abzer/version.py"},
install_requires=["aiohttp"],
extras_require={
'docs': ['sphinx']}
)
| #!/usr/bin/env python
from __future__ import print_function
from setuptools import setup
setup(name="abzer",
author="Wieland Hoffmann",
author_email="[email protected]",
packages=["abzer"],
package_dir={"abzer": "abzer"},
download_url=["https://github.com/mineo/abzer/tarball/master"],
url=["http://github.com/mineo/abzer"],
license="MIT",
classifiers=["Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.5"],
description="",
long_description=open("README.rst").read(),
setup_requires=["setuptools_scm"],
use_scm_version={"write_to": "abzer/version.py"},
install_requires=["aiohttp"],
extras_require={
'docs': ['sphinx']},
entry_points={
'console_scripts': ['abzer=abzer.__main__:main']
}
)
| Add a console_scripts entry point | Add a console_scripts entry point
| Python | mit | mineo/abzer,mineo/abzer | ---
+++
@@ -22,5 +22,8 @@
use_scm_version={"write_to": "abzer/version.py"},
install_requires=["aiohttp"],
extras_require={
- 'docs': ['sphinx']}
+ 'docs': ['sphinx']},
+ entry_points={
+ 'console_scripts': ['abzer=abzer.__main__:main']
+ }
) |
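The new entry point makes pip/setuptools generate an `abzer` executable that imports `abzer.__main__` and calls its `main()`; a hypothetical sketch of that target module (its real contents are not shown in this record):
```python
# abzer/__main__.py -- hypothetical entry-point target
import sys


def main():
    # parse arguments and run the application here
    return 0


if __name__ == '__main__':
    sys.exit(main())
```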
1c953644fe922ed1a523279c86e4ad1724112849 | saleor/dashboard/product/api.py | saleor/dashboard/product/api.py | from django.core.exceptions import ObjectDoesNotExist
from django.shortcuts import get_object_or_404
from rest_framework import renderers, status
from rest_framework.decorators import renderer_classes, api_view
from rest_framework.response import Response
from saleor.dashboard.views import staff_member_required
from rest_framework import serializers
from ...product.models import Product
class ReorderProductImagesSerializer(serializers.Serializer):
pk = serializers.IntegerField()
order = serializers.IntegerField()
@staff_member_required
@api_view(['POST'])
@renderer_classes([renderers.JSONRenderer])
def reorder_product_images(request, product_pk):
product = get_object_or_404(Product, pk=product_pk)
serializer = ReorderProductImagesSerializer(data=request.data, many=True)
if serializer.is_valid():
for item in serializer.data:
pk, order = item['pk'], item['order']
try:
img = product.images.get(pk=pk)
except ObjectDoesNotExist:
pass
else:
img.order = order
img.save()
return Response(status=status.HTTP_200_OK)
else:
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
| from django.core.exceptions import ObjectDoesNotExist
from django.shortcuts import get_object_or_404
from rest_framework import renderers, status
from rest_framework.decorators import renderer_classes, api_view
from rest_framework.response import Response
from saleor.dashboard.views import staff_member_required
from rest_framework import serializers
from ...product.models import Product
class ReorderProductImagesSerializer(serializers.Serializer):
pk = serializers.IntegerField()
order = serializers.IntegerField()
@staff_member_required
@api_view(['POST'])
@renderer_classes([renderers.JSONRenderer])
def reorder_product_images(request, product_pk):
product = get_object_or_404(Product, pk=product_pk)
serializer = ReorderProductImagesSerializer(data=request.data['data'], many=True)
if serializer.is_valid():
for item in serializer.data:
pk, order = item['pk'], item['order']
try:
img = product.images.get(pk=pk)
except ObjectDoesNotExist:
pass
else:
img.order = order
img.save()
return Response(status=status.HTTP_200_OK)
else:
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
| Allow posting data as a dict from jQuery | Allow posting data as a dict from jQuery
| Python | bsd-3-clause | avorio/saleor,avorio/saleor,maferelo/saleor,taedori81/saleor,jreigel/saleor,Drekscott/Motlaesaleor,josesanch/saleor,jreigel/saleor,rodrigozn/CW-Shop,HyperManTT/ECommerceSaleor,Drekscott/Motlaesaleor,rchav/vinerack,maferelo/saleor,HyperManTT/ECommerceSaleor,dashmug/saleor,UITools/saleor,car3oon/saleor,taedori81/saleor,KenMutemi/saleor,mociepka/saleor,Drekscott/Motlaesaleor,spartonia/saleor,paweltin/saleor,UITools/saleor,KenMutemi/saleor,car3oon/saleor,paweltin/saleor,UITools/saleor,itbabu/saleor,avorio/saleor,laosunhust/saleor,itbabu/saleor,dashmug/saleor,tfroehlich82/saleor,rchav/vinerack,laosunhust/saleor,Drekscott/Motlaesaleor,arth-co/saleor,arth-co/saleor,dashmug/saleor,spartonia/saleor,itbabu/saleor,taedori81/saleor,laosunhust/saleor,tfroehlich82/saleor,josesanch/saleor,josesanch/saleor,mociepka/saleor,taedori81/saleor,KenMutemi/saleor,rodrigozn/CW-Shop,HyperManTT/ECommerceSaleor,paweltin/saleor,arth-co/saleor,maferelo/saleor,UITools/saleor,arth-co/saleor,jreigel/saleor,paweltin/saleor,spartonia/saleor,tfroehlich82/saleor,mociepka/saleor,rchav/vinerack,laosunhust/saleor,avorio/saleor,rodrigozn/CW-Shop,car3oon/saleor,spartonia/saleor,UITools/saleor | ---
+++
@@ -20,7 +20,7 @@
@renderer_classes([renderers.JSONRenderer])
def reorder_product_images(request, product_pk):
product = get_object_or_404(Product, pk=product_pk)
- serializer = ReorderProductImagesSerializer(data=request.data, many=True)
+ serializer = ReorderProductImagesSerializer(data=request.data['data'], many=True)
if serializer.is_valid():
for item in serializer.data:
pk, order = item['pk'], item['order'] |
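After this change the serializer reads the reorder list from a `data` key, matching how the jQuery client presumably wraps it; a hypothetical request body (pk/order values illustrative):
```python
# Hypothetical JSON body POSTed by the dashboard, matching
# ReorderProductImagesSerializer(..., many=True):
payload = {
    "data": [
        {"pk": 3, "order": 0},
        {"pk": 1, "order": 1},
        {"pk": 2, "order": 2},
    ]
}
```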
36e9f441d75109bef7cd0e0bd17c40db0a6564d4 | ncbi_genome_download/__init__.py | ncbi_genome_download/__init__.py | """Download genome files from the NCBI"""
from .config import (
SUPPORTED_TAXONOMIC_GROUPS,
NgdConfig
)
from .core import (
args_download,
download,
argument_parser,
)
__version__ = '0.2.8'
__all__ = [
'download',
'args_download',
'SUPPORTED_TAXONOMIC_GROUPS',
'NgdConfig',
'argument_parser'
]
| """Download genome files from the NCBI"""
from .config import (
SUPPORTED_TAXONOMIC_GROUPS,
NgdConfig
)
from .core import (
args_download,
download,
argument_parser,
)
__version__ = '0.2.9'
__all__ = [
'download',
'args_download',
'SUPPORTED_TAXONOMIC_GROUPS',
'NgdConfig',
'argument_parser'
]
| Bump version number to 0.2.9 | Bump version number to 0.2.9
Signed-off-by: Kai Blin <[email protected]>
| Python | apache-2.0 | kblin/ncbi-genome-download,kblin/ncbi-genome-download | ---
+++
@@ -9,7 +9,7 @@
argument_parser,
)
-__version__ = '0.2.8'
+__version__ = '0.2.9'
__all__ = [
'download',
'args_download', |
8e14f3a7d40d386185d445afc18e6add57cd107e | LR/lr/lib/helpers.py | LR/lr/lib/helpers.py | """Helper functions
Consists of functions to typically be used within templates, but also
available to Controllers. This module is available to templates as 'h'.
"""
# Import helpers as desired, or define your own, ie:
#from webhelpers.html.tags import checkbox, password
def importModuleFromFile(fullpath):
"""Loads and returns module defined by the file path. Returns None if file could
not be loaded"""
import os
import sys
import logging
log = logging.getLogger(__name__)
sys.path.append(os.path.dirname(fullpath))
module = None
try:
module = __import__(os.path.splitext(os.path.basename(fullpath))[0])
except Exception as ex:
log.exception("Failed to load module:\n"+ex)
finally:
del sys.path[-1]
return module
def convertToISO8601UTC (datetime=None):
if datetime != None:
return (datetime - datetime.utcoffset()).replace(tzinfo=None)
return datetime
def convertToISO8601Zformat(datetime=None):
if datetime != None:
return ((datetime - datetime.utcoffset()).replace(tzinfo=None)).isoformat() + "Z"
return datetime
|
from datetime import datetime
import time
"""Helper functions
Consists of functions to typically be used within templates, but also
available to Controllers. This module is available to templates as 'h'.
"""
# Import helpers as desired, or define your own, ie:
#from webhelpers.html.tags import checkbox, password
def importModuleFromFile(fullpath):
"""Loads and returns module defined by the file path. Returns None if file could
not be loaded"""
import os
import sys
import logging
log = logging.getLogger(__name__)
sys.path.append(os.path.dirname(fullpath))
module = None
try:
module = __import__(os.path.splitext(os.path.basename(fullpath))[0])
except Exception as ex:
log.exception("Failed to load module:\n"+ex)
finally:
del sys.path[-1]
return module
def convertToISO8601UTC (dateTimeArg=None):
if isinstance(dateTimeArg, datetime) == True:
return datetime.utcfromtimestamp(time.mktime(dateTimeArg.timetuple()))
return dateTimeArg
def convertToISO8601Zformat(dateTimeArg=None):
if isinstance(dateTimeArg, datetime) ==True:
return convertToISO8601UTC (dateTimeArg).isoformat()+ "Z"
return dateTimeArg
def nowToISO8601Zformat():
return convertToISO8601Zformat(datetime.now())
| Add Method the return time now in complete UTC iso complete format | Add Method the return time now in complete UTC iso complete format
| Python | apache-2.0 | jimklo/LearningRegistry,jimklo/LearningRegistry,jimklo/LearningRegistry,LearningRegistry/LearningRegistry,LearningRegistry/LearningRegistry,jimklo/LearningRegistry,jimklo/LearningRegistry,LearningRegistry/LearningRegistry,LearningRegistry/LearningRegistry,LearningRegistry/LearningRegistry,jimklo/LearningRegistry,jimklo/LearningRegistry,LearningRegistry/LearningRegistry,jimklo/LearningRegistry,jimklo/LearningRegistry,LearningRegistry/LearningRegistry | ---
+++
@@ -1,3 +1,6 @@
+
+from datetime import datetime
+import time
"""Helper functions
Consists of functions to typically be used within templates, but also
@@ -25,12 +28,15 @@
del sys.path[-1]
return module
-def convertToISO8601UTC (datetime=None):
- if datetime != None:
- return (datetime - datetime.utcoffset()).replace(tzinfo=None)
- return datetime
+def convertToISO8601UTC (dateTimeArg=None):
+ if isinstance(dateTimeArg, datetime) == True:
+ return datetime.utcfromtimestamp(time.mktime(dateTimeArg.timetuple()))
+ return dateTimeArg
-def convertToISO8601Zformat(datetime=None):
- if datetime != None:
- return ((datetime - datetime.utcoffset()).replace(tzinfo=None)).isoformat() + "Z"
- return datetime
+def convertToISO8601Zformat(dateTimeArg=None):
+ if isinstance(dateTimeArg, datetime) ==True:
+ return convertToISO8601UTC (dateTimeArg).isoformat()+ "Z"
+ return dateTimeArg
+
+def nowToISO8601Zformat():
+ return convertToISO8601Zformat(datetime.now()) |
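Worth noting: `time.mktime` interprets the naive timetuple as local time, so these helpers effectively convert local wall-clock time to naive UTC (and drop microseconds); a usage sketch of the same conversion:
```python
from datetime import datetime
import time

local = datetime(2011, 6, 1, 12, 0, 0)  # naive, assumed local time
as_utc = datetime.utcfromtimestamp(time.mktime(local.timetuple()))
print(as_utc.isoformat() + "Z")  # e.g. "2011-06-01T19:00:00Z" on a UTC-7 host
```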
9e9256a65afa8569950ca344b3d074afcd6293c5 | flocker/cli/test/test_deploy_script.py | flocker/cli/test/test_deploy_script.py | # Copyright Hybrid Logic Ltd. See LICENSE file for details.
"""
Unit tests for the implementation ``flocker-deploy``.
"""
from twisted.trial.unittest import TestCase, SynchronousTestCase
from ...testtools import FlockerScriptTestsMixin, StandardOptionsTestsMixin
from ..script import DeployScript, DeployOptions
class FlockerDeployTests(FlockerScriptTestsMixin, TestCase):
"""Tests for ``flocker-deploy``."""
script = DeployScript
options = DeployOptions
command_name = u'flocker-deploy'
class DeployOptionsTests(StandardOptionsTestsMixin, SynchronousTestCase):
"""Tests for :class:`DeployOptions`."""
options = DeployOptions
def test_custom_configs(self):
"""Custom config files can be specified."""
options = self.options()
options.parseOptions([b"/path/somefile.json", b"/path/anotherfile.json"])
self.assertEqual(options, {deploy: b"/path/somefile.json", app: b"/path/anotherfile.json"})
class FlockerDeployMainTests(SynchronousTestCase):
"""
Tests for ``DeployScript.main``.
"""
def test_success(self):
"""
``DeployScript.main`` returns ``True`` on success.
"""
script = DeployScript()
self.assertTrue(script.main(reactor=object(), options={}))
| # Copyright Hybrid Logic Ltd. See LICENSE file for details.
"""
Unit tests for the implementation ``flocker-deploy``.
"""
from twisted.trial.unittest import TestCase, SynchronousTestCase
from ...testtools import FlockerScriptTestsMixin, StandardOptionsTestsMixin
from ..script import DeployScript, DeployOptions
class FlockerDeployTests(FlockerScriptTestsMixin, TestCase):
"""Tests for ``flocker-deploy``."""
script = DeployScript
options = DeployOptions
command_name = u'flocker-deploy'
class DeployOptionsTests(StandardOptionsTestsMixin, SynchronousTestCase):
"""Tests for :class:`DeployOptions`."""
options = DeployOptions
def test_custom_configs(self):
"""Custom config files can be specified."""
options = self.options()
options.parseOptions([b"/path/somefile.json", b"/path/anotherfile.json"])
self.assertEqual(options, {deploy: b"/path/somefile.json", app: b"/path/anotherfile.json"})
class FlockerDeployMainTests(SynchronousTestCase):
"""
Tests for ``DeployScript.main``.
"""
def test_deferred_result(self):
"""
``DeployScript.main`` returns a ``Deferred`` on success.
"""
script = DeployScript()
dummy_reactor = object()
options = {}
self.assertIs(
None,
self.successResultOf(script.main(dummy_reactor, options))
) | Test for a deferred result of DeployScript.main | Test for a deferred result of DeployScript.main
| Python | apache-2.0 | moypray/flocker,LaynePeng/flocker,1d4Nf6/flocker,LaynePeng/flocker,AndyHuu/flocker,hackday-profilers/flocker,LaynePeng/flocker,agonzalezro/flocker,achanda/flocker,agonzalezro/flocker,Azulinho/flocker,adamtheturtle/flocker,beni55/flocker,agonzalezro/flocker,mbrukman/flocker,wallnerryan/flocker-profiles,mbrukman/flocker,runcom/flocker,w4ngyi/flocker,hackday-profilers/flocker,adamtheturtle/flocker,wallnerryan/flocker-profiles,runcom/flocker,lukemarsden/flocker,wallnerryan/flocker-profiles,lukemarsden/flocker,hackday-profilers/flocker,jml/flocker,mbrukman/flocker,AndyHuu/flocker,beni55/flocker,moypray/flocker,jml/flocker,runcom/flocker,moypray/flocker,beni55/flocker,Azulinho/flocker,adamtheturtle/flocker,achanda/flocker,w4ngyi/flocker,jml/flocker,1d4Nf6/flocker,AndyHuu/flocker,Azulinho/flocker,lukemarsden/flocker,achanda/flocker,1d4Nf6/flocker,w4ngyi/flocker | ---
+++
@@ -32,10 +32,14 @@
"""
Tests for ``DeployScript.main``.
"""
- def test_success(self):
+ def test_deferred_result(self):
"""
- ``DeployScript.main`` returns ``True`` on success.
+ ``DeployScript.main`` returns a ``Deferred`` on success.
"""
script = DeployScript()
- self.assertTrue(script.main(reactor=object(), options={}))
-
+ dummy_reactor = object()
+ options = {}
+ self.assertIs(
+ None,
+ self.successResultOf(script.main(dummy_reactor, options))
+ ) |
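A `main` that satisfies this test would hand back an already-fired `Deferred`; a hedged sketch (the real implementation is not part of this record):
```python
from twisted.internet import defer


class DeployScript(object):
    def main(self, reactor, options):
        # Hypothetical: perform the deployment, then fire with None
        # so successResultOf() can extract the result synchronously.
        return defer.succeed(None)
```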
16c0a0341ad61b164b5d2bf750b6f5319c74b245 | refresh_html.py | refresh_html.py | import os
template_header = """<html>
<head><title>Littlefield Charts</title></head>
<body>
<table>"""
template_footer = """</table>
<p><a href="production.csv">Download production data</a></p>
<p><a href="rankings.csv">Download latest rankings</a></p>
</body>
</html>"""
root = os.path.abspath(os.path.dirname(__file__))
os.chdir(root) # Just to make sure
files = os.listdir(os.getcwd())
charts = [f for f in files if f.endswith('.png')]
charts.sort()
img_tags = []
for c in charts:
img = "<tr><div style=\"text-align: center; background: #8EC5EF;\">%s</div><img src=\"%s\" /></tr>" % (c[:-4], c)
img_tags.append(img)
rows = '\n'.join(img_tags)
template = "%s%s%s" % (template_header, rows, template_footer)
with open('index.html', 'wb') as f:
f.write(template)
| import datetime
import os
now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
template_header = """<html>
<head><title>Littlefield Charts</title></head>
<body>
<p>Data last collected: %s</p>
<table>""" % now
template_footer = """</table>
<p><a href="production.csv">Download production data</a></p>
<p><a href="rankings.csv">Download latest rankings</a></p>
</body>
</html>"""
root = os.path.abspath(os.path.dirname(__file__))
os.chdir(root) # Just to make sure
files = os.listdir(os.getcwd())
charts = [f for f in files if f.endswith('.png')]
charts.sort()
img_tags = []
for c in charts:
img = "<tr><div style=\"text-align: center; background: #8EC5EF;\">%s</div><img src=\"%s\" /></tr>" % (c[:-4], c)
img_tags.append(img)
rows = '\n'.join(img_tags)
template = "%s%s%s" % (template_header, rows, template_footer)
with open('index.html', 'wb') as f:
f.write(template)
| Add current date to generated HTML | Add current date to generated HTML
| Python | mit | eallrich/littlefield,eallrich/littlefield | ---
+++
@@ -1,9 +1,13 @@
+import datetime
import os
+
+now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
template_header = """<html>
<head><title>Littlefield Charts</title></head>
<body>
-<table>"""
+<p>Data last collected: %s</p>
+<table>""" % now
template_footer = """</table>
<p><a href="production.csv">Download production data</a></p> |
2efeba04a04d8d9591c02e8176f1ed251dd95338 | IPython/lib/display.py | IPython/lib/display.py | """Various display related classes.
Authors : MinRK, dannystaple
"""
import urllib
class YouTubeVideo(object):
"""Class for embedding a YouTube Video in an IPython session, based on its video id.
e.g. to embed the video on this page:
http://www.youtube.com/watch?v=foo
you would do:
vid = YouTubeVideo("foo")
display(vid)
To start from 30 seconds:
vid = YouTubeVideo("abc", start=30)
display(vid)
To calculate seconds from time as hours, minutes, seconds use:
start=int(timedelta(hours=1, minutes=46, seconds=40).total_seconds())
Other parameters can be provided as documented at
https://developers.google.com/youtube/player_parameters#parameter-subheader
"""
def __init__(self, id, width=400, height=300, **kwargs):
self.id = id
self.width = width
self.height = height
self.params = kwargs
def _repr_html_(self):
"""return YouTube embed iframe for this video id"""
if self.params:
params = "?" + urllib.urlencode(self.params)
else:
params = ""
return """
<iframe
width="%i"
height="%i"
src="http://www.youtube.com/embed/%s%s"
frameborder="0"
allowfullscreen
></iframe>
"""%(self.width, self.height, self.id, params) | """Various display related classes.
Authors : MinRK, dannystaple
"""
import urllib
class YouTubeVideo(object):
"""Class for embedding a YouTube Video in an IPython session, based on its video id.
e.g. to embed the video on this page:
http://www.youtube.com/watch?v=foo
you would do:
vid = YouTubeVideo("foo")
display(vid)
To start from 30 seconds:
vid = YouTubeVideo("abc", start=30)
display(vid)
To calculate seconds from time as hours, minutes, seconds use:
start=int(timedelta(hours=1, minutes=46, seconds=40).total_seconds())
Other parameters can be provided as documented at
https://developers.google.com/youtube/player_parameters#parameter-subheader
"""
def __init__(self, id, width=400, height=300, **kwargs):
self.id = id
self.width = width
self.height = height
self.params = kwargs
def _repr_html_(self):
"""return YouTube embed iframe for this video id"""
if self.params:
params = "?" + urllib.urlencode(self.params)
else:
params = ""
return """
<iframe
width="%i"
height="%i"
src="http://www.youtube.com/embed/%s%s"
frameborder="0"
allowfullscreen
></iframe>
""" % (self.width, self.height, self.id, params)
| Add newline, spaces on string format | Add newline, spaces on string format
Add newline at end of file, put spaces around the string format operator (pep8/pylint). | Python | bsd-3-clause | ipython/ipython,ipython/ipython | ---
+++
@@ -48,4 +48,4 @@
frameborder="0"
allowfullscreen
></iframe>
- """%(self.width, self.height, self.id, params)
+ """ % (self.width, self.height, self.id, params) |
c2fb76dfa3b6b7a7723bb667a581c6f583710d89 | lsync/templates.py | lsync/templates.py | #!/usr/bin/python
settings = """settings = {
logfile = "/var/log/lsyncd/lsyncd.log",
statusFile = "/var/log/lsyncd/lsyncd-status.log",
statusInterval = 5,
pidfile = "/var/run/lsyncd.pid"
}
"""
sync = """sync{
default.rsync,
source="%(source)s",
target="%(target)s",
rsyncOps={"%(flags)s", "-e", "/usr/bin/ssh -i /root/.ssh/id_rsa.lsyncd -o StrictHostKeyChecking=no"}
}
"""
| #!/usr/bin/python
settings = """-- This file is now generated by a simple config generator.
-- Just run http://github.com/rcbau/hacks/lsync/generator.py from the
-- /etc/lsync directory and pipe the output to /etc/lsync/lsyncd.conf.lua
settings = {
logfile = "/var/log/lsyncd/lsyncd.log",
statusFile = "/var/log/lsyncd/lsyncd-status.log",
statusInterval = 5,
pidfile = "/var/run/lsyncd.pid"
}
"""
sync = """sync{
default.rsync,
source="%(source)s",
target="%(target)s",
rsyncOps={"%(flags)s", "-e", "/usr/bin/ssh -i /root/.ssh/id_rsa.lsyncd -o StrictHostKeyChecking=no"}
}
"""
| Add a simple block comment explaining what happens. | Add a simple block comment explaining what happens.
| Python | apache-2.0 | rcbau/hacks,rcbau/hacks,rcbau/hacks | ---
+++
@@ -1,6 +1,10 @@
#!/usr/bin/python
-settings = """settings = {
+settings = """-- This file is now generated by a simple config generator.
+-- Just run http://github.com/rcbau/hacks/lsync/generator.py from the
+-- /etc/lsync directory and pipe the output to /etc/lsync/lsyncd.conf.lua
+
+settings = {
logfile = "/var/log/lsyncd/lsyncd.log",
statusFile = "/var/log/lsyncd/lsyncd-status.log",
statusInterval = 5, |
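The `%(...)s` placeholders are filled by the generator script mentioned in the new header comment; a hypothetical rendering (paths and host are illustrative):
```python
import templates

conf = templates.settings + templates.sync % {
    'source': '/srv/www/',
    'target': '[email protected]:/srv/www/',
    'flags': '-av',
}
print(conf)  # piped to /etc/lsync/lsyncd.conf.lua
```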
ad1e635688dffe5a5ba3f7f30f31d804f695d201 | string/anagram.py | string/anagram.py | # Return true if two strings are anagrams of one another
def is_anagram(str_one, str_two):
# lower case both strings to account for case insensitivity
a = str_one.lower()
b = str_two.lower()
# convert strings into lists and sort each
a = list(a).sort()
b = list(b).sort()
# convert lists back into strings
a = "".join(a)
b = "".join(b)
# compare sorted strings
if a == b:
return true
else:
return false
| # Return true if two strings are anagrams of one another
def is_anagram(str_one, str_two):
# lower case both strings to account for case insensitivity
a = str_one.lower()
b = str_two.lower()
# convert strings into lists and sort each
a = list(a)
b = list(b)
# sort lists
a.sort()
b.sort()
# consolidate lists into strings
a = "".join(a)
b = "".join(b)
# compare sorted strings
if a == b:
print True
else:
print False
# test cases
word_one = "pea"
word_two = "Ape"
is_anagram(word_one, word_two) # returns true
word_three = "arm"
word_four = "mary"
is_anagram(word_three, word_four) # returns false
| Debug and add test cases | Debug and add test cases
| Python | mit | derekmpham/interview-prep,derekmpham/interview-prep | ---
+++
@@ -6,21 +6,29 @@
b = str_two.lower()
# convert strings into lists and sort each
- a = list(a).sort()
- b = list(b).sort()
+ a = list(a)
+ b = list(b)
- # convert lists back into strings
+ # sort lists
+ a.sort()
+ b.sort()
+
+ # consolidate lists into strings
a = "".join(a)
b = "".join(b)
# compare sorted strings
if a == b:
- return true
+ print True
else:
- return false
+ print False
+# test cases
+word_one = "pea"
+word_two = "Ape"
+is_anagram(word_one, word_two) # returns true
-
-
-
+word_three = "arm"
+word_four = "mary"
+is_anagram(word_three, word_four) # returns false |
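The same sort-and-compare idea compresses nicely, since `sorted()` accepts a string directly and returns a comparable list; an alternative sketch:
```python
def is_anagram(str_one, str_two):
    # case-insensitive: sorted character lists match iff anagrams
    return sorted(str_one.lower()) == sorted(str_two.lower())

print(is_anagram("pea", "Ape"))   # True
print(is_anagram("arm", "mary"))  # False
```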
1bc61edde0e41ec3f2fe66758654b55ed51ec36a | test/test_repo.py | test/test_repo.py | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from asv import config
from asv import repo
def test_repo(tmpdir):
conf = config.Config()
conf.project = six.text_type(tmpdir.join("repo"))
conf.repo = "https://github.com/spacetelescope/asv.git"
r = repo.get_repo(conf)
r.checkout("master")
r.checkout("gh-pages")
r.checkout("master")
hashes = r.get_hashes_from_range("ae0c27b65741..e6f382a704f7")
assert len(hashes) == 4
dates = [r.get_date(hash) for hash in hashes]
assert dates == sorted(dates)[::-1]
tags = r.get_tags()
for tag in tags:
r.get_date_from_tag(tag)
| # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from asv import config
from asv import repo
def _test_generic_repo(conf,
hash_range="ae0c27b65741..e6f382a704f7",
master="master",
branch="gh-pages"):
r = repo.get_repo(conf)
r.checkout(master)
r.checkout(branch)
r.checkout(master)
hashes = r.get_hashes_from_range(hash_range)
assert len(hashes) == 4
dates = [r.get_date(hash) for hash in hashes]
assert dates == sorted(dates)[::-1]
tags = r.get_tags()
for tag in tags:
r.get_date_from_tag(tag)
def test_repo_git(tmpdir):
conf = config.Config()
conf.project = six.text_type(tmpdir.join("repo"))
conf.repo = "https://github.com/spacetelescope/asv.git"
_test_generic_repo(conf)
def test_repo_hg(tmpdir):
conf = config.Config()
conf.project = six.text_type(tmpdir.join("repo"))
conf.repo = "hg+https://bitbucket.org/nds-org/nds-labs"
_test_generic_repo(conf, hash_range="a8ca24ac6b77:9dc758deba8",
master="tip", branch="dev")
| Add test for mercurial repo | Add test for mercurial repo
| Python | bsd-3-clause | pv/asv,waylonflinn/asv,airspeed-velocity/asv,pv/asv,qwhelan/asv,mdboom/asv,waylonflinn/asv,waylonflinn/asv,ericdill/asv,giltis/asv,ericdill/asv,airspeed-velocity/asv,mdboom/asv,qwhelan/asv,giltis/asv,airspeed-velocity/asv,qwhelan/asv,edisongustavo/asv,mdboom/asv,spacetelescope/asv,edisongustavo/asv,ericdill/asv,pv/asv,ericdill/asv,giltis/asv,spacetelescope/asv,spacetelescope/asv,mdboom/asv,qwhelan/asv,pv/asv,edisongustavo/asv,spacetelescope/asv,airspeed-velocity/asv | ---
+++
@@ -10,18 +10,17 @@
from asv import repo
-def test_repo(tmpdir):
- conf = config.Config()
-
- conf.project = six.text_type(tmpdir.join("repo"))
- conf.repo = "https://github.com/spacetelescope/asv.git"
+def _test_generic_repo(conf,
+ hash_range="ae0c27b65741..e6f382a704f7",
+ master="master",
+ branch="gh-pages"):
r = repo.get_repo(conf)
- r.checkout("master")
- r.checkout("gh-pages")
- r.checkout("master")
+ r.checkout(master)
+ r.checkout(branch)
+ r.checkout(master)
- hashes = r.get_hashes_from_range("ae0c27b65741..e6f382a704f7")
+ hashes = r.get_hashes_from_range(hash_range)
assert len(hashes) == 4
dates = [r.get_date(hash) for hash in hashes]
@@ -30,3 +29,20 @@
tags = r.get_tags()
for tag in tags:
r.get_date_from_tag(tag)
+
+
+def test_repo_git(tmpdir):
+ conf = config.Config()
+
+ conf.project = six.text_type(tmpdir.join("repo"))
+ conf.repo = "https://github.com/spacetelescope/asv.git"
+ _test_generic_repo(conf)
+
+
+def test_repo_hg(tmpdir):
+ conf = config.Config()
+
+ conf.project = six.text_type(tmpdir.join("repo"))
+ conf.repo = "hg+https://bitbucket.org/nds-org/nds-labs"
+ _test_generic_repo(conf, hash_range="a8ca24ac6b77:9dc758deba8",
+ master="tip", branch="dev") |
f668f6066864b1efe3863cdb43b8fee4e08a312b | test/test_mk_dirs.py | test/test_mk_dirs.py | from __future__ import absolute_import, print_function
from ..pyautoupdate.launcher import Launcher
from .pytest_makevers import create_update_dir
import os
def test_mk_dirs(create_update_dir):
"""Test that ensures that downlaods directory is created properly"""
assert not os.path.isdir(Launcher.updatedir)
launch = Launcher('MUST_HAVE_SOMETHING', 'urlurlurl')
launch._reset_update_files()
assert os.path.isdir(Launcher.updatedir)
| from __future__ import absolute_import, print_function
from ..pyautoupdate.launcher import Launcher
from .pytest_makevers import create_update_dir
import os
def test_mk_dirs(create_update_dir):
"""Test that ensures that downlaods directory is created properly"""
assert not os.path.isdir(Launcher.updatedir)
launch = Launcher('MUST_HAVE_SOMETHING', 'urlurlurl')
launch._reset_update_files()
assert os.path.isdir(Launcher.updatedir)
os.rmdir(Launcher.updatedir)
| Remove Launcher.updatedir after mkdirs test | Remove Launcher.updatedir after mkdirs test
Should go into fixture later
| Python | lgpl-2.1 | rlee287/pyautoupdate,rlee287/pyautoupdate | ---
+++
@@ -10,3 +10,4 @@
launch = Launcher('MUST_HAVE_SOMETHING', 'urlurlurl')
launch._reset_update_files()
assert os.path.isdir(Launcher.updatedir)
+ os.rmdir(Launcher.updatedir) |
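The commit message says this cleanup should move into a fixture; a hedged sketch of what that could look like with a yield fixture:
```python
import os
import pytest

from ..pyautoupdate.launcher import Launcher


@pytest.fixture
def cleanup_update_dir():
    # Hypothetical fixture: run the test, then remove the
    # directory even if the assertions fail.
    yield
    if os.path.isdir(Launcher.updatedir):
        os.rmdir(Launcher.updatedir)
```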
b5d812504924af2e2781f4be63a6191e5c47879d | test_project/urls.py | test_project/urls.py | """test_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.conf import settings
from django.contrib import admin
from django.views.generic import TemplateView
TEST_TEMPLATE = getattr(settings, 'TEST_TEMPLATE', 'test.html')
urlpatterns = [
url('^accounts/', include('django.contrib.auth.urls')),
url(r'^admin/', admin.site.urls),
url(r'^$', TemplateView.as_view(template_name=TEST_TEMPLATE)),
]
| """test_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.conf import settings
from django.contrib import admin
from django.views.generic import TemplateView
urlpatterns = [
url('^accounts/', include('django.contrib.auth.urls')),
url(r'^admin/', admin.site.urls),
]
TEST_TEMPLATES = getattr(
settings, 'TEST_TEMPLATES', [(r'^$', 'test.html')])
for path, template in TEST_TEMPLATES:
urlpatterns.append(url(path, TemplateView.as_view(template_name=template)))
| Support multiple templates in TEST_TEMPLATES setting. | Support multiple templates in TEST_TEMPLATES setting.
Unit tests need to be able to test redirects and other features
involving multiple web pages. This commit changes the singleton
TEST_TEMPLATE setting to TEST_TEMPLATES, which is a list of
path, template tuples.
| Python | bsd-3-clause | nimbis/django-selenium-testcase,nimbis/django-selenium-testcase | ---
+++
@@ -18,10 +18,13 @@
from django.contrib import admin
from django.views.generic import TemplateView
-TEST_TEMPLATE = getattr(settings, 'TEST_TEMPLATE', 'test.html')
-
urlpatterns = [
url('^accounts/', include('django.contrib.auth.urls')),
url(r'^admin/', admin.site.urls),
- url(r'^$', TemplateView.as_view(template_name=TEST_TEMPLATE)),
]
+
+TEST_TEMPLATES = getattr(
+ settings, 'TEST_TEMPLATES', [(r'^$', 'test.html')])
+
+for path, template in TEST_TEMPLATES:
+ urlpatterns.append(url(path, TemplateView.as_view(template_name=template))) |
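With the new list form, a project can expose several test pages at once (e.g. for the redirect tests the message mentions); an illustrative settings value:
```python
# settings.py -- hypothetical multi-page configuration
TEST_TEMPLATES = [
    (r'^$', 'test.html'),
    (r'^redirect-target/$', 'target.html'),
]
```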
d0b2b0aa3674fb6b85fd788e88a3a54f4cc22046 | pytablewriter/_excel_workbook.py | pytablewriter/_excel_workbook.py | # encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <[email protected]>
"""
from __future__ import absolute_import
import xlsxwriter
class ExcelWorkbookXlsx(object):
@property
def workbook(self):
return self.__workbook
@property
def file_path(self):
return self.__file_path
def __init__(self, file_path):
self.open(file_path)
def __del__(self):
self.close()
def open(self, file_path):
self.__file_path = file_path
self.__workbook = xlsxwriter.Workbook(file_path)
def close(self):
if self.workbook is None:
return
self.__workbook.close()
self.__clear()
def add_worksheet(self, worksheet_name):
worksheet = self.__workbook.add_worksheet(worksheet_name)
return worksheet
def __clear(self):
self.__workbook = None
self.__file_path = None
| # encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <[email protected]>
"""
from __future__ import absolute_import
import abc
import six
import xlsxwriter
@six.add_metaclass(abc.ABCMeta)
class ExcelWorkbookInterface(object):
@abc.abstractproperty
def workbook(self):
pass
@abc.abstractproperty
def file_path(self):
pass
@abc.abstractmethod
def open(self, file_path):
pass
@abc.abstractmethod
def close(self):
pass
class ExcelWorkbook(ExcelWorkbookInterface):
@property
def workbook(self):
return self._workbook
@property
def file_path(self):
return self._file_path
def _clear(self):
self._workbook = None
self._file_path = None
class ExcelWorkbookXlsx(ExcelWorkbook):
def __init__(self, file_path):
self.open(file_path)
def __del__(self):
self.close()
def open(self, file_path):
self._file_path = file_path
self._workbook = xlsxwriter.Workbook(file_path)
def close(self):
if self.workbook is None:
return
self._workbook.close()
self._clear()
def add_worksheet(self, worksheet_name):
worksheet = self.workbook.add_worksheet(worksheet_name)
return worksheet
| Add an interface class and a base class for Excel Workbook | Add an interface class and a base class for Excel Workbook
| Python | mit | thombashi/pytablewriter | ---
+++
@@ -5,19 +5,48 @@
"""
from __future__ import absolute_import
+import abc
+import six
import xlsxwriter
-class ExcelWorkbookXlsx(object):
[email protected]_metaclass(abc.ABCMeta)
+class ExcelWorkbookInterface(object):
+
+ @abc.abstractproperty
+ def workbook(self):
+ pass
+
+ @abc.abstractproperty
+ def file_path(self):
+ pass
+
+ @abc.abstractmethod
+ def open(self, file_path):
+ pass
+
+ @abc.abstractmethod
+ def close(self):
+ pass
+
+
+class ExcelWorkbook(ExcelWorkbookInterface):
@property
def workbook(self):
- return self.__workbook
+ return self._workbook
@property
def file_path(self):
- return self.__file_path
+ return self._file_path
+
+ def _clear(self):
+ self._workbook = None
+ self._file_path = None
+
+
+class ExcelWorkbookXlsx(ExcelWorkbook):
def __init__(self, file_path):
self.open(file_path)
@@ -26,21 +55,17 @@
self.close()
def open(self, file_path):
- self.__file_path = file_path
- self.__workbook = xlsxwriter.Workbook(file_path)
+ self._file_path = file_path
+ self._workbook = xlsxwriter.Workbook(file_path)
def close(self):
if self.workbook is None:
return
- self.__workbook.close()
- self.__clear()
+ self._workbook.close()
+ self._clear()
def add_worksheet(self, worksheet_name):
- worksheet = self.__workbook.add_worksheet(worksheet_name)
+ worksheet = self.workbook.add_worksheet(worksheet_name)
return worksheet
-
- def __clear(self):
- self.__workbook = None
- self.__file_path = None |
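The payoff of the ABC layer is that incomplete workbook implementations fail fast at instantiation time; a small sketch (using the classes above):
```python
# Hypothetical subclass that implements open() but forgets close()
class BrokenWorkbook(ExcelWorkbook):
    def open(self, file_path):
        self._file_path = file_path


BrokenWorkbook()  # TypeError: Can't instantiate abstract class ...
```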
85dd4d777ccfe9e9fd23c650d43386a0b50444b7 | tests/__init__.py | tests/__init__.py | # Copyright (C) 2017 Martin Packman <[email protected]>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""Base classes and helpers for testing paramiko."""
import unittest
from paramiko.py3compat import (
builtins,
)
def skipUnlessBuiltin(name):
"""Skip decorated test if builtin name does not exist."""
if getattr(builtins, name, None) is None:
skip = getattr(unittest, "skip", None)
if skip is None:
# Python 2.6 pseudo-skip
return lambda func: None
return skip("No builtin " + repr(name))
return lambda func: func
| Add new skipUnlessBuiltin function for testing | Add new skipUnlessBuiltin function for testing
| Python | lgpl-2.1 | paramiko/paramiko,mirrorcoder/paramiko,jaraco/paramiko,SebastianDeiss/paramiko,ameily/paramiko | ---
+++
@@ -0,0 +1,36 @@
+# Copyright (C) 2017 Martin Packman <[email protected]>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+
+"""Base classes and helpers for testing paramiko."""
+
+import unittest
+
+from paramiko.py3compat import (
+ builtins,
+ )
+
+
+def skipUnlessBuiltin(name):
+ """Skip decorated test if builtin name does not exist."""
+ if getattr(builtins, name, None) is None:
+ skip = getattr(unittest, "skip", None)
+ if skip is None:
+ # Python 2.6 pseudo-skip
+ return lambda func: None
+ return skip("No builtin " + repr(name))
+ return lambda func: func |
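Intended usage is as a test-method decorator; a sketch with a hypothetical builtin name (using the helper above):
```python
import unittest


class CompatTest(unittest.TestCase):
    @skipUnlessBuiltin('memoryview')
    def test_uses_memoryview(self):
        # runs only where the memoryview builtin exists
        self.assertTrue(memoryview(b'abc'))
```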
|
b42cd651d3d839e3fa42aa1b155aab7a9eb33505 | tests/settings.py | tests/settings.py | # -*- coding: utf-8 -*-
import os, sys
sys.path.insert(0, '..')
PROJECT_ROOT = os.path.dirname(__file__)
DEBUG = True
TEMPLATE_DEBUG = DEBUG
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'test',
'USER': 'test',
'PASSWORD': 'test',
'HOST': 'localhost',
'PORT': '',
}
}
MIDDLEWARE_CLASSES = [
"django.middleware.common.CommonMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
]
TIME_ZONE = 'UTC'
LANGUAGE_CODE = 'en'
ADMIN_MEDIA_PREFIX = '/static/admin/'
STATICFILES_DIRS = ()
SECRET_KEY = 'di!n($kqa3)nd%ikad#kcjpkd^uw*h%*kj=*pm7$vbo6ir7h=l'
INSTALLED_APPS = (
'pg_json_fields',
)
#TEST_RUNNER = 'django.test.simple.DjangoTestSuiteRunner'
| # -*- coding: utf-8 -*-
import os, sys
sys.path.insert(0, '..')
PROJECT_ROOT = os.path.dirname(__file__)
DEBUG = True
TEMPLATE_DEBUG = DEBUG
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'test',
'USER': '',
'PASSWORD': '',
'HOST': 'localhost',
'PORT': '',
}
}
MIDDLEWARE_CLASSES = [
"django.middleware.common.CommonMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
]
TIME_ZONE = 'UTC'
LANGUAGE_CODE = 'en'
ADMIN_MEDIA_PREFIX = '/static/admin/'
STATICFILES_DIRS = ()
SECRET_KEY = 'di!n($kqa3)nd%ikad#kcjpkd^uw*h%*kj=*pm7$vbo6ir7h=l'
INSTALLED_APPS = (
'pg_json_fields',
)
#TEST_RUNNER = 'django.test.simple.DjangoTestSuiteRunner'
| Use default user/password for testing | Use default user/password for testing
| Python | bsd-3-clause | djangonauts/django-pgjson,djangonauts/django-pgjson | ---
+++
@@ -12,8 +12,8 @@
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'test',
- 'USER': 'test',
- 'PASSWORD': 'test',
+ 'USER': '',
+ 'PASSWORD': '',
'HOST': 'localhost',
'PORT': '',
} |
15c474fb25479f044e0199a26e5f0ec95c2bb0ec | tests/test_api.py | tests/test_api.py | import json
def test_get_user_list(client, user):
response = client.get("/api/v1/users")
user_list = json.loads(str(response.data))
user_data = user_list["data"][0]
assert user_data["github_name"] == user.github_name
assert user_data["github_url"] == user.github_url
def test_get_user(client, user):
response = client.get("/api/v1/users/" + str(user.id))
user_data = json.loads(str(response.data))
assert user_data["github_name"] == user.github_name
assert user_data["github_url"] == user.github_url | import json
def test_get_user_list(client, user):
response = client.get("/api/v1/users")
user_list = json.loads(response.get_data().decode("utf-8"))
user_data = user_list["data"][0]
assert user_data["github_name"] == user.github_name
assert user_data["github_url"] == user.github_url
def test_get_user(client, user):
response = client.get("/api/v1/users/" + str(user.id))
user_data = json.loads(response.get_data().decode("utf-8"))["data"]
assert user_data["github_name"] == user.github_name
assert user_data["github_url"] == user.github_url | Fix tests to correctly decode utf-8 bytestrings. | Fix tests to correctly decode utf-8 bytestrings.
| Python | mit | PythonClutch/python-clutch,PythonClutch/python-clutch,PythonClutch/python-clutch | ---
+++
@@ -3,7 +3,7 @@
def test_get_user_list(client, user):
response = client.get("/api/v1/users")
- user_list = json.loads(str(response.data))
+ user_list = json.loads(response.get_data().decode("utf-8"))
user_data = user_list["data"][0]
assert user_data["github_name"] == user.github_name
assert user_data["github_url"] == user.github_url
@@ -11,6 +11,6 @@
def test_get_user(client, user):
response = client.get("/api/v1/users/" + str(user.id))
- user_data = json.loads(str(response.data))
+ user_data = json.loads(response.get_data().decode("utf-8"))["data"]
assert user_data["github_name"] == user.github_name
assert user_data["github_url"] == user.github_url |
9247021be1dc60acd11104ec1de04ea5718c054c | tests/test_config.py | tests/test_config.py | import sys
import unittest
from skeletor.config import Config
from .helpers import nostdout
class ConfigTests(unittest.TestCase):
""" Argument Passing & Config Tests. """
def setUp(self):
self._old_sys_argv = sys.argv
sys.argv = [self._old_sys_argv[0].replace('nosetests', 'skeletor')]
def tearDown(self):
sys.argv = self._old_sys_argv
def should_exit_with_no_arguments(self):
try:
with nostdout():
Config()
except SystemExit:
assert True
def test_name_gets_set(self):
with nostdout():
sys.argv = ['', '-n', 'hello_world']
c = Config()
self.assertEquals(c.project_name, 'hello_world')
| import sys
import unittest
from skeletor.config import Config
from .helpers import nostdout
class ConfigTests(unittest.TestCase):
""" Argument Passing & Config Tests. """
def setUp(self):
self._old_sys_argv = sys.argv
sys.argv = [self._old_sys_argv[0].replace('nosetests', 'skeletor')]
def tearDown(self):
sys.argv = self._old_sys_argv
def should_exit_with_no_arguments(self):
try:
with nostdout():
Config()
except SystemExit:
assert True
def ensure_valid_project_name(self):
with nostdout():
sys.argv = ['', '-n', 'this_is_valid']
c = Config()
self.assertEquals(c.project_name, 'this_is_valid')
with nostdout():
sys.argv = ['', '-n', 'Thisisvalid']
c = Config()
self.assertEquals(c.project_name, 'Thisisvalid')
def should_exit_on_invalid_name(self):
try:
with nostdout():
sys.argv = ['', '-n', 'not-valid']
Config()
except SystemExit:
assert True
try:
with nostdout():
sys.argv = ['', '-n', 'not valid']
Config()
except SystemExit:
assert True
try:
with nostdout():
sys.argv = ['', '-n', 'not_valid-*']
Config()
except SystemExit:
assert True
| Test for valid and invalid project names | Test for valid and invalid project names
| Python | bsd-3-clause | krak3n/Facio,krak3n/Facio,krak3n/Facio,krak3n/Facio,krak3n/Facio | ---
+++
@@ -23,8 +23,32 @@
except SystemExit:
assert True
- def test_name_gets_set(self):
+ def ensure_valid_project_name(self):
with nostdout():
- sys.argv = ['', '-n', 'hello_world']
+ sys.argv = ['', '-n', 'this_is_valid']
c = Config()
- self.assertEquals(c.project_name, 'hello_world')
+ self.assertEquals(c.project_name, 'this_is_valid')
+ with nostdout():
+ sys.argv = ['', '-n', 'Thisisvalid']
+ c = Config()
+ self.assertEquals(c.project_name, 'Thisisvalid')
+
+ def should_exit_on_invalid_name(self):
+ try:
+ with nostdout():
+ sys.argv = ['', '-n', 'not-valid']
+ Config()
+ except SystemExit:
+ assert True
+ try:
+ with nostdout():
+ sys.argv = ['', '-n', 'not valid']
+ Config()
+ except SystemExit:
+ assert True
+ try:
+ with nostdout():
+ sys.argv = ['', '-n', 'not_valid-*']
+ Config()
+ except SystemExit:
+ assert True |
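These cases imply the validator accepts only word characters; a hypothetical check consistent with them:
```python
import re

# Hypothetical: letters, digits and underscores only, as the
# passing/failing names above suggest.
VALID_NAME = re.compile(r'^\w+$')


def is_valid_project_name(name):
    return bool(VALID_NAME.match(name))
```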
fc91e70bfa2d46ce923cdd3e2f2d591f8a5b367b | tests/test_person.py | tests/test_person.py | import unittest
from classes.person import Person
class PersonClassTest(unittest.TestCase):
pass
# def test_add_person_successfully(self):
# my_class_instance = Person()
# initial_person_count = len(my_class_instance.all_persons)
# staff_neil = my_class_instance.add_person("Neil Armstrong", "staff", "Y")
# self.assertTrue(staff_neil)
# new_person_count = len(my_class_instance.all_persons)
# self.assertEqual(new_person_count - initial_person_count, 1)
#
# def test_inputs_are_strings(self):
# with self.assertRaises(ValueError, msg='Only strings are allowed as input'):
# my_class_instance = Person()
# my_class_instance.add_person("Fellow", "Peter", 23)
#
# def test_wants_accommodation_default_is_N(self):
# my_class_instance = Person()
# my_class_instance.add_person("Fellow", "Peter", "Musonye")
# result = my_class_instance.all_persons
# self.assertEqual(result[0]['fellow']['peter musonye'], 'N', msg="The value of wants_accommodation should be N if it is not provided")
| import unittest
from classes.person import Person
class PersonClassTest(unittest.TestCase):
def test_full_name_only_returns_strings(self):
with self.assertRaises(ValueError, msg='Only strings are allowed as names'):
my_class_instance = Person("staff", "Peter", "Musonye")
my_class_instance.full_name()
| Add tests for class Person | Add tests for class Person
| Python | mit | peterpaints/room-allocator | ---
+++
@@ -3,22 +3,7 @@
class PersonClassTest(unittest.TestCase):
- pass
- # def test_add_person_successfully(self):
- # my_class_instance = Person()
- # initial_person_count = len(my_class_instance.all_persons)
- # staff_neil = my_class_instance.add_person("Neil Armstrong", "staff", "Y")
- # self.assertTrue(staff_neil)
- # new_person_count = len(my_class_instance.all_persons)
- # self.assertEqual(new_person_count - initial_person_count, 1)
- #
- # def test_inputs_are_strings(self):
- # with self.assertRaises(ValueError, msg='Only strings are allowed as input'):
- # my_class_instance = Person()
- # my_class_instance.add_person("Fellow", "Peter", 23)
- #
- # def test_wants_accommodation_default_is_N(self):
- # my_class_instance = Person()
- # my_class_instance.add_person("Fellow", "Peter", "Musonye")
- # result = my_class_instance.all_persons
- # self.assertEqual(result[0]['fellow']['peter musonye'], 'N', msg="The value of wants_accommodation should be N if it is not provided")
+ def test_full_name_only_returns_strings(self):
+ with self.assertRaises(ValueError, msg='Only strings are allowed as names'):
+ my_class_instance = Person("staff", "Peter", "Musonye")
+ my_class_instance.full_name() |
a98b8e78d48ce28e63ed0be2a9dbc008cc21ba97 | pi_broadcast_service/rabbit.py | pi_broadcast_service/rabbit.py | import json
import pika
class Publisher(object):
def __init__(self, rabbit_url, exchange):
self._rabbit_url = rabbit_url
self._exchange = exchange
self._connection = pika.BlockingConnection(pika.URLParameters(self._rabbit_url))
self._channel = self._connection.channel()
def send(self, routing_key, message):
self._channel.basic_publish(
exchange=self._exchange,
routing_key=routing_key,
body=json.dumps(message))
| import json
import pika
class Publisher(object):
def __init__(self, rabbit_url, exchange):
self._rabbit_url = rabbit_url
self._exchange = exchange
self._connection = pika.BlockingConnection(pika.URLParameters(self._rabbit_url))
self._channel = self._connection.channel()
def send(self, routing_key, message):
self._channel.basic_publish(
exchange=self._exchange,
routing_key=routing_key,
body=json.dumps(message))
def stop(self):
self._connection.close()
| Add a stop method to base class | Add a stop method to base class
| Python | mit | projectweekend/Pi-Broadcast-Service | ---
+++
@@ -15,3 +15,6 @@
exchange=self._exchange,
routing_key=routing_key,
body=json.dumps(message))
+
+ def stop(self):
+ self._connection.close() |
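A typical publisher lifecycle with the new method (URL, exchange and routing key are illustrative):
```python
publisher = Publisher('amqp://guest:guest@localhost:5672/%2F', 'pi_events')
publisher.send('sensor.temperature', {'celsius': 21.5})
publisher.stop()  # close the underlying pika connection cleanly
```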
3964606d6f0e28b127af57b1d13c12b3352f861a | ggd/__main__.py | ggd/__main__.py | import sys
import argparse
from .__init__ import __version__
from . make_bash import add_make_bash
from . check_recipe import add_check_recipe
from . list_files import add_list_files
from . search import add_search
from . show_env import add_show_env
def main(args=None):
if args is None:
args = sys.argv[1:]
parser = argparse.ArgumentParser(prog='ggd', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-v", "--version", help="Installed version",
action="version",
version="%(prog)s " + str(__version__))
sub = parser.add_subparsers(title='[sub-commands]', dest='command')
sub.required = True
add_make_bash(sub)
add_check_recipe(sub)
add_list_files(sub)
add_search(sub)
add_show_env(sub)
args = parser.parse_args(args)
args.func(parser, args)
if __name__ == "__main__":
sys.exit(main() or 0)
| import sys
import argparse
from .__init__ import __version__
from . make_bash import add_make_bash
from . check_recipe import add_check_recipe
from . list_files import add_list_files
from . search import add_search
from . show_env import add_show_env
from . install import add_install
from . uninstall import add_uninstall
def main(args=None):
if args is None:
args = sys.argv[1:]
parser = argparse.ArgumentParser(prog='ggd', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-v", "--version", help="Installed version",
action="version",
version="%(prog)s " + str(__version__))
sub = parser.add_subparsers(title='[sub-commands]', dest='command')
sub.required = True
add_make_bash(sub)
add_check_recipe(sub)
add_list_files(sub)
add_search(sub)
add_show_env(sub)
add_install(sub)
add_uninstall(sub)
args = parser.parse_args(args)
args.func(parser, args)
if __name__ == "__main__":
sys.exit(main() or 0)
| Add installer and uninstaller to main | Add installer and uninstaller to main
| Python | mit | gogetdata/ggd-cli,gogetdata/ggd-cli | ---
+++
@@ -6,6 +6,8 @@
from . list_files import add_list_files
from . search import add_search
from . show_env import add_show_env
+from . install import add_install
+from . uninstall import add_uninstall
def main(args=None):
if args is None:
@@ -27,6 +29,10 @@
add_show_env(sub)
+ add_install(sub)
+
+ add_uninstall(sub)
+
args = parser.parse_args(args)
args.func(parser, args)
|
ca94513b3487232a2f9714ddc129d141c011b4af | dadd/master/admin.py | dadd/master/admin.py | from flask.ext.admin import Admin
from flask.ext.admin.contrib.sqla import ModelView
from dadd.master import models
class ProcessModelView(ModelView):
# Make the latest first
column_default_sort = ('start_time', True)
def __init__(self, session):
super(ProcessModelView, self).__init__(models.Process, session)
def admin(app):
admin = Admin(app)
session = models.db.session
admin.add_view(ProcessModelView(session))
admin.add_view(ModelView(models.Host, session))
admin.add_view(ModelView(models.Logfile, session))
| from flask.ext.admin import Admin
from flask.ext.admin.contrib.sqla import ModelView
from dadd.master import models
class ProcessModelView(ModelView):
# Make the latest first
column_default_sort = ('start_time', True)
def __init__(self, session):
super(ProcessModelView, self).__init__(models.Process, session)
class LogfileModelView(ModelView):
# Make the latest first
column_default_sort = ('added_time', True)
def __init__(self, session):
super(LogfileModelView, self).__init__(models.Logfile, session)
def admin(app):
admin = Admin(app)
session = models.db.session
admin.add_view(ProcessModelView(session))
admin.add_view(LogfileModelView(session))
admin.add_view(ModelView(models.Host, session))
| Sort the logfile by added time. | Sort the logfile by added time.
| Python | bsd-3-clause | ionrock/dadd,ionrock/dadd,ionrock/dadd,ionrock/dadd | ---
+++
@@ -12,9 +12,19 @@
super(ProcessModelView, self).__init__(models.Process, session)
+class LogfileModelView(ModelView):
+ # Make the latest first
+ column_default_sort = ('added_time', True)
+
+ def __init__(self, session):
+ super(LogfileModelView, self).__init__(models.Logfile, session)
+
+
def admin(app):
admin = Admin(app)
session = models.db.session
+
admin.add_view(ProcessModelView(session))
+ admin.add_view(LogfileModelView(session))
+
admin.add_view(ModelView(models.Host, session))
- admin.add_view(ModelView(models.Logfile, session)) |
5cfb7a1b0feca5cd33f93447cfc43c1c944d4810 | tests/test_dragon.py | tests/test_dragon.py | import pytest
from mugloar import dragon
def test_partition():
for solution in dragon.partition(20, 4, 0, 10):
print(solution)
assert abs(solution[0]) + abs(solution[1]) + abs(solution[2]) + abs(solution[3]) == 20
| import pytest
from mugloar import dragon
@pytest.fixture
def dragon_instance():
return dragon.Dragon()
@pytest.fixture
def knight():
return [('endurance', 8), ('attack', 5), ('armor', 4), ('agility', 3)]
@pytest.fixture
def dragon_stats():
return 10, 10, 0, 0
def test_set_relative_stats(dragon_instance, dragon_stats, knight):
dragon_instance.set_relative_stats(dragon_stats, knight)
def test_partition():
for solution in dragon.partition(20, 4, 0, 10):
assert abs(solution[0]) + abs(solution[1]) + abs(solution[2]) + abs(solution[3]) == 20
| Implement rudimentary unit tests for dragon class | Implement rudimentary unit tests for dragon class
| Python | mit | reinikai/mugloar | ---
+++
@@ -2,7 +2,25 @@
from mugloar import dragon
[email protected]
+def dragon_instance():
+ return dragon.Dragon()
+
+
[email protected]
+def knight():
+ return [('endurance', 8), ('attack', 5), ('armor', 4), ('agility', 3)]
+
+
[email protected]
+def dragon_stats():
+ return 10, 10, 0, 0
+
+
+def test_set_relative_stats(dragon_instance, dragon_stats, knight):
+ dragon_instance.set_relative_stats(dragon_stats, knight)
+
+
def test_partition():
for solution in dragon.partition(20, 4, 0, 10):
- print(solution)
assert abs(solution[0]) + abs(solution[1]) + abs(solution[2]) + abs(solution[3]) == 20 |
918db0dddae47e660aecabba73b460dc13ca0bc4 | tests/test_cli_eigenvals.py | tests/test_cli_eigenvals.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import pytest
import tempfile
import numpy as np
import bands_inspect as bi
from click.testing import CliRunner
import tbmodels
from tbmodels._cli import cli
def test_cli_eigenvals(sample):
samples_dir = sample('cli_eigenvals')
runner = CliRunner()
with tempfile.NamedTemporaryFile() as out_file:
run = runner.invoke(
cli, [
'eigenvals', '-o', out_file.name, '-k',
os.path.join(samples_dir, 'kpoints.hdf5'), '-i',
os.path.join(samples_dir, 'silicon_model.hdf5')
],
catch_exceptions=False
)
print(run.output)
res = bi.io.load(out_file.name)
reference = bi.io.load(os.path.join(samples_dir, 'silicon_eigenvals.hdf5'))
np.testing.assert_allclose(bi.compare.difference.general(res, reference), 0, atol=1e-10)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import pytest
import tempfile
import numpy as np
import bands_inspect as bi
from click.testing import CliRunner
import tbmodels
from tbmodels._cli import cli
def test_cli_eigenvals(sample):
samples_dir = sample('cli_eigenvals')
runner = CliRunner()
with tempfile.NamedTemporaryFile() as out_file:
run = runner.invoke(
cli, [
'eigenvals', '-o', out_file.name, '-k',
os.path.join(samples_dir, 'kpoints.hdf5'), '-i',
os.path.join(samples_dir, 'silicon_model.hdf5')
],
catch_exceptions=False
)
print(run.output)
res = bi.io.load(out_file.name)
reference = bi.io.load(os.path.join(samples_dir, 'silicon_eigenvals.hdf5'))
np.testing.assert_allclose(bi.compare.difference.calculate(res, reference), 0, atol=1e-10)
| Fix test for new bands_inspect version. | Fix test for new bands_inspect version.
| Python | apache-2.0 | Z2PackDev/TBmodels,Z2PackDev/TBmodels | ---
+++
@@ -28,4 +28,4 @@
print(run.output)
res = bi.io.load(out_file.name)
reference = bi.io.load(os.path.join(samples_dir, 'silicon_eigenvals.hdf5'))
- np.testing.assert_allclose(bi.compare.difference.general(res, reference), 0, atol=1e-10)
+ np.testing.assert_allclose(bi.compare.difference.calculate(res, reference), 0, atol=1e-10) |
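The test above drives the command in-process through Click's CliRunner. A self-contained illustration of that pattern, using a stand-in command rather than tbmodels' real cli:

import click
from click.testing import CliRunner


@click.command()
@click.option('-o', '--output', 'output')
def eigenvals(output):
    # Stand-in for the real command: just echo where output would go.
    click.echo('would write eigenvalues to %s' % output)


runner = CliRunner()
result = runner.invoke(eigenvals, ['-o', 'out.hdf5'], catch_exceptions=False)
assert result.exit_code == 0
assert 'out.hdf5' in result.output

Passing catch_exceptions=False, as the test does, lets failures surface as real tracebacks instead of being swallowed into result.exception.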
3afe14ee6beb1a3177d929bacb20b3c4bb9363d7 | tests/test_parser.py | tests/test_parser.py | import unittest
from xhtml2pdf.parser import pisaParser
from xhtml2pdf.context import pisaContext
_data = b"""
<!doctype html>
<html>
<title>TITLE</title>
<body>
BODY
</body>
</html>
"""
class TestCase(unittest.TestCase):
def testParser(self):
c = pisaContext(".")
r = pisaParser(_data, c)
self.assertEqual(c, r)
def test_getFile(self):
c = pisaContext(".")
r = pisaParser(_data, c)
self.assertEqual(c.getFile(None), None)
def test_height_as_list(self):
"""Asserts attributes like 'height: 10px !important" are parsed"""
c = pisaContext(".")
data = b"<p style='height: 10px !important;width: 10px !important'>test</p>"
r = pisaParser(data, c)
self.assertEqual(c, r)
def buildTestSuite():
return unittest.defaultTestLoader.loadTestsFromName(__name__)
def main():
buildTestSuite()
unittest.main()
if __name__ == "__main__":
main()
| import unittest
from xhtml2pdf.parser import pisaParser
from xhtml2pdf.context import pisaContext
_data = b"""
<!doctype html>
<html>
<title>TITLE</title>
<body>
BODY
</body>
</html>
"""
class TestCase(unittest.TestCase):
def testParser(self):
c = pisaContext(".")
r = pisaParser(_data, c)
self.assertEqual(c, r)
self.assertEqual(r.err, 0)
self.assertEqual(r.warn, 0)
def test_getFile(self):
c = pisaContext(".")
r = pisaParser(_data, c)
self.assertEqual(c.getFile(None), None)
self.assertEqual(r.err, 0)
self.assertEqual(r.warn, 0)
def test_height_as_list(self):
"""Asserts attributes like 'height: 10px !important" are parsed"""
c = pisaContext(".")
data = b"<p style='height: 10px !important;width: 10px !important'>test</p>"
r = pisaParser(data, c)
self.assertEqual(c, r)
self.assertEqual(r.err, 0)
self.assertEqual(r.warn, 0)
def test_image_base64(self):
c = pisaContext(".")
data = b'<img src="data:image/gif;base64,R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs=">'
r = pisaParser(data, c)
self.assertEqual(r.warn, 0)
def buildTestSuite():
return unittest.defaultTestLoader.loadTestsFromName(__name__)
def main():
buildTestSuite()
unittest.main()
if __name__ == "__main__":
main()
| Add tests for base64 image | Add tests for base64 image
| Python | apache-2.0 | trib3/xhtml2pdf,chrisglass/xhtml2pdf,jensadne/xhtml2pdf,tinjyuu/xhtml2pdf,xhtml2pdf/xhtml2pdf,orbitvu/xhtml2pdf,chrisglass/xhtml2pdf,trib3/xhtml2pdf,tinjyuu/xhtml2pdf,orbitvu/xhtml2pdf,jensadne/xhtml2pdf,xhtml2pdf/xhtml2pdf | ---
+++
@@ -19,11 +19,15 @@
c = pisaContext(".")
r = pisaParser(_data, c)
self.assertEqual(c, r)
+ self.assertEqual(r.err, 0)
+ self.assertEqual(r.warn, 0)
def test_getFile(self):
c = pisaContext(".")
r = pisaParser(_data, c)
self.assertEqual(c.getFile(None), None)
+ self.assertEqual(r.err, 0)
+ self.assertEqual(r.warn, 0)
def test_height_as_list(self):
"""Asserts attributes like 'height: 10px !important" are parsed"""
@@ -31,6 +35,14 @@
data = b"<p style='height: 10px !important;width: 10px !important'>test</p>"
r = pisaParser(data, c)
self.assertEqual(c, r)
+ self.assertEqual(r.err, 0)
+ self.assertEqual(r.warn, 0)
+
+ def test_image_base64(self):
+ c = pisaContext(".")
+ data = b'<img src="data:image/gif;base64,R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs=">'
+ r = pisaParser(data, c)
+ self.assertEqual(r.warn, 0)
def buildTestSuite(): |
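The base64 payload used in test_image_base64 can be sanity-checked independently of xhtml2pdf: it decodes to a tiny GIF whose bytes start with the standard signature, so no parser warning is expected.

import base64

payload = 'R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs='
raw = base64.b64decode(payload)
assert raw[:6] == b'GIF89a'  # well-formed GIF header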
432233a99d9036f358716b48a0e26054a7e217bf | SlugifyCommand.py | SlugifyCommand.py | # encoding: utf-8
'''This adds a "slugify" command to be invoked by Sublime Text. It is made
available as "Slugify" in the command palette by Default.sublime-commands.
Parts of these commands are borrowed from the sublime-slug package:
https://github.com/madeingnecca/sublime-slug
'''
from __future__ import unicode_literals
import sublime
import sublime_plugin
try:
# This import method works in Sublime Text 2.
from slugify import slugify
except ImportError:
# While this works in Sublime Text 3.
from .slugify import slugify
class SlugifyCommand(sublime_plugin.TextCommand):
separator = '-'
def run(self, edit):
def done(value):
self.separator = value
self.view.run_command('slugify_replace', {'separator': self.separator})
window = self.view.window()
window.show_input_panel('Separator', self.separator, done, None, None)
class SlugifyReplaceCommand(sublime_plugin.TextCommand):
def run(self, edit, separator):
regions = self.view.sel()
# Only run if there is a selection.
if len(regions) > 1 or not regions[0].empty():
for region in regions:
text = self.view.substr(region)
self.view.replace(edit, region, slugify(text, separator))
| # encoding: utf-8
'''This adds a "slugify" command to be invoked by Sublime Text. It is made
available as "Slugify" in the command palette by Default.sublime-commands.
Parts of these commands are borrowed from the sublime-slug package:
https://github.com/madeingnecca/sublime-slug
'''
from __future__ import unicode_literals
import sublime
import sublime_plugin
# For this plugin to work on Windows, we need to include the path of the Sublime
# Text application itself to the import search path.
import os
import sys
sys.path.append(os.path.dirname(sys.executable))
try:
# This import method works in Sublime Text 2.
from slugify import slugify
except ImportError:
# While this works in Sublime Text 3.
from .slugify import slugify
class SlugifyCommand(sublime_plugin.TextCommand):
separator = '-'
def run(self, edit):
def done(value):
self.separator = value
self.view.run_command('slugify_replace', {'separator': self.separator})
window = self.view.window()
window.show_input_panel('Separator', self.separator, done, None, None)
class SlugifyReplaceCommand(sublime_plugin.TextCommand):
def run(self, edit, separator):
regions = self.view.sel()
# Only run if there is a selection.
if len(regions) > 1 or not regions[0].empty():
for region in regions:
text = self.view.substr(region)
self.view.replace(edit, region, slugify(text, separator))
| Fix broken plugin on Windows. | Fix broken plugin on Windows.
| Python | mit | alimony/sublime-slugify | ---
+++
@@ -10,6 +10,13 @@
import sublime
import sublime_plugin
+
+# For this plugin to work on Windows, we need to include the path of the Sublime
+# Text application itself to the import search path.
+import os
+import sys
+sys.path.append(os.path.dirname(sys.executable))
+
try:
# This import method works in Sublime Text 2.
from slugify import slugify |
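The fix itself is two lines and portable. A standalone sketch of the idea, with a duplicate-entry guard added as a small refinement that is not in the patch:

import os
import sys

# On Windows, Sublime Text does not put its own directory on the import
# path, so modules bundled next to the executable are otherwise invisible.
exe_dir = os.path.dirname(sys.executable)
if exe_dir not in sys.path:
    sys.path.append(exe_dir)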
c347e6e763b79a9c4af6d7776093ce9ed711c43d | monkeys/release.py | monkeys/release.py | from invoke import task, run
@task
def makerelease(ctx, version, local_only=False):
if not version:
raise Exception("You must specify a version!")
# FoodTruck assets.
print("Update node modules")
run("npm install")
print("Generating Wikked assets")
run("gulp")
if not local_only:
# Tag in Mercurial, which will then be used for PyPi version.
run("hg tag %s" % version)
# PyPi upload.
run("python setup.py sdist upload")
else:
print("Would tag repo with %s..." % version)
print("Would upload to PyPi...")
| from invoke import task, run
@task
def makerelease(ctx, version, local_only=False):
if not version:
raise Exception("You must specify a version!")
# FoodTruck assets.
print("Update node modules")
run("npm install")
print("Generating Wikked assets")
run("gulp")
if not local_only:
# Tag in Mercurial, which will then be used for PyPi version.
run("hg tag %s" % version)
# PyPi upload.
run("python setup.py sdist bdist_wheel")
run("twine upload dist/Wikked-%s.tar.gz" % version)
else:
print("Would tag repo with %s..." % version)
print("Would upload to PyPi...")
 | Use `twine` to deploy Wikked to PyPI. | cm: Use `twine` to deploy Wikked to PyPI.
| Python | apache-2.0 | ludovicchabant/Wikked,ludovicchabant/Wikked,ludovicchabant/Wikked | ---
+++
@@ -17,7 +17,8 @@
run("hg tag %s" % version)
# PyPi upload.
- run("python setup.py sdist upload")
+ run("python setup.py sdist bdist_wheel")
+ run("twine upload dist/Wikked-%s.tar.gz" % version)
else:
print("Would tag repo with %s..." % version)
print("Would upload to PyPi...") |
cbe447825408d7178e1b4eb4bf981600001ada32 | rymtracks/services/archiveorg.py | rymtracks/services/archiveorg.py | # -*- coding: utf-8 -*-
"""
This module contains Service implementation of Archive.org.
http://archive.org
"""
from . import Service, JSONMixin
from six import text_type
from tornado.httpclient import HTTPRequest
##############################################################################
class ArchiveOrg(JSONMixin, Service):
"""
Implementation of Service which is intended to parse Archive.org.
"""
def generate_request(self):
resource = self.url.rstrip("/").rpartition("/")[-1]
return HTTPRequest(
"http://archive.org/metadata/" + resource + "/files/",
use_gzip=True,
headers=dict(Accept="application/json")
)
def parse(self, response):
converted_response = self.convert_response(response)
tracks = {}
required_fields = ("title", "track", "length", "album")
for file_ in converted_response["result"]:
if file_.get("source") != "original":
continue
if not all(field in file_ for field in required_fields):
continue
track = int(file_["track"])
title = text_type(file_["title"])
length = text_type(file_["length"])
if ":" not in length:
length = int(float(length))
length = self.second_to_timestamp(length)
length = self.normalize_track_length(length)
tracks[track] = (title, length)
if not tracks:
raise Exception("Empty list")
return tuple(data for track, data in sorted(tracks.iteritems())) | # -*- coding: utf-8 -*-
"""
This module contains Service implementation of Archive.org.
http://archive.org
"""
from . import Service, JSONMixin
from six import text_type
from tornado.httpclient import HTTPRequest
##############################################################################
class ArchiveOrg(JSONMixin, Service):
"""
Implementation of Service which is intended to parse Archive.org.
"""
def generate_request(self):
resource = self.url.rstrip("/").rpartition("/")[-1]
return HTTPRequest(
"http://archive.org/metadata/" + resource + "/files/",
use_gzip=True,
headers=dict(Accept="application/json")
)
def parse(self, response):
converted_response = self.convert_response(response)
tracks = {}
required_fields = ("title", "track", "album")
for file_ in converted_response["result"]:
if file_.get("source") != "original":
continue
if not all(field in file_ for field in required_fields):
continue
track = int(file_["track"])
title = text_type(file_["title"])
length = text_type(file_.get("length", ""))
if length and ":" not in length:
length = int(float(length))
length = self.second_to_timestamp(length)
length = self.normalize_track_length(length)
tracks[track] = (title, length)
if not tracks:
raise Exception("Empty list")
return tuple(data for track, data in sorted(tracks.iteritems())) | Put weaker requirements on Archive.org service | Put weaker requirements on Archive.org service
| Python | mit | 9seconds/rymtracks | ---
+++
@@ -31,7 +31,7 @@
converted_response = self.convert_response(response)
tracks = {}
- required_fields = ("title", "track", "length", "album")
+ required_fields = ("title", "track", "album")
for file_ in converted_response["result"]:
if file_.get("source") != "original":
continue
@@ -40,8 +40,8 @@
track = int(file_["track"])
title = text_type(file_["title"])
- length = text_type(file_["length"])
- if ":" not in length:
+ length = text_type(file_.get("length", ""))
+ if length and ":" not in length:
length = int(float(length))
length = self.second_to_timestamp(length)
length = self.normalize_track_length(length) |
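The relaxed parse makes length optional and still normalises both shapes Archive.org metadata can take: raw seconds ("337.11") and ready-made timestamps ("5:37"). A minimal reproduction of that branch, with a stand-in for the Service.second_to_timestamp helper, whose real body is not shown here:

def second_to_timestamp(seconds):
    # Stand-in helper: render whole seconds as m:ss.
    return '%d:%02d' % divmod(int(seconds), 60)


for length in ('337.11', '5:37', ''):
    if length and ':' not in length:
        length = second_to_timestamp(int(float(length)))
    print(repr(length))  # '5:37', '5:37', ''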