| commit | subject | old_file | new_file | old_contents | lang | proba | diff |
|---|---|---|---|---|---|---|---|
| string (40 chars) | string (1-3.25k chars) | string (4-311 chars) | string (4-311 chars) | string (0-26.3k chars) | string (3 classes) | float64 (0-1) | string (0-7.82k chars) |
2460bd91632da0e6b02e0faf379fe27b273575bc
|
Add rotate.py
|
rotate.py
|
rotate.py
|
Python
| 0.000004 |
@@ -0,0 +1,72 @@
+"""Funtion to rotate image 90 degress."""


def rotate(matrix):
    pass
|
|
7a068872a071af2e60bf24ca7a00b3f1e999f139
|
add request builder
|
builders.py
|
builders.py
|
Python
| 0 |
@@ -0,0 +1,1565 @@
+# -*- coding: utf-8 -*-

import json


class PostBuilder(object):

    def __init__(self):
        self.parameters = {
            'title': '',
            'body': '',
            'coediting': False,
            'gist': False,
            'private': False,
            'tags': [],
            'tweet': False
        }

    def body(self, content):
        """
        Args:
            content: str
        """
        self.parameters['body'] = content
        return self

    def coediting(self, flag):
        """
        Args:
            flag: bool
        """
        self.parameters['coediting'] = flag
        return self

    def gist(self, flag):
        """
        Args:
            flag: bool
        """
        self.parameters['gist'] = flag
        return self

    def private(self, flag):
        """
        Args:
            flag: bool
        """
        self.parameters['private'] = flag
        return self

    def tags(self, t):
        """
        Args:
            t: list[dict]
                example : {"name": "tag_name", "versions": ["1.0"]}
        """
        self.parameters['tags'] = t
        return self

    def title(self, t):
        self.parameters['title'] = t
        return self

    def tweet(self, flag):
        self.parameters['tweet'] = flag
        return self

    def __str__(self):
        return json.dumps(self.parameters)

    def encode(self):
        """
        Returns:
            condoded request json string
        """
        return str(self).encode('utf-8')
|
|
857a5cb7effa03e9cd700fa69ae4d3b231212754
|
Create business.py
|
business.py
|
business.py
|
Python
| 0.000003 |
@@ -0,0 +1,144 @@
+# business logic here
# - account managing
# - create
# - edit
# - delete
# - payment data -> tokens
# - scripts running
# - statistics
|
|
4a45256b614ebf8a8455562b63c1d50ec1521c71
|
add a test class for auth.py
|
BigStash/t/test_auth.py
|
BigStash/t/test_auth.py
|
Python
| 0.000002 |
@@ -0,0 +1,958 @@
+from mock import Mock
from testtools.matchers import Contains
from testtools import TestCase


class AuthTest(TestCase):
    def setUp(self):
        super(AuthTest, self).setUp()

    def tearDown(self):
        super(AuthTest, self).tearDown()

    def _makeit(self, *args, **kwargs):
        from BigStash.auth import Auth
        return Auth(*args, **kwargs)

    def test_auth_class(self):
        assert self._makeit(self.getUniqueString(),
                            self.getUniqueString(),
                            self.getUniqueString())

    def test_do_login(self, stdout):
        requests = Mock()
        requests.post.return_value = self.getUniqueString()
        api_key = self.getUniqueString()
        api_secret = self.getUniqueString()
        url = self.getUniqueString()

        auth = self._makeit(api_key, api_secret, url)

        self.assertThat(auth.GetAPIKey(),
                        Contains('authentication succesfull'))
|
|
c212d1c25095f3b6e2f88cfccdc5c49280b22be0
|
Add test for tilequeue changes related to #1387.
|
integration-test/1387-business-and-spur-routes.py
|
integration-test/1387-business-and-spur-routes.py
|
Python
| 0 |
@@ -0,0 +1,511 @@
+from . import FixtureTest


class BusinessAndSpurRoutes(FixtureTest):

    def test_first_capitol_dr_i70_business(self):
        self.load_fixtures([
            'https://www.openstreetmap.org/relation/1933234',
        ])

        # check that First Capitol Dr, part of the above relation, is given
        # a network that includes the "business" extension.
        self.assert_has_feature(
            16, 16294, 25097, 'roads',
            {'id': 12276055, 'shield_text': '70', 'network': 'US:I:Business'})
|
|
672210c3af1a1b56a145b5265e5f316a1f6f36df
|
Add test folder
|
py3utils/test/__init__.py
|
py3utils/test/__init__.py
|
Python
| 0 |
@@ -0,0 +1 @@
+
|
|
7ec15caf8f2c9d0a21581261a356f6decc548061
|
Add some basic UI tests
|
test/ui_test.py
|
test/ui_test.py
|
Python
| 0.000001 |
@@ -0,0 +1,589 @@
+from app import app
import unittest

class UiTestCase(unittest.TestCase):
    def setUp(self):
        self.app = app.test_client()

    def test_index(self):
        self.assertEqual(self.app.get('/').status_code, 200)

    def test_no_page(self):
        self.assertEqual(self.app.get('/missing-page').status_code, 200)

    def test_all_pages(self):
        self.assertEqual(self.app.get('/.all-pages').status_code, 200)

    def test_edit(self):
        self.assertEqual(self.app.get('/.edit/Index').status_code, 200)
        self.assertEqual(self.app.get('/.edit/').status_code, 404)
|
|
59cc25693f2185ddfe36370d7f6641b2795d4798
|
Test File Upload
|
ladybug/test.py
|
ladybug/test.py
|
Python
| 0.000001 |
@@ -0,0 +1,39 @@
+import epw
from comfort.pmv import PMV
|
|
d1b2d330d2a43814d89c7f17a347e425c434957d
|
Add Eoin's resampling function.
|
pyrate/tools/resampler.py
|
pyrate/tools/resampler.py
|
Python
| 0.000001 |
@@ -0,0 +1,1171 @@
+import pandas as pd
import numpy

# Does the resampling
# Called internally, one of the wrapper functions should be called if its needed
######################
def convert_messages_to_hourly_bins(df,period='H',fillnans=False,run_resample=True):

    if df.empty:
        return df

    if run_resample:

        speed_ts=df.sog.resample(period,how='mean')

        draught_ts=df.draught.resample(period,how=numpy.max)
        df_new=pd.DataFrame({'sog':speed_ts,'draught':draught_ts})

        for col in df.columns:
            if col != 'sog' and col!='draught':
                df_new[col]=df[col].resample(period,how='first')

    else:
        df_new=[]

    #set the time equal to the index
    df_new['time']=df_new.index.values
    # fill forward
    if fillnans:
        #forward fill first
        df_new=df_new.fillna(method='pad')
        #now backward fill for remain
        df_new=df_new.fillna(method='bfill')
    else:
        #remove all entries where there are nans in speed
        df_new=df_new.ix[pd.isnull(df_new.sog)==False]
    return df_new
|
|
c9ef02e2c8c48affed869bbbd9649ebfaded0143
|
fix login
|
src/son_editor/app/__main__.py
|
src/son_editor/app/__main__.py
|
'''
Created on 18.07.2016
@author: Jonas
'''
import logging
import urllib
from os import path
from sys import platform
from flask import Flask, session
from flask.globals import request
from flask_restplus import Api
from son_editor import apis
from son_editor.app.database import db_session, init_db, scan_workspaces_dir
from son_editor.app.exceptions import NameConflict, NotFound, ExtNotReachable, PackException, InvalidArgument, \
UnauthorizedException
from son_editor.util.requestutil import CONFIG, prepare_response, prepare_error
from son_editor.app.securityservice import check_access
from son_editor.util.requestutil import CONFIG, prepare_response, prepare_error
app = Flask(__name__)
# turn off help message for 404 errors, just return error handlers message
app.config["ERROR_404_HELP"] = False
app.config["RESTPLUS_MASK_SWAGGER"] = False
# load secret key from config
app.secret_key = CONFIG['session']['secretKey']
api = Api(app, description="Son Editor Backend API")
logger = logging.getLogger(__name__)
@api.errorhandler(KeyError)
def handle_key_error(err):
logger.exception(err.args[0])
return prepare_error({"message": "Key '{}' is required in request data!".format(err.args[0])}, 400)
@api.errorhandler(NotFound)
def handle_not_found(err):
logger.warn(err.msg)
return prepare_error({"message": err.msg}, 404)
@api.errorhandler(InvalidArgument)
def handle_invalid_argument(err):
logger.warn(err.msg)
return prepare_error({"message": err.msg}, 400)
@api.errorhandler(ExtNotReachable)
def handle_not_reachable(err):
logger.warn(err.msg)
return prepare_error({"message": err.msg}, 404)
@api.errorhandler(NameConflict)
def handle_name_conflict(err):
logger.warn(err.msg)
return prepare_error({"message": err.msg}, 409)
@api.errorhandler(PackException)
def handle_pack_exception(err):
logger.warn(err.msg)
return prepare_error({"message": err.msg}, 409)
@api.errorhandler
def handle_general_exception(err):
logger.exception(str(err))
return prepare_error({"message": str(err)}, getattr(err, 'code', 500))
# registering all the api resources here
apis.init(api)
# print(app.url_map)
@app.teardown_appcontext
def shutdown_session(exception=None):
db_session.remove()
@app.before_request
def check_logged_in():
if request.method == 'OPTIONS':
return prepare_response()
elif CONFIG['testing']:
# Check if the user is allowed access the requested workspace resource (even for tests)
check_access(request)
return
# Check if the user is not logged in
elif 'access_token' not in session and request.endpoint not in ['login', 'static', 'shutdown']:
# show request for github login
return handle_unauthorized("Please log in")
# Check if the user is allowed access the requested workspace resource
try:
check_access(request)
except UnauthorizedException as uae:
return handle_unauthorized(uae.msg)
def handle_unauthorized(msg: str):
args = {"scope": "user:email",
"client_id": CONFIG['authentication']['ClientID']}
session["requested_endpoint"] = request.endpoint
return prepare_response({
'authorizationUrl': 'https://github.com/login/oauth/authorize/?{}'.format(urllib.parse.urlencode(args)),
"message": msg}, 401)
def setup():
setup_logging()
# Check check if database exists, otherwise create sqlite file
dbFile = CONFIG['database']['location']
if path.exists(dbFile):
logger.info('Using database file "%s"' % dbFile)
else:
logger.info('Init database on "%s"' % dbFile)
init_db()
# parse all workspaces already on the hard drive
scan_workspaces_dir()
# Start the flask server
logger.info("Launch flask server")
# Main entry point
def main(args=None):
if platform == "darwin":
app.run('0.0.0.0', debug=True)
else:
app.run('0.0.0.0')
def setup_logging():
# set up logging to file - see previous section for more details
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M',
filename='editor-backend.log',
filemode='w')
# define a Handler which writes INFO messages or higher to the sys.stderr
console = logging.StreamHandler()
console.setLevel(logging.INFO)
# set a format which is simpler for console use
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
# tell the handler to use this format
console.setFormatter(formatter)
# add the handler to the root logger
logging.getLogger('').addHandler(console)
setup()
if __name__ == "__main__":
main()
|
Python
| 0.000002 |
@@ -2300,24 +2300,75 @@
ogged_in():
+    if request.endpoint == 'login':
        return
if reque
@@ -2707,17 +2707,8 @@
in [
-'login',
'sta
|
3b064d6933ef7e910fab5634420358562866f1bc
|
Add test
|
tests/test_camera.py
|
tests/test_camera.py
|
Python
| 0.000005 |
@@ -0,0 +1,884 @@
+# coding: utf-8
from __future__ import unicode_literals
import unittest
import tempfile
import shutil

from flask import Flask
from pitools import camera

app = Flask(__name__)
app.register_blueprint(camera.blueprint)


class CameraTestCase(unittest.TestCase):

    def setUp(self):
        self.workspace = tempfile.mkdtemp()
        self.app = app.test_client()

    def tearDown(self):
        shutil.rmtree(self.workspace)

    def test_post_shot_api(self):
        '''
        Should fail with 405 method not allowed
        '''
        rv = self.app.post('/camera/shot')
        assert 405 == rv.status_code

    def test_get_shot_api(self):
        '''
        Should return a image with image/* MIME
        '''
        rv = self.app.get('/camera/shot')
        assert rv.content_type.startswith('image/')
        print dir(rv)

if __name__ == '__main__':
    unittest.main()
|
|
7ddfb39256229aa8c985ed8d70a29479187c76ad
|
Create script for beta invites
|
lily/management/commands/generate_beta_invites.py
|
lily/management/commands/generate_beta_invites.py
|
Python
| 0 |
@@ -0,0 +1,2338 @@
+import csv
import gc
import logging
from datetime import date
from hashlib import sha256

from django.conf import settings
from django.core.files.storage import default_storage
from django.core.management import call_command
from django.core.management.base import BaseCommand
from django.core.urlresolvers import reverse_lazy

from lily.tenant.models import Tenant

logger = logging.getLogger(__name__)


class Command(BaseCommand):
    def handle(self, **kwargs):
        current_site = 'app.hellolily.com'

        with default_storage.open('beta_signups_with_invites.csv', 'wb') as csvfile:
            spamwriter = csv.writer(csvfile, delimiter=',')
            spamwriter.writerow(['company', 'email', 'first_name', 'last_name', 'invite', 'country'])

            for row in self.read_csvfile('beta_signups.csv'):
                company = row['company']
                first_name = row['first_name']
                last_name = row['last_name']
                email = row['email']
                country = row['country']
                date_string = date.today().strftime('%d%m%Y')

                tenant = Tenant.objects.create(name=company, country=country)

                call_command('create_tenant', tenant=tenant.id)

                invite_hash = sha256('%s-%s-%s-%s' % (
                    tenant.id,
                    email,
                    date_string,
                    settings.SECRET_KEY
                )).hexdigest()

                invite_link = '%s://%s%s' % ('https', current_site, reverse_lazy('invitation_accept', kwargs={
                    'tenant_id': tenant.id,
                    'first_name': first_name,
                    'email': email,
                    'date': date_string,
                    'hash': invite_hash,
                }))

                spamwriter.writerow([company, email, first_name, last_name, invite_link, country])

                gc.collect()

    def read_csvfile(self, file_name):
        """
        Read from path assuming it's a file with ';' separated values.
        """
        # Newlines are breaking correct csv parsing. Write correct temporary file to parse.
        csv_file = default_storage.open(file_name, 'rU')
        reader = csv.DictReader(csv_file, delimiter=';', quoting=csv.QUOTE_ALL)
        for row in reader:
            yield row
|
|
5bc089a98bf578fd0c56e3e50cf76888ee74aba2
|
Add py solution for 537. Complex Number Multiplication
|
py/complex-number-multiplication.py
|
py/complex-number-multiplication.py
|
Python
| 0.000635 |
@@ -0,0 +1,465 @@
+import re
class Solution(object):
    def complexNumberMultiply(self, a, b):
        """
        :type a: str
        :type b: str
        :rtype: str
        """
        pat = re.compile(r'(-?\d+)\+(-?\d+)i')
        mata = pat.match(a)
        matb = pat.match(b)
        a = int(mata.group(1)), int(mata.group(2))
        b = int(matb.group(1)), int(matb.group(2))
        ans = a[0] * b[0] - a[1] * b[1], a[1] * b[0] + a[0] * b[1]
        return '%d+%di' % ans
|
|
400ad736a271946569efa438e8fc9d00a7ce0075
|
test for #22
|
tests/test_issues.py
|
tests/test_issues.py
|
Python
| 0.000001 |
@@ -0,0 +1,1835 @@
+from tgbot import plugintest
from tgbot.botapi import Update
from test_plugin import TestPlugin


class TestPluginTest(plugintest.PluginTestCase):
    def setUp(self):
        self.plugin = TestPlugin()
        self.bot = self.fake_bot(
            '',
            plugins=[self.plugin],
        )
        self.received_id = 1

    def test_user_update(self):
        """Test for issue #22"""
        sender = {
            'id': 1,
            'first_name': 'John',
            'last_name': 'Doe',
        }
        self.receive_message('test', sender=sender)
        self.assertEqual(self.bot.models.User.get(self.bot.models.User.id == 1).first_name, 'John')

        sender['first_name'] = 'Paul'
        self.receive_message('test', sender=sender)
        self.assertEqual(self.bot.models.User.get(self.bot.models.User.id == 1).first_name, 'Paul')

    def receive_message(self, text, sender=None, chat=None, reply_to_message_id=None):
        if sender is None:
            sender = {
                'id': 1,
                'first_name': 'John',
                'last_name': 'Doe',
            }

        if chat is None:
            chat = {'type': 'private'}
            chat.update(sender)

        reply_to_message = None

        if reply_to_message_id is not None:
            reply_to_message = {
                'message_id': reply_to_message_id,
                'chat': chat,
            }

        self.bot.process_update(
            Update.from_dict({
                'update_id': self.received_id,
                'message': {
                    'message_id': self.received_id,
                    'text': text,
                    'chat': chat,
                    'from': sender,
                    'reply_to_message': reply_to_message,
                }
            })
        )

        self.received_id += 1
|
|
06e82c471afa83bf0f08f0779b32dd8a09b8d1ba
|
Add py solution for 350. Intersection of Two Arrays II
|
py/intersection-of-two-arrays-ii.py
|
py/intersection-of-two-arrays-ii.py
|
Python
| 0.001158 |
@@ -0,0 +1,297 @@
+from collections import Counter
class Solution(object):
    def intersect(self, nums1, nums2):
        """
        :type nums1: List[int]
        :type nums2: List[int]
        :rtype: List[int]
        """
        c1, c2 = Counter(nums1), Counter(nums2)
        return list((c1 & c2).elements())
|
|
742e827178ee28663699acbb4a5f0ad5440649fc
|
add new keyboard_locks module
|
py3status/modules/keyboard_locks.py
|
py3status/modules/keyboard_locks.py
|
Python
| 0.000001 |
@@ -0,0 +1,2956 @@
+# -*- coding: utf-8 -*-
"""
Monitor CapsLock, NumLock, and ScrLock keys

NumLock: Allows the user to type numbers by pressing the keys on the number pad,
rather than having them act as up, down, left, right, page up, end, and so forth.

CapsLock: When enabled, letters the user types will be in uppercase by default
rather than lowercase.

ScrLock: In some applications, such as spreadsheets, the lock mode is used to
change the behavior of the cursor keys to scroll the document instead of the cursor.

Configuration parameters:
    cache_timeout: refresh interval for this module (default 1)
    icon_capslock_off: show when Caps Lock is off (default 'CAP')
    icon_capslock_on: show when Caps Lock is on (default 'CAP')
    icon_numlock_off: show when Num Lock is off (default 'NUM')
    icon_numlock_on: show when Num Lock is off (default 'NUM')
    icon_scrlock_off: show when Scroll Lock is off (default 'SCR')
    icon_scrlock_on: show when Scroll Lock is on (default 'SCR')

Color options:
    color_good: Lock on
    color_bad: Lock off

@author lasers
"""


class Py3status:
    """
    """
    # available configuration parameters
    cache_timeout = 1
    icon_capslock_off = "CAP"
    icon_capslock_on = "CAP"
    icon_numlock_off = "NUM"
    icon_numlock_on = "NUM"
    icon_scrlock_off = "SCR"
    icon_scrlock_on = "SCR"

    def keyboard_lock(self):
        out = self.py3.command_output('xset -q')

        capslock_color = self.py3.COLOR_BAD
        capslock_icon = self.icon_capslock_off
        numlock_color = self.py3.COLOR_BAD
        numlock_icon = self.icon_numlock_off
        scrlock_color = self.py3.COLOR_BAD
        scrlock_icon = self.icon_scrlock_off

        if 'on' in out.split("Caps Lock:")[1][0:6]:
            capslock_color = self.py3.COLOR_GOOD
            capslock_icon = self.icon_capslock_on

        if 'on' in out.split("Num Lock:")[1][0:6]:
            numlock_color = self.py3.COLOR_GOOD
            numlock_icon = self.icon_numlock_on

        if 'on' in out.split("Scroll Lock:")[1][0:6]:
            scrlock_color = self.py3.COLOR_GOOD
            scrlock_icon = self.icon_scrlock_on

        return {
            'cached_until': self.py3.time_in(self.cache_timeout),
            'composite': [
                {
                    'color': capslock_color,
                    'full_text': capslock_icon,
                },
                {
                    'full_text': ' '
                },
                {
                    'color': numlock_color,
                    'full_text': numlock_icon,
                },
                {
                    'full_text': ' '
                },
                {
                    'color': scrlock_color,
                    'full_text': scrlock_icon,
                },
            ]
        }


if __name__ == "__main__":
    """
    Run module in test mode.
    """
    from py3status.module_test import module_test
    module_test(Py3status)
|
|
9d7c348170fc0f9d339a2ef57a9e64b1ceaa7516
|
Add demo MNH event scraper
|
web/whim/core/scrapers/mnh.py
|
web/whim/core/scrapers/mnh.py
|
Python
| 0 |
@@ -0,0 +1,2891 @@
+from datetime import datetime, timezone, time

import requests
from bs4 import BeautifulSoup

from django.db import transaction

from .base import BaseScraper
from .exceptions import ScraperException

from whim.core.models import Event, Source, Category
from whim.core.utils import get_object_or_none
from whim.core.time import zero_time_with_timezone


class MNHScraper(BaseScraper):
    def get_data(self):
        url = "https://manxnationalheritage.im/whats-on/"
        parsed = []
        page = requests.get(url)
        if page.status_code == 200:
            soup = BeautifulSoup(page.content, 'html.parser')
            events = soup.select(
                "div.columns.no-padding-grid.push-top-m > div > a")
            parsed = []
            for e in events:
                tmp = {
                    "link": e.get('href'),
                    "category": e.find("span", {"class": "badge"}).string
                }
                #get rest of data
                article = e.find("div", {"class": "text"})
                if article:
                    tmp["name"] = article.contents[0].string #h2
                    tmp["description"] = article.contents[3].contents[
                        0].string #p
                    #dates
                    try:
                        dates = article.contents[2].contents[0].string.replace(
                            " ", "").replace("–", "-").split("-") #span
                        tmp["start_date"] = zero_time_with_timezone(
                            datetime.strptime(dates[0], "%d/%m/%Y"))
                        if len(dates) > 1:
                            tmp["end_date"] = zero_time_with_timezone(
                                datetime.strptime(dates[1], "%d/%m/%Y"))
                    except:
                        continue
                parsed.append(tmp)
            return parsed
        else:
            raise ScraperException("Unexpected status code")

    @transaction.atomic
    def run(self, source_id):
        source = Source.objects.get(id=source_id)
        for scraped_event in self.get_data():
            event = get_object_or_none(
                Event, source=source, name=scraped_event["name"])
            if event is None:
                category, _ = Category.objects.get_or_create_from_name(
                    scraped_event["category"])
                Event.objects.create(
                    source=source,
                    category=category,
                    name=scraped_event["name"],
                    description=scraped_event["description"],
                    start_datetime=scraped_event["start_date"],
                    end_datetime=scraped_event.get("end_date"),
                    link=scraped_event["link"],
                    tags=[])
        #mark this run
        source.last_run_date = datetime.now(timezone.utc)
        source.save()
|
|
0f208d6b08968c779bd3aafa6e43763419eb117b
|
fix mac builds
|
gyp/debugger.gyp
|
gyp/debugger.gyp
|
{
'targets': [
{
'target_name': 'debugger',
'type': 'executable',
'include_dirs' : [
'../src/core',
'../debugger', # To pull SkDebugger.h
'../debugger/QT', # For all the QT UI Goodies
'../src/gpu', # To pull gl/GrGLUtil.h
],
'sources': [
'../debugger/debuggermain.cpp',
'../debugger/SkDebugCanvas.h',
'../debugger/SkDebugCanvas.cpp',
'../debugger/SkDebugger.cpp',
'../debugger/SkDrawCommand.h',
'../debugger/SkDrawCommand.cpp',
'../debugger/QT/moc_4.6.2_SkCanvasWidget.cpp',
'../debugger/QT/moc_4.6.2_SkDebuggerGUI.cpp',
'../debugger/QT/moc_4.6.2_SkInspectorWidget.cpp',
'../debugger/QT/moc_4.6.2_SkSettingsWidget.cpp',
'../debugger/QT/moc_4.6.2_SkRasterWidget.cpp',
'../debugger/QT/moc_4.6.2_SkGLWidget.cpp',
'../debugger/QT/SkDebuggerGUI.cpp',
'../debugger/QT/SkDebuggerGUI.h',
'../debugger/QT/SkCanvasWidget.cpp',
'../debugger/QT/SkCanvasWidget.h',
'../debugger/QT/SkInspectorWidget.h',
'../debugger/QT/SkInspectorWidget.cpp',
'../debugger/QT/SkListWidget.h',
'../debugger/QT/SkListWidget.cpp',
'../debugger/SkObjectParser.h',
'../debugger/SkObjectParser.cpp',
'../debugger/QT/SkSettingsWidget.h',
'../debugger/QT/SkSettingsWidget.cpp',
'../debugger/QT/SkGLWidget.h',
'../debugger/QT/SkGLWidget.cpp',
'../debugger/QT/SkRasterWidget.h',
'../debugger/QT/SkRasterWidget.cpp',
# To update this file edit SkIcons.qrc and rerun rcc to generate cpp
'../debugger/QT/qrc_SkIcons.cpp',
],
'dependencies': [
'core.gyp:core',
'images.gyp:images',
'ports.gyp:ports',
'effects.gyp:effects',
'gpu.gyp:gr',
'gpu.gyp:skgr',
],
'conditions': [
[ 'skia_os in ["linux", "freebsd", "openbsd", "solaris"]', {
'include_dirs': [
'/usr/include/qt4',
'/usr/include/qt4/QtCore',
'/usr/include/qt4/QtGui',
'/usr/include/qt4/QtOpenGL',
],
'link_settings': {
'libraries' : [
'-lQtCore',
'-lQtGui',
'-lQtOpenGL'
],
},
}],
[ 'skia_os == "mac" and skia_arch_width != 64', {
'error': '<!(skia_arch_width must be 64 bits when building the debugger on mac)'
}],
[ 'skia_os == "mac"', {
'mac_bundle' : 1,
'include_dirs': [
'/Library/Frameworks/QtCore.framework/Headers/',
'/Library/Frameworks/QtGui.framework/Headers/',
'/Library/Frameworks/QtOpenGL.framework/Headers/',
],
'link_settings': {
'libraries': [
'/Library/Frameworks/QtCore.framework',
'/Library/Frameworks/QtGui.framework',
'/Library/Frameworks/QtOpenGL.framework',
],
},
}],
[ 'skia_os in ["ios"]', {
'error': '<!(debugger unsupported with skia_os=<(skia_os))'
}],
[ 'skia_os == "win"', {
'include_dirs': [
# TODO(chudy): Dynamically generate these paths?
'C:/Qt/4.6.4/include',
'C:/Qt/4.6.4/include/QtCore',
'C:/Qt/4.6.4/include/QtGui',
'C:/Qt/4.6.4/include/QtOpenGL',
],
'link_settings': {
'libraries': [
'C:/Qt/4.6.4/lib/QtCore4.lib',
'C:/Qt/4.6.4/lib/QtGui4.lib',
'C:/Qt/4.6.4/lib/QtOpenGL.lib',
],
},
}],
]
},
],
}
# Local Variables:
# tab-width:2
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=2 shiftwidth=2:
|
Python
| 0.000001 |
@@ -2392,24 +2392,81 @@
h != 64', {
+# this is now mysteriously triggered for all mac builds
#
'e
|
423554349177a5c8ed987f249b13fac9c8b8d79a
|
Add links to upgrade actions in the change log
|
gen-changelog.py
|
gen-changelog.py
|
# Writes a changelog in trac WikiFormatting based on a git log
from __future__ import unicode_literals, division, absolute_import
import codecs
from itertools import ifilter
import os
import re
import subprocess
import sys
import dateutil.parser
out_path = 'ChangeLog'
if len(sys.argv) > 1:
dir_name = os.path.dirname(sys.argv[1])
if dir_name and not os.path.isdir(dir_name):
print 'Output dir doesn\'t exist: %s' % sys.argv[1]
sys.exit(1)
out_path = sys.argv[1]
# 1.0.3280 was last revision on svn
git_log_output = subprocess.check_output(['git', 'log', '--pretty=%n---%n.%d%n%ci%n%h%n%s%n%-b%n---%n',
'--topo-order', '--decorate=full','refs/tags/1.0.3280..HEAD'])
git_log_iter = ifilter(None, git_log_output.decode('utf-8').splitlines())
with codecs.open(out_path, 'w', encoding='utf-8') as out_file:
for line in git_log_iter:
assert line == '---'
tag = re.search('refs/tags/([\d.]+)', next(git_log_iter))
date = dateutil.parser.parse(next(git_log_iter))
commit_hash = next(git_log_iter)
body = list(iter(git_log_iter.next, '---'))
if tag:
out_file.write('\n=== %s (%s) ===\n\n' % (tag.group(1), date.strftime('%Y.%m.%d')))
out_file.write(' * (%s) %s\n' % (commit_hash, '[[BR]]\n '.join(body)))
|
Python
| 0 |
@@ -240,16 +240,73 @@
l.parser
+
import requests

from flexget.utils.soup import get_soup

out_pa
@@ -545,16 +545,123 @@
rgv[1]

+ua_response = requests.get('http://flexget.com/wiki/UpgradeActions')
ua_soup = get_soup(ua_response.text)

# 1.0.32
@@ -1326,16 +1326,255 @@
if tag:
+        ver = tag.group(1)
        ua_link = ''
        result = ua_soup.find('h3', text=re.compile(re.escape(ver)))
        if result:
            ua_link = '^[wiki:UpgradeActions#%s upgrade actions]^ ' % result['id']
@@ -1607,16 +1607,18 @@
%s (%s)
+%s
===\n\n'
@@ -1621,28 +1621,19 @@
n\n' % (
-tag.group(1)
+ver
, date.s
@@ -1651,16 +1651,25 @@
.%m.%d')
+, ua_link
))
|
35b1fc5e43f553e95ad4c8a42c37ca66639d9120
|
add test for core.py
|
HARK/tests/test_core.py
|
HARK/tests/test_core.py
|
Python
| 0 |
@@ -0,0 +1,371 @@
+"""
This file implements unit tests for interpolation methods
"""
from HARK.core import HARKobject

import numpy as np
import unittest

class testHARKobject(unittest.TestCase):
    def setUp(self):
        self.obj_a = HARKobject()
        self.obj_b = HARKobject()

    def test_distance(self):
        self.assertRaises(AttributeError, self.obj_a.distance(self.obj_b))
|
|
be17fa5026fd7cd64ccfc6e7241137a3f864725b
|
add google doc generator
|
generate_gpad.py
|
generate_gpad.py
|
Python
| 0 |
@@ -0,0 +1,1087 @@
+import httplib2
import webbrowser

from apiclient.discovery import build
from oauth2client import client

flow = client.flow_from_clientsecrets(
    'client_secret.json',
    scope=['https://www.googleapis.com/auth/drive.file',
           'https://www.googleapis.com/auth/urlshortener'],
    redirect_uri='urn:ietf:wg:oauth:2.0:oob')

webbrowser.open(flow.step1_get_authorize_url())
auth_code = raw_input('Enter the auth code: ')
credentials = flow.step2_exchange(auth_code)
http = credentials.authorize(httplib2.Http())

service = build('drive', 'v2', http)

body = {
    'mimeType': 'application/vnd.google-apps.document',
    'title': 'hodor'
}
file = service.files().insert(body=body).execute()

body = {
    'role': 'writer',
    'type': 'anyone',
    'withLink': True
}
service.permissions().insert(fileId=file['id'], body=body).execute()

file = service.files().get(fileId=file['id']).execute()
share = file['alternateLink']

service = build('urlshortener', 'v1', http)

body = { 'longUrl': share }
short = service.url().insert(body=body).execute()
print short['id']
|
|
f9e6176bc43262882a0d50f4d850c04c3460b9d8
|
Add SS :-)
|
rna_pdb_tools/SecondaryStructure.py
|
rna_pdb_tools/SecondaryStructure.py
|
Python
| 0 |
@@ -0,0 +1,783 @@
+#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""Seq and secondary structure prediction"""

import os
import tempfile
import shutil

VARNA_PATH = '/Users/magnus/skills/rnax/varna_tut/'

def draw_ss(title,seq, ss, img_out):
    """"""
    curr = os.getcwd()
    os.chdir(VARNA_PATH)#VARNAv3-93-src')
    print os.getcwd()
    t = tempfile.NamedTemporaryFile(delete=False)
    t.name += '.png'
    os.system('java -cp VARNA.jar fr.orsay.lri.varna.applications.VARNAcmd -sequenceDBN ' + seq + " -structureDBN '" + ss + "' -o " + t.name + " -title " + title + " -resolution '2.0'")
    os.chdir(curr)
    print img_out
    shutil.move(t.name, img_out)

if __name__ == '__main__':
    seq = 'AAAAAAA'
    ss = '((...))'
    img_out = 'out.png'
    draw_ss('rna', seq, ss, img_out)
|
|
e5fed1895b69d824e3dc773dd6c6f88974e24f67
|
discard module (#61452)
|
lib/ansible/modules/network/checkpoint/cp_mgmt_discard.py
|
lib/ansible/modules/network/checkpoint/cp_mgmt_discard.py
|
Python
| 0 |
@@ -0,0 +1,2164 @@
+#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage CheckPoint Firewall (c) 2019
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
#

from __future__ import (absolute_import, division, print_function)

__metaclass__ = type

ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = """
---
module: cp_mgmt_discard
short_description: All changes done by user are discarded and removed from database.
description:
  - All changes done by user are discarded and removed from database.
  - All operations are performed over Web Services API.
version_added: "2.9"
author: "Or Soffer (@chkp-orso)"
options:
  uid:
    description:
      - Session unique identifier. Specify it to discard a different session than the one you currently use.
    type: str
extends_documentation_fragment: checkpoint_commands
"""

EXAMPLES = """
- name: discard
  cp_mgmt_discard:
"""

RETURN = """
cp_mgmt_discard:
  description: The checkpoint discard output.
  returned: always.
  type: dict
"""

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.checkpoint.checkpoint import checkpoint_argument_spec_for_commands, api_command


def main():
    argument_spec = dict(
        uid=dict(type='str')
    )
    argument_spec.update(checkpoint_argument_spec_for_commands)

    module = AnsibleModule(argument_spec=argument_spec)

    command = "discard"

    result = api_command(module, command)
    module.exit_json(**result)


if __name__ == '__main__':
    main()
|
|
1bd0669e67fc082cbd496b3aa54c6a6f6a0d5fce
|
Add grab.util.log::print_dict method for fuzzy displaying of dict objects in console
|
grab/util/log.py
|
grab/util/log.py
|
Python
| 0.000001 |
@@ -0,0 +1,511 @@
+def repr_value(val):
    if isinstance(val, unicode):
        return val.encode('utf-8')
    elif isinstance(val, (list, tuple)):
        return '[%s]' % ', '.join(repr_val(x) for x in val)
    elif isinstance(val, dict):
        return '{%s}' % ', '.join('%s: %s' % (repr_val(x), repr_val(y)) for x, y in val.items())
    else:
        return str(val)


def print_dict(dic):
    print '[---'
    for key, val in sorted(dic.items(), key=lambda x: x[0]):
        print key, ':', repr_value(val)
    print '---]'
|
|
e5bd12b67f58c1a099c2bd2dd66b043b43969267
|
Add a tool to publish packages in the repo to pub. Review URL: https://codereview.chromium.org//11415191
|
tools/publish_pkg.py
|
tools/publish_pkg.py
|
Python
| 0.000003 |
@@ -0,0 +1,2693 @@
+#!/usr/bin/env python
#
# Copyright (c) 2012, the Dart project authors.  Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
#
# Script to push a package to pub.
#
# Usage: publish_pkg.py pkg_dir


import os
import os.path
import re
import shutil
import sys
import subprocess
import tempfile

def ReplaceInFiles(paths, subs):
  '''Reads a series of files, applies a series of substitutions to each, and
     saves them back out. subs should be a list of (pattern, replace) tuples.'''
  for path in paths:
    contents = open(path).read()
    for pattern, replace in subs:
      contents = re.sub(pattern, replace, contents)

    dest = open(path, 'w')
    dest.write(contents)
    dest.close()


def ReadVersion(file, field):
  for line in open(file).read().split('\n'):
    [k, v] = re.split('\s+', line)
    if field == k:
      return int(v)

def Main(argv):
  HOME = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))

  versionFile = os.path.join(HOME, 'tools', 'VERSION')
  major = ReadVersion(versionFile, 'MAJOR')
  minor = ReadVersion(versionFile, 'MINOR')
  build = ReadVersion(versionFile, 'BUILD')
  patch = ReadVersion(versionFile, 'PATCH')

  if major == 0 and minor <= 1:
    print 'Error: Do not run this script from a bleeding_edge checkout.'
    return -1

  version = '%d.%d.%d+%d' % (major, minor, build, patch)

  tmpDir = tempfile.mkdtemp()
  pkgName = argv[1].split('/').pop()
  shutil.copytree(os.path.join(HOME, argv[1]),
                  os.path.join(tmpDir, pkgName))

  # Add version to pubspec file.
  pubspec = os.path.join(tmpDir, pkgName, 'pubspec.yaml')
  pubspecFile = open(pubspec)
  lines = pubspecFile.readlines()
  pubspecFile.close()
  pubspecFile = open(pubspec, 'w')
  foundVersion = False
  for line in lines:
    if line.startswith('version:'):
      foundVersion = True
    if line.startswith('description:') and not foundVersion:
      pubspecFile.write('version: ' + version + '\n')
    if not line.startswith('  sdk:'):
      pubspecFile.write(line)
  pubspecFile.close()

  # Replace '../*/pkg' imports and parts.
  for root, dirs, files in os.walk(os.path.join(tmpDir, pkgName)):
    for name in files:
      if name.endswith('.dart'):
        ReplaceInFiles([os.path.join(root, name)],
                       [(r'(import|part)(\s+)(\'|")(\.\./)+pkg/', r'\1\2\3package:')])

  print 'publishing version ' + version + ' of ' + argv[1] + ' to pub\n'
  print tmpDir
  subprocess.call(['pub', 'publish'], cwd=os.path.join(tmpDir, pkgName))
  shutil.rmtree(tmpDir)

if __name__ == '__main__':
  sys.exit(Main(sys.argv))
|
|
f734cbd91ff8997b9f2aac6bbec2238f8b5f7511
|
Create __init__.py
|
graf/__init__.py
|
graf/__init__.py
|
Python
| 0.000429 |
@@ -0,0 +1,2 @@
+
|
|
a1c2423c349757f4725ef1250b9de084a469683c
|
Fix indentation
|
ceph_medic/checks/cluster.py
|
ceph_medic/checks/cluster.py
|
from ceph_medic import metadata
#
# Error checks
#
def check_osds_exist():
code = 'ECLS1'
msg = 'There are no OSDs available'
osd_count = len(metadata['osds'].keys())
if not osd_count:
return code, msg
def check_nearfull():
"""
Checks if the osd capacity is at nearfull
"""
code = 'ECLS2'
msg = 'Cluster is nearfull'
try:
osd_map = metadata['cluster']['status']['osdmap']['osdmap']
except KeyError:
return
if osd_map['nearfull']:
return code, msg
|
Python
| 0.000065 |
@@ -223,16 +223,17 @@
e, msg

+
def chec
@@ -246,28 +246,24 @@
full():
-
"""
@@ -250,28 +250,24 @@
():
    """
-
Checks i
@@ -300,20 +300,16 @@
earfull
-
"""
@@ -312,20 +312,16 @@
"""
-
-
code = '
@@ -327,20 +327,16 @@
'ECLS2'
-
msg
@@ -359,20 +359,16 @@
arfull'
-
try:
@@ -376,24 +376,16 @@
-
-
osd_map
@@ -436,20 +436,16 @@
sdmap']
-
exce
@@ -469,27 +469,15 @@
-
-
return
-
@@ -500,24 +500,16 @@
full']:
-
@@ -504,28 +504,29 @@
']:
        return code, msg
+
|
84990a4ef20c2e0f42133ed06ade5ce2d4e98ae3
|
Save team member picture with extension.
|
chmvh_website/team/models.py
|
chmvh_website/team/models.py
|
from django.db import models
def team_member_image_name(instance, filename):
return 'team/{0}'.format(instance.name)
class TeamMember(models.Model):
bio = models.TextField(
verbose_name='biography')
name = models.CharField(
max_length=50,
unique=True,
verbose_name='name')
picture = models.ImageField(
blank=True,
null=True,
upload_to=team_member_image_name)
def __str__(self):
"""Return the team member's name"""
return self.name
|
Python
| 0 |
@@ -1,8 +1,19 @@
+import os

from dja
@@ -83,16 +83,57 @@
ename):
+    _, ext = os.path.splitext(filename)

retu
@@ -144,16 +144,19 @@
team/{0}
+{1}
'.format
@@ -169,16 +169,21 @@
nce.name
+, ext
)


clas
|
bf993439a7c53bcffe099a61138cf8c17c39f943
|
Add Partner label factory
|
accelerator/migrations/0066_partnerlabel.py
|
accelerator/migrations/0066_partnerlabel.py
|
Python
| 0 |
@@ -0,0 +1,995 @@
+# Generated by Django 2.2.10 on 2021-08-24 13:27

from django.conf import settings
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('accelerator', '0065_organization_note'),
    ]

    operations = [
        migrations.CreateModel(
            name='PartnerLabel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True, null=True)),
                ('updated_at', models.DateTimeField(auto_now=True, null=True)),
                ('label', models.CharField(max_length=255)),
                ('partners', models.ManyToManyField(blank=True, to=settings.ACCELERATOR_PARTNER_MODEL)),
            ],
            options={
                'abstract': False,
                'managed': True,
                'swappable': 'ACCELERATOR_PARTNERLABEL_MODEL',
            },
        ),
    ]
|
|
6f9dcee86d986f05e289b39f6b4700d5d302f551
|
add tests for base models
|
jsonrpc/tests/test_base.py
|
jsonrpc/tests/test_base.py
|
Python
| 0 |
@@ -0,0 +1,851 @@
+""" Test base JSON-RPC classes."""
import unittest

from ..base import JSONRPCBaseRequest, JSONRPCBaseResponse


class TestJSONRPCBaseRequest(unittest.TestCase):

    """ Test JSONRPCBaseRequest functionality."""

    def test_data(self):
        request = JSONRPCBaseRequest()
        self.assertEqual(request.data, {})

        with self.assertRaises(ValueError):
            request.data = []

        with self.assertRaises(ValueError):
            request.data = None


class TestJSONRPCBaseResponse(unittest.TestCase):

    """ Test JSONRPCBaseResponse functionality."""

    def test_data(self):
        response = JSONRPCBaseResponse(result="")
        self.assertEqual(response.data, {})

        with self.assertRaises(ValueError):
            response.data = []

        with self.assertRaises(ValueError):
            response.data = None
|
|
216b96e7f36d8b72ccd3ddf6809f0cc5af14d15a
|
Add fat_ready.py
|
fat_ready.py
|
fat_ready.py
|
Python
| 0.00007 |
@@ -0,0 +1,845 @@
+#!/usr/bin/env python3
'''Make all files in a directory suitable for copying to a FAT filesystem.
'''

from __future__ import print_function

import os
import os.path
import sys

from six import u

if __name__ == u('__main__'):
    if len(sys.argv) != 2:
        print(u('Usage: {} <directory to make FAT ready>').format(sys.argv[0]),
              file=sys.stderr)
        sys.exit(1)

    fat_ready_dir = sys.argv[1]
    for root, dirs, files in os.walk(fat_ready_dir):
        for name in files:
            if u(':') in name:
                new_name = name.replace(u(':'), u(' '))

                full_path_old = os.path.join(root, name)
                full_path_new = os.path.join(root, new_name)

                print(u('Renaming {} to {}').format(full_path_old, full_path_new))
                os.rename(full_path_old, full_path_new)
|
|
272c67aeccbd0c8c1cb1cf3a583b3586abef1832
|
raise ImportError when SparkSQL uses old-style classes
|
blaze/sparksql.py
|
blaze/sparksql.py
|
from __future__ import absolute_import, division, print_function
import datashape
from datashape import (dshape, DataShape, Record, isdimension, Option,
discover, Tuple)
from .dispatch import dispatch
from .expr import Expr
from .compatibility import _strtypes
__all__ = []
try:
import pyspark
from pyspark import sql, RDD
from pyspark.sql import (IntegerType, FloatType, StringType, TimestampType,
StructType, StructField, ArrayType, SchemaRDD, SQLContext,
ShortType, DoubleType, BooleanType, LongType)
from pyspark import SparkContext
except ImportError:
pyspark = None
def deoption(ds):
"""
>>> deoption('int32')
ctype("int32")
>>> deoption('?int32')
ctype("int32")
"""
if isinstance(ds, str):
ds = dshape(ds)
if isinstance(ds, DataShape) and not isdimension(ds[0]):
return deoption(ds[0])
if isinstance(ds, Option):
return ds.ty
else:
return ds
if pyspark:
types = {datashape.int16: ShortType(),
datashape.int32: IntegerType(),
datashape.int64: IntegerType(),
datashape.float32: FloatType(),
datashape.float64: DoubleType(),
datashape.real: DoubleType(),
datashape.time_: TimestampType(),
datashape.date_: TimestampType(),
datashape.datetime_: TimestampType(),
datashape.bool_: BooleanType(),
datashape.string: StringType()}
rev_types = {IntegerType(): datashape.int64,
ShortType(): datashape.int32,
LongType(): datashape.int64,
FloatType(): datashape.float32,
DoubleType(): datashape.float64,
StringType(): datashape.string,
TimestampType(): datashape.datetime_,
BooleanType(): datashape.bool_}
def sparksql_to_ds(ss):
""" Convert datashape to SparkSQL type system
>>> sparksql_to_ds(IntegerType()) # doctest: +SKIP
ctype("int64")
>>> sparksql_to_ds(ArrayType(IntegerType(), False)) # doctest: +SKIP
dshape("var * int64")
>>> sparksql_to_ds(ArrayType(IntegerType(), True)) # doctest: +SKIP
dshape("var * ?int64")
>>> sparksql_to_ds(StructType([ # doctest: +SKIP
... StructField('name', StringType(), False),
... StructField('amount', IntegerType(), True)]))
dshape("{ name : string, amount : ?int64 }")
"""
if ss in rev_types:
return rev_types[ss]
if isinstance(ss, ArrayType):
elem = sparksql_to_ds(ss.elementType)
if ss.containsNull:
return datashape.var * Option(elem)
else:
return datashape.var * elem
if isinstance(ss, StructType):
return dshape(Record([[field.name, Option(sparksql_to_ds(field.dataType))
if field.nullable
else sparksql_to_ds(field.dataType)]
for field in ss.fields]))
raise NotImplementedError("SparkSQL type not known %s" % ss)
def ds_to_sparksql(ds):
""" Convert datashape to SparkSQL type system
>>> print(ds_to_sparksql('int32')) # doctest: +SKIP
IntegerType
>>> print(ds_to_sparksql('5 * int32')) # doctest: +SKIP
ArrayType(IntegerType,false)
>>> print(ds_to_sparksql('5 * ?int32')) # doctest: +SKIP
ArrayType(IntegerType,true)
>>> print(ds_to_sparksql('{name: string, amount: int32}')) # doctest: +SKIP
StructType(List(StructField(name,StringType,false),StructField(amount,IntegerType,false)))
>>> print(ds_to_sparksql('10 * {name: string, amount: ?int32}')) # doctest: +SKIP
ArrayType(StructType(List(StructField(name,StringType,false),StructField(amount,IntegerType,true))),false)
"""
if isinstance(ds, str):
return ds_to_sparksql(dshape(ds))
if isinstance(ds, Record):
return sql.StructType([
sql.StructField(name,
ds_to_sparksql(deoption(typ)),
isinstance(typ, datashape.Option))
for name, typ in ds.fields])
if isinstance(ds, DataShape):
if isdimension(ds[0]):
elem = ds.subshape[0]
if isinstance(elem, DataShape) and len(elem) == 1:
elem = elem[0]
return sql.ArrayType(ds_to_sparksql(deoption(elem)),
isinstance(elem, Option))
else:
return ds_to_sparksql(ds[0])
if ds in types:
return types[ds]
raise NotImplementedError()
@dispatch(SQLContext, RDD)
def into(sqlContext, rdd, schema=None, columns=None, **kwargs):
""" Convert a normal PySpark RDD to a SparkSQL RDD
Schema inferred by ds_to_sparksql. Can also specify it explicitly with
schema keyword argument.
"""
schema = schema or discover(rdd).subshape[0]
if isinstance(schema[0], Tuple):
columns = columns or list(range(len(schema[0].dshapes)))
types = schema[0].dshapes
schema = dshape(Record(list(zip(columns, types))))
sql_schema = ds_to_sparksql(schema)
return sqlContext.applySchema(rdd, sql_schema)
@dispatch(SQLContext, (Expr, object) + _strtypes)
def into(sqlContext, o, **kwargs):
schema = kwargs.pop('schema', None) or discover(o).subshape[0]
return into(sqlContext, into(sqlContext._sc, o), schema=schema, **kwargs)
@dispatch((tuple, list, set), SchemaRDD)
def into(a, b, **kwargs):
if not isinstance(a, type):
a = type(a)
return a(map(tuple, b.collect()))
@dispatch(SchemaRDD)
def discover(srdd):
return datashape.var * sparksql_to_ds(srdd.schema())
|
Python
| 0 |
@@ -622,16 +622,185 @@
= None

+
if not issubclass(SQLContext, object):
    raise ImportError("This version of SparkSQL uses old-style classes"
                      "Please update to newer version of Spark")

def deop
|
257134bdaea7c250d5956c4095adf0b917b65aa6
|
Fix null case for event details
|
database/dict_converters/event_details_converter.py
|
database/dict_converters/event_details_converter.py
|
from database.dict_converters.converter_base import ConverterBase
class EventDetailsConverter(ConverterBase):
SUBVERSIONS = { # Increment every time a change to the dict is made
3: 0,
}
@classmethod
def convert(cls, event_details, dict_version):
CONVERTERS = {
3: cls.eventDetailsConverter_v3,
}
return CONVERTERS[dict_version](event_details)
@classmethod
def eventDetailsConverter_v3(cls, event_details):
event_details_dict = {
'alliances': event_details.alliance_selections,
'district_points': event_details.district_points,
'rankings': event_details.renderable_rankings,
'stats': event_details.matchstats,
}
return event_details_dict
|
Python
| 0.000017 |
@@ -561,16 +561,43 @@
lections
+ if event_details else None
,
@@ -650,16 +650,43 @@
t_points
+ if event_details else None
,
@@ -736,16 +736,43 @@
rankings
+ if event_details else None
,
@@ -810,16 +810,43 @@
tchstats
+ if event_details else None
,
|
1a296a5203c422a7eecc0be71a91994798f01c10
|
copy name->title for BehaviorAction and BehaviorSequences
|
tndata_backend/goals/migrations/0020_populate_basebehavior_title_slugs.py
|
tndata_backend/goals/migrations/0020_populate_basebehavior_title_slugs.py
|
Python
| 0 |
@@ -0,0 +1,1454 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations%0Afrom django.utils.text import slugify%0A%0A%0Adef _copy_name_to_title(model, apps):%0A %22%22%22Copy the values from the Model's name -%3E title and name_slug -%3E title_slug.%22%22%22%0A M = apps.get_model(%22goals%22, model)%0A for obj in M.objects.all():%0A obj.title = obj.name%0A obj.title_slug = obj.name_slug or slugify(obj.name)%0A obj.save()%0A%0A%0Adef _copy_title_to_name(model, apps):%0A %22%22%22Copy the values from the Model's title -%3E name and title_slug -%3E name_slug.%22%22%22%0A M = apps.get_model(%22goals%22, model)%0A for obj in M.objects.all():%0A obj.name = obj.title%0A obj.name_slug = obj.title_slug or slugify(obj.title)%0A obj.save()%0A%0A%0Adef copy_behavior_title(apps, schema_editor):%0A _copy_name_to_title(%22BehaviorSequence%22, apps)%0A%0A%0Adef copy_action_title(apps, schema_editor):%0A _copy_name_to_title(%22BehaviorAction%22, apps)%0A%0A%0Adef rev_copy_behavior_title(apps, schema_editor):%0A _copy_title_to_name(%22BehaviorSequence%22, apps)%0A%0A%0Adef rev_copy_action_title(apps, schema_editor):%0A _copy_title_to_name(%22BehaviorAction%22, apps)%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('goals', '0019_auto_20150312_1553'),%0A %5D%0A%0A operations = %5B%0A migrations.RunPython(copy_behavior_title, reverse_code=rev_copy_behavior_title),%0A migrations.RunPython(copy_action_title, reverse_code=rev_copy_action_title),%0A %5D%0A
|
|
94cfc0a7598dd8dcf455311f8bb41c2016c7c3a8
|
Create solution.py
|
hackerrank/algorithms/warmup/easy/plus_minus/py/solution.py
|
hackerrank/algorithms/warmup/easy/plus_minus/py/solution.py
|
Python
| 0.000018 |
@@ -0,0 +1,821 @@
+#include <math.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <assert.h>
#include <limits.h>
#include <stdbool.h>

int main(void)
{
    int n;
    scanf("%d",&n);
    int arr[n];
    for(int arr_i = 0; arr_i < n; arr_i++){
        scanf("%d",&arr[arr_i]);
    }

    const double UNIT_RATIO = 1.0 / n;

    double posratio = 0.0;
    double negratio = 0.0;
    double zratio = 0.0;

    int value = 0;

    for (int i = 0; i < n; ++i) {
        value = arr[i];
        if (value > 0) {
            posratio += UNIT_RATIO;
        } else if (value < 0) {
            negratio += UNIT_RATIO;
        } else {
            zratio += UNIT_RATIO;
        }
    }

    printf("%lf\n", posratio);
    printf("%lf\n", negratio);
    printf("%lf\n", zratio);

    return 0;
}
|
|
a27d30c4514cef93e054d5597829dc758b04c95e
|
add xycut in util
|
TranskribusDU/util/XYcut.py
|
TranskribusDU/util/XYcut.py
|
Python
| 0.000001 |
@@ -0,0 +1,2311 @@
+# -*- coding: utf-8 -*-
"""


    XYcut.py

    vertical/ horizontal cuts for page elements:

    copyright Naver Labs Europe 2018
    READ project

"""

def mergeSegments(lSegment, iMin):
    """Take as input a list of interval on some axis,
    together with the object that contributed to this interval.
    In this module it's a textbox or an image
    Merge overlapping segments
    Return a sorted list of disjoints segments together
    with the associated objects (that is the union of the objects
    associated to the segments being merged)
    Contiguous segments closer than iMin are merged as well.
    INPUT:  [ (a,b,o) , ...]
    or INPUT: [ (a,b, [o,...]) , ...]
    OUPUT: [ (c,d,[o,...]) , ...], min, max

    bProjOn may contain the name of the axis on which the projection has
    been done ("X" for an x-cut, "Y" for an y-cut)
    then in frontier mode , we keep smal intervals if they are coinciding
    with a frontier (e.g. a very narrow horizontal split coinciding with
    a line is kept despite it's narower than iMin
    p and q are the boundaries along the other axis of the block to cut
    """
    lMergedSegment = []
    for seg in lSegment:
        (aaux,baux,o) = seg
        lo = (o,)
        a = min(aaux,baux) #just in case...
        b = max(aaux,baux) #just in case...

        #find all overlapping or close-enough segments and merge them
        lOverlap = []
        for mseg in lMergedSegment:
            [aa,bb,loaux] = mseg
            iOver = max(a,aa) - min(b, bb) #negative means overlap
            if iOver <= iMin: #overlap or spaced by less than iMin pixel
                lOverlap.append(mseg)
            else:
                pass #nothing to merge with

        if lOverlap:
            #merge the current segment with all overlapping msegments
            for aa, bb, lolo in lOverlap:
                if aa<a: a=aa
                if bb>b: b=bb
                lo = lo + tuple(lolo)
            for mseg in lOverlap:
                lMergedSegment.remove(mseg)
        #mseg = [a, b, lo]
        mseg = (a, b, tuple(lo))
        lMergedSegment.append(mseg)

    #sorted list
    lMergedSegment.sort()
    amin = lMergedSegment[0][0]
    amax = lMergedSegment[-1][1]
    return tuple(lMergedSegment), amin, amax
|
|
e5e24ddccf5de2fba743a97c1790406259399d18
|
Create one fixture for all tests
|
conftest.py
|
conftest.py
|
Python
| 0 |
@@ -0,0 +1,202 @@
+import pytest
from fixture.application import Application


@pytest.fixture(scope = "session")
def app(request):
    fixture = Application()
    request.addfinalizer(fixture.destroy)
    return fixture
|
|
044135bc108336cb4543b84578465c04a48e4eb9
|
Fix the get_version import for local use
|
tvrenamr/tvrenamr.py
|
tvrenamr/tvrenamr.py
|
#!/usr/bin/python
import logging
import os
import sys
from . import get_version
from config import Config
from episode import Episode
from errors import *
from logs import start_logging
from main import TvRenamr
from options import OptionParser
log = logging.getLogger('Core')
parser = OptionParser(usage='tvr [options] <file/folder>', version='Tv Renamr %s' % get_version())
options, args = parser.parse_args()
class FrontEnd():
def __init__(self, path):
# start logging
if options.debug:
options.log_level = 10
start_logging(options.log_file, options.log_level, options.quiet)
possible_config = (
options.config,
os.path.expanduser('~/.tvrenamr/config.yml'),
os.path.join(sys.path[0], 'config.yml'))
# get the first viable config from the list of possibles
self.config = None
for config in possible_config:
if config is not None and os.path.exists(config):
self.config = Config(config)
break
if self.config is None:
raise ConfigNotFoundException
# no path was passed in so assuming current directory.
if not path:
if options.debug:
log.debug('No file or directory specified, using '
'current directory')
path = [os.getcwd()]
# determine type
try:
file_list = self.__determine_type(path, options.recursive,
options.ignore_filelist)
except OSError:
parser.error('\'%s\' is not a file or directory. Ruh Roe!' % path)
if options.dry or options.debug:
self.__start_dry_run()
# kick off a rename for each file in the list
for details in file_list:
self.rename(details)
# if we're not doing a dry run add a blank line for clarity
if options.debug is False and options.dry is False:
log.info('')
if options.dry or options.debug:
self.__stop_dry_run()
def __determine_type(self, path, recursive=False, ignore_filelist=None):
"""
Determines which files need to be processed for renaming.
:param path: The input file or directory.
:param recursive: Do a recursive search for files if 'path' is a
directory. Default is False.
:param ignore_filelist: Optional set of files to ignore from renaming.
Often used by filtering
methods such as Deluge.
:returns: A list of files to be renamed.
:rtype: A list of dictionaries, with the keys directory and filename.
"""
filelist = []
if len(path) > 1:
# must have used wildcards
for fn in path:
filelist.append(os.path.split(fn))
return filelist
else:
if os.path.isdir(path[0]):
for root, dirs, files in os.walk(path[0]):
for fname in files:
# If we have a file we should be ignoring and skipping.
if ignore_filelist is not None and \
(os.path.join(root, fname) in ignore_filelist):
continue
filelist.append((root, fname))
# Don't want a recursive walk?
if not recursive:
break
return filelist
elif os.path.isfile(path[0]):
return [os.path.split(path[0])]
else:
raise OSError
def rename(self, details):
working, filename = details
try:
tv = TvRenamr(working, self.config, options.debug, options.dry)
episode = Episode()
episode.show, episode.season, episode.episode, episode.extension = tv.extract_details_from_file(filename, user_regex=options.regex)
if options.show:
episode.show = options.show
if options.season:
episode.season = options.season
if options.episode:
episode.episode = options.episode
episode.title = tv.retrieve_episode_name(episode, library=options.library, canonical=options.canonical)
episode.show = tv.format_show_name(episode.show, the=options.the, override=options.show_override)
path = tv.build_path(episode, rename_dir=options.rename_dir, organise=options.organise, format=options.output_format)
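            # Move/rename the file to the destination path just built.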
tv.rename(filename, path)
except (ConfigNotFoundException, NoNetworkConnectionException, NoMoreLibrariesException):
if options.dry or options.debug:
self.__stop_dry_run()
exit()
except (EmptyEpisodeNameException, \
EpisodeAlreadyExistsInDirectoryException, \
EpisodeNotFoundException, \
IncorrectCustomRegularExpressionSyntaxException, \
OutputFormatMissingSyntaxException, ShowNotFoundException, \
UnexpectedFormatException, XMLEmptyException):
pass
except Exception as err:
if options.debug:
log.critical(err)
pass
def __start_dry_run(self):
log.log(26, 'Dry Run beginning.')
log.log(26, '-' * 70)
log.log(26, '')
def __stop_dry_run(self):
log.log(26, '')
log.log(26, '-' * 70)
log.log(26, 'Dry Run complete. No files were harmed in the process.')
log.log(26, '')
def run():
# Need to capture the Deluge arguments here, before we enter rename so
# we can instead pass it as a callback to be called once we've fetched
# the required information from deluge.
if options.deluge or options.deluge_ratio:
if options.deluge and not options.deluge_ratio:
options.deluge_ratio = 0
from lib.filter_deluge import get_deluge_ignore_file_list
get_deluge_ignore_file_list(rename, options.deluge_ratio, args[0])
else:
FrontEnd(args)
if __name__ == "__main__":
run()
|
Python
| 0.000002 |
@@ -54,17 +54,24 @@
s%0A%0Afrom
-.
+__init__
import
|
74d274f02fa23f1a6799e9f96ccb1ef77162f1bc
|
Add new package: consul (#18044)
|
var/spack/repos/builtin/packages/consul/package.py
|
var/spack/repos/builtin/packages/consul/package.py
|
Python
| 0 |
@@ -0,0 +1,981 @@
+# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other%0A# Spack Project Developers. See the top-level COPYRIGHT file for details.%0A#%0A# SPDX-License-Identifier: (Apache-2.0 OR MIT)%0A%0Afrom spack import *%0A%0A%0Aclass Consul(MakefilePackage):%0A %22%22%22Consul is a distributed, highly available,%0A and data center aware solution to connect and configure applications%0A across dynamic, distributed infrastructure.%22%22%22%0A%0A homepage = %22https://www.consul.io%22%0A url = %22https://github.com/hashicorp/consul/archive/v1.8.1.tar.gz%22%0A%0A version('1.8.1', sha256='c173e9866e6181b3679a942233adade118976414f6ca2da8deaea0fa2bba9b06')%0A version('1.8.0', sha256='a87925bde6aecddf532dfd050e907b6a0a6447cdd5dc4f49b46d97c9f73b58f9')%0A version('1.7.6', sha256='893abad7563c1f085303705f72d8789b338236972123f0ab6d2be24dbb58c2ac')%0A%0A depends_on('[email protected]:')%0A%0A def install(self, spec, prefix):%0A install_tree('bin', prefix.bin)%0A install_tree('lib', prefix.lib)%0A
|
|
6427406fc627b467dd4851f32b6a15a74356ef2d
|
Create new package. (#6043)
|
var/spack/repos/builtin/packages/r-gviz/package.py
|
var/spack/repos/builtin/packages/r-gviz/package.py
|
Python
| 0 |
@@ -0,0 +1,3127 @@
+##############################################################################%0A# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.%0A# Produced at the Lawrence Livermore National Laboratory.%0A#%0A# This file is part of Spack.%0A# Created by Todd Gamblin, [email protected], All rights reserved.%0A# LLNL-CODE-647188%0A#%0A# For details, see https://github.com/llnl/spack%0A# Please also see the NOTICE and LICENSE files for our notice and the LGPL.%0A#%0A# This program is free software; you can redistribute it and/or modify%0A# it under the terms of the GNU Lesser General Public License (as%0A# published by the Free Software Foundation) version 2.1, February 1999.%0A#%0A# This program is distributed in the hope that it will be useful, but%0A# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF%0A# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and%0A# conditions of the GNU Lesser General Public License for more details.%0A#%0A# You should have received a copy of the GNU Lesser General Public%0A# License along with this program; if not, write to the Free Software%0A# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA%0A##############################################################################%0Afrom spack import *%0A%0A%0Aclass RGviz(RPackage):%0A %22%22%22Genomic data analyses requires integrated visualization%0A of known genomic information and new experimental data. Gviz%0A uses the biomaRt and the rtracklayer packages to perform live%0A annotation queries to Ensembl and UCSC and translates this to%0A e.g. gene/transcript structures in viewports of the grid%0A graphics package. This results in genomic information plotted%0A together with your data.%22%22%22%0A%0A homepage = %22http://bioconductor.org/packages/Gviz/%22%0A url = %22https://git.bioconductor.org/packages/Gviz%22%0A%0A version('1.20.0', git='https://git.bioconductor.org/packages/Gviz', commit='299b8255e1b03932cebe287c3690d58c88f5ba5c')%0A%0A depends_on('[email protected]:3.4.9', when='@1.20.0')%0A depends_on('r-s4vectors', type=('build', 'run'))%0A depends_on('r-iranges', type=('build', 'run'))%0A depends_on('r-genomicranges', type=('build', 'run'))%0A depends_on('r-xvector', type=('build', 'run'))%0A depends_on('r-rtracklayer', type=('build', 'run'))%0A depends_on('r-lattice', type=('build', 'run'))%0A depends_on('r-rcolorbrewer', type=('build', 'run'))%0A depends_on('r-biomart', type=('build', 'run'))%0A depends_on('r-annotationdbi', type=('build', 'run'))%0A depends_on('r-biobase', type=('build', 'run'))%0A depends_on('r-genomicfeatures', type=('build', 'run'))%0A depends_on('r-bsgenome', type=('build', 'run'))%0A depends_on('r-biostrings', type=('build', 'run'))%0A depends_on('r-biovizbase', type=('build', 'run'))%0A depends_on('r-rsamtools', type=('build', 'run'))%0A depends_on('r-latticeextra', type=('build', 'run'))%0A depends_on('r-matrixstats', type=('build', 'run'))%0A depends_on('r-genomicalignments', type=('build', 'run'))%0A depends_on('r-genomeinfodb', type=('build', 'run'))%0A depends_on('r-biocgenerics', type=('build', 'run'))%0A depends_on('r-digest', type=('build', 'run'))%0A
|
|
0a25b4d4c0bc511592d797f6b214dd6fa3f70dd8
|
FIX unhardwire 'modDate' to easy support for 'creDate'
|
scripts/managedb/lastest-updates.py
|
scripts/managedb/lastest-updates.py
|
#!/usr/bin/python
# -*- coding: latin-1 -*-
# Copyright 2013 Telefonica Investigacion y Desarrollo, S.A.U
#
# This file is part of Orion Context Broker.
#
# Orion Context Broker is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Orion Context Broker is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
# General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Orion Context Broker. If not, see http://www.gnu.org/licenses/.
#
# For those usages not covered by this license please contact with
# fermin at tid dot es
from pymongo import MongoClient, DESCENDING
from datetime import datetime
from sys import argv
def printAttrs(attrHash, max):
# Given that each entity can have N attributes where N can be greater than 1, we need to add a second level
    # of limit control (beyond the ".limit(max)" in the mongo query)
n = 0
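    # Walk the date buckets newest-first; each bucket can hold several attributes.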
for d in sorted(attrHash.keys(), reverse=True):
for attr in attrHash[d]:
printableDate = datetime.fromtimestamp(d).strftime('%Y-%m-%d %H:%M:%S')
print '-- ' + printableDate + ': '+ attr
n += 1
if n == max:
return
if 4 <= len(argv) <= 5:
type = argv[1]
db = argv[2]
max = int(argv[3])
else:
print 'Wrong number of arguments'
print ' Usage: ./lastest-updates.py <entities|attributes> <db> <limit> [entity_filter] '
print ' Example ./lastest-updates.py entities orion 10'
print ' Example ./lastest-updates.py entities orion 10 TEST_SENSOR'
exit(1)
# Optional argument: filter
query = {}
if len(argv) == 5:
query['_id.id'] = {'$regex': argv[4]}
client = MongoClient('localhost', 27017)
col = client[db]['entities']
if type == 'entities':
query['modDate'] = {'$exists': True}
for doc in col.find(query).sort('modDate', direction=DESCENDING).limit(max):
modDate = int(doc['modDate'])
dateString = datetime.fromtimestamp(modDate).strftime('%Y-%m-%d %H:%M:%S')
entityString = doc['_id']['id'] + ' (' + doc['_id']['type'] + ')'
print '-- ' + dateString + ': ' + entityString
elif type == 'attributes':
# Attributes are stored in a hash. The key of the hash is the modification date, so it is actually a
    # hash of lists (since several attributes could have the same modification date)
attrHash = { }
query['attrs.modDate'] = {'$exists': True}
for doc in col.find(query).sort('modDate', direction=DESCENDING).limit(max):
entityString = doc['_id']['id'] + ' (' + doc['_id']['type'] + ')'
for attr in doc['attrs']:
if attr.has_key('modDate'):
modDate = int(attr['modDate'])
attrString = attr['name'] + ' - ' + entityString
if attrHash.has_key(modDate):
attrHash[modDate].append(attrString)
else:
attrHash[modDate] = [attrString]
printAttrs(attrHash, max)
else:
    print 'Unsupported type: <' + type + '>'
    exit(1)
|
Python
| 0 |
@@ -1013,16 +1013,176 @@
t argv%0A%0A
+# This script can be easily adapted to used creation date instead of modification date%0A# just changing the following variable to 'creDate'%0ArefDate = 'modDate'%0A%0A
def prin
@@ -2265,25 +2265,23 @@
query%5B
-'mod
+ref
Date
-'
%5D = %7B'$e
@@ -2326,33 +2326,31 @@
query).sort(
-'mod
+ref
Date
-'
, direction=
@@ -2373,36 +2373,33 @@
t(max):%0A
-modD
+d
ate = int(doc%5B'm
@@ -2396,25 +2396,23 @@
int(doc%5B
-'mod
+ref
Date
-'
%5D)%0A
@@ -2450,20 +2450,17 @@
mestamp(
-modD
+d
ate).str
@@ -2871,24 +2871,27 @@
%5B'attrs.
-mod
+' + ref
Date
-'
%5D = %7B'$e
@@ -2940,25 +2940,23 @@
y).sort(
-'mod
+ref
Date
-'
, direct
@@ -3118,25 +3118,23 @@
has_key(
-'mod
+ref
Date
-'
):%0A
@@ -3142,20 +3142,17 @@
-modD
+d
ate = in
@@ -3158,25 +3158,23 @@
nt(attr%5B
-'mod
+ref
Date
-'
%5D)%0A
@@ -3265,20 +3265,17 @@
has_key(
-modD
+d
ate):%0A
@@ -3295,28 +3295,25 @@
attrHash%5B
-modD
+d
ate%5D.append(
@@ -3371,20 +3371,17 @@
ttrHash%5B
-modD
+d
ate%5D = %5B
|
0f68667e2ddfee6a370afe5c816a1358cfba799e
|
Correct GitHub URL.
|
openfisca_qt/widgets/__init__.py
|
openfisca_qt/widgets/__init__.py
|
# -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <[email protected]>
#
# Copyright (C) 2011, 2012, 2013, 2014 OpenFisca Team
# https://github.com/openfisca/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# The widgets variables are created by each country-specific package (cf function init_country())
# Note: The variables below are not inited (to None) here, to ensure that execution will fail when they are used before
# OpenFisca country-specific package is properly inited.
__all__ = [
'CompositionWidget',
]
|
Python
| 0.000028 |
@@ -201,26 +201,16 @@
penfisca
-/openfisca
%0A#%0A# Thi
|
b02b3e2e385bc04b2f1b1160371d55f8b6122006
|
add migration file
|
pyanalysis/apps/corpus/migrations/0001_initial.py
|
pyanalysis/apps/corpus/migrations/0001_initial.py
|
Python
| 0.000001 |
@@ -0,0 +1,3077 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import models, migrations%0Aimport django.utils.timezone%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A %5D%0A%0A operations = %5B%0A migrations.CreateModel(%0A name='Dataset',%0A fields=%5B%0A ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),%0A ('name', models.CharField(max_length=150)),%0A ('description', models.TextField()),%0A ('created_at', models.DateTimeField(auto_now_add=True)),%0A %5D,%0A options=%7B%0A %7D,%0A bases=(models.Model,),%0A ),%0A migrations.CreateModel(%0A name='Line',%0A fields=%5B%0A ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),%0A ('number', models.IntegerField(default=0)),%0A ('text', models.TextField(default=b'', null=True, blank=True)),%0A %5D,%0A options=%7B%0A %7D,%0A bases=(models.Model,),%0A ),%0A migrations.CreateModel(%0A name='Script',%0A fields=%5B%0A ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),%0A ('name', models.CharField(max_length=256)),%0A ('last_modified', models.DateTimeField(default=django.utils.timezone.now)),%0A ('dataset', models.ForeignKey(related_name='scripts', to='corpus.Dataset')),%0A %5D,%0A options=%7B%0A %7D,%0A bases=(models.Model,),%0A ),%0A migrations.CreateModel(%0A name='Token',%0A fields=%5B%0A ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),%0A ('st_col', models.IntegerField(default=0)),%0A ('ed_col', models.IntegerField(default=0)),%0A ('type', models.CharField(default=b'', max_length=32, null=True, blank=True)),%0A ('text', models.TextField(default=b'', null=True, blank=True)),%0A ('line', models.ForeignKey(related_name='tokens', to='corpus.Line')),%0A ('script', models.ForeignKey(related_name='tokens', to='corpus.Script')),%0A %5D,%0A options=%7B%0A %7D,%0A bases=(models.Model,),%0A ),%0A migrations.AlterIndexTogether(%0A name='token',%0A index_together=set(%5B('script', 'type')%5D),%0A ),%0A migrations.AlterIndexTogether(%0A name='script',%0A index_together=set(%5B('dataset', 'last_modified'), ('dataset', 'name')%5D),%0A ),%0A migrations.AddField(%0A model_name='line',%0A name='script',%0A field=models.ForeignKey(related_name='lines', to='corpus.Script'),%0A preserve_default=True,%0A ),%0A migrations.AlterIndexTogether(%0A name='line',%0A index_together=set(%5B('script', 'number')%5D),%0A ),%0A %5D%0A
|
|
980594ab26887a4628620e9e0e00d89ddbdc4e49
|
Create hackstring.py
|
hackstring.py
|
hackstring.py
|
Python
| 0.000003 |
@@ -0,0 +1,148 @@
+#! /usr/bin/env python%0Aimport sys%0A%0Aprint %22%22.join(%5B%22%25%25%2502x%22 %25 ord(x) for x in sys.argv%5B1%5D%5D)%0Aprint %22%22.join(%5B%22%5C%5Cu%2504x%22 %25 ord(x) for x in sys.argv%5B1%5D%5D)%0A
|
|
2df34105a58a05fd1f50f88bc967360b4bd9afc8
|
Create LongestIncreasingSubseq_001.py
|
leetcode/300-Longest-Increasing-Subsequence/LongestIncreasingSubseq_001.py
|
leetcode/300-Longest-Increasing-Subsequence/LongestIncreasingSubseq_001.py
|
Python
| 0.000562 |
@@ -0,0 +1,453 @@
+class Solution(object):%0A def lengthOfLIS(self, nums):%0A %22%22%22%0A :type nums: List%5Bint%5D%0A :rtype: int%0A %22%22%22%0A n = len(nums)%0A %0A if n == 0:%0A return 0%0A %0A maxlen = %5B1 for i in range(n)%5D%0A for i in range(1, n):%0A for j in range(i):%0A if nums%5Bj%5D %3C nums%5Bi%5D and maxlen%5Bj%5D + 1 %3E maxlen%5Bi%5D:%0A maxlen%5Bi%5D = maxlen%5Bj%5D + 1%0A%0A return max(maxlen)%0A
|
|
c2d26a5942cb22f4510abd6d5ff8c83d6a386810
|
make migrations and model updates
|
masterlist/candidates/migrations/0005_auto_20160725_1759.py
|
masterlist/candidates/migrations/0005_auto_20160725_1759.py
|
Python
| 0 |
@@ -0,0 +1,2655 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import models, migrations%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('candidates', '0004_auto_20160708_1422'),%0A %5D%0A%0A operations = %5B%0A migrations.RemoveField(%0A model_name='candidate',%0A name='mask_blue',%0A ),%0A migrations.RemoveField(%0A model_name='candidate',%0A name='mask_red',%0A ),%0A migrations.RemoveField(%0A model_name='candidate',%0A name='sb_max',%0A ),%0A migrations.RemoveField(%0A model_name='candidate',%0A name='sb_min',%0A ),%0A migrations.AddField(%0A model_name='candidate',%0A name='data_season',%0A field=models.CharField(max_length=64, null=True, verbose_name=b'DES data season'),%0A ),%0A migrations.AddField(%0A model_name='candidate',%0A name='dec_field',%0A field=models.FloatField(null=True, verbose_name=b'Dec Field'),%0A ),%0A migrations.AddField(%0A model_name='candidate',%0A name='followup_date',%0A field=models.CharField(max_length=64, null=True, verbose_name=b'followup date'),%0A ),%0A migrations.AddField(%0A model_name='candidate',%0A name='followup_facility',%0A field=models.CharField(max_length=64, null=True, verbose_name=b'followup facility'),%0A ),%0A migrations.AddField(%0A model_name='candidate',%0A name='followup_success',%0A field=models.CharField(max_length=64, null=True, verbose_name=b'followup success'),%0A ),%0A migrations.AddField(%0A model_name='candidate',%0A name='lens_class',%0A field=models.CharField(max_length=64, null=True, verbose_name=b'Lens (Y/N)'),%0A ),%0A migrations.AddField(%0A model_name='candidate',%0A name='ra_field',%0A field=models.FloatField(null=True, verbose_name=b'RA Field'),%0A ),%0A migrations.AddField(%0A model_name='candidate',%0A name='system_type',%0A field=models.CharField(max_length=64, null=True, verbose_name=b'Type of Candidate (gal or qso)'),%0A ),%0A migrations.AddField(%0A model_name='candidate',%0A name='z_phot_lens',%0A field=models.FloatField(null=True, verbose_name=b'Z photo lens'),%0A ),%0A migrations.AddField(%0A model_name='candidate',%0A name='z_spec_src',%0A field=models.FloatField(null=True, verbose_name=b'Z spec source '),%0A ),%0A %5D%0A
|
|
94e4d30dbdbcf9765bf731b1bd792d0fcf3f9d4a
|
Add prettification middleware
|
maccman/middleware/prettify.py
|
maccman/middleware/prettify.py
|
Python
| 0.000001 |
@@ -0,0 +1,356 @@
+from bs4 import BeautifulSoup%0A%0Aclass PrettifyMiddleware(object):%0A def process_response(self, request, response):%0A if response.status_code == 200:%0A if response%5B%22content-type%22%5D.startswith(%22text/html%22):%0A beauty = BeautifulSoup(response.content)%0A response.content = beauty.prettify()%0A return response%0A%0A
|
|
be530dc2e18ccbeeb3e4396f47d2a527364e6ab1
|
Add migration for ADS.added_via
|
migrations/versions/f8c0bde5d368_match_sqlalchemy_defintion_and_actual_.py
|
migrations/versions/f8c0bde5d368_match_sqlalchemy_defintion_and_actual_.py
|
Python
| 0.000001 |
@@ -0,0 +1,654 @@
+%22%22%22Match sqlalchemy defintion and actual schema%0A%0ARevision ID: f8c0bde5d368%0ARevises: ae904ac154cf%0ACreate Date: 2019-11-19 11:24:40.555110%0A%0A%22%22%22%0A%0A# revision identifiers, used by Alembic.%0Arevision = 'f8c0bde5d368'%0Adown_revision = 'ae904ac154cf'%0A%0Afrom alembic import op%0Aimport sqlalchemy as sa%0Afrom sqlalchemy.dialects import postgresql%0A%0Adef upgrade():%0A op.alter_column('ADS', 'added_via',%0A existing_type=postgresql.ENUM('form', 'api', name='via'),%0A nullable=False)%0A%0A%0Adef downgrade():%0A op.alter_column('ADS', 'added_via',%0A existing_type=postgresql.ENUM('form', 'api', name='via'),%0A nullable=True)%0A
|
|
50415300e3ce1e7cc10782aa4661da14d900d6de
|
Add code generation tests
|
benchmarks/regression/benchmarks/codegen.py
|
benchmarks/regression/benchmarks/codegen.py
|
Python
| 0 |
@@ -0,0 +1,310 @@
+from examples.seismic.tti.tti_example import tti_setup%0A%0A%0Arepeat = 3%0A%0A%0Aclass TTI(object):%0A%0A space_order = 12%0A%0A def setup(self):%0A self.solver = tti_setup(space_order=TTI.space_order)%0A%0A def time_forward(self):%0A self.solver.op_fwd()%0A%0A def time_adjoint(self):%0A self.solver.op_adj()%0A
|
|
5dd3424e9d95c12c2fb4c770f527b85b928da705
|
create a separate module for decoration/coloring
|
decorate.py
|
decorate.py
|
Python
| 0 |
@@ -0,0 +1,202 @@
+class bcolors:%0A HEADER = '%5C033%5B95m'%0A OKBLUE = '%5C033%5B94m'%0A OKGREEN = '%5C033%5B92m'%0A WARNING = '%5C033%5B93m'%0A FAIL = '%5C033%5B91m'%0A ENDC = '%5C033%5B0m'%0A BOLD = '%5C033%5B1m'%0A UNDERLINE = '%5C033%5B4m'
|
|
89d08498f7f7e12fa5486eb88f64829621aa27f9
|
Add missing migration
|
src/nodeconductor_saltstack/saltstack/migrations/0005_label_change.py
|
src/nodeconductor_saltstack/saltstack/migrations/0005_label_change.py
|
Python
| 0.0002 |
@@ -0,0 +1,438 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations, models%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('saltstack', '0004_remove_spl_state'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterModelOptions(%0A name='saltstackservice',%0A options=%7B'verbose_name': 'SaltStack service', 'verbose_name_plural': 'SaltStack service'%7D,%0A ),%0A %5D%0A
|
|
8cde7867eb98cc56533ab0156768ad2409e8c65e
|
Fix bug
|
user_notification.py
|
user_notification.py
|
#!/usr/bin/python
# encoding: utf-8
from datetime import datetime
import string
from django.core.mail import send_mail
from notifications.models import UserNotification
import settings
email_template = u'''Hello ${username},
    You have ${cnt} new message(s); please follow the link below to view them:
    ${msg_url}
Thank you for using our site!
The ${site_name} team
'''
today = datetime.now()
site_name = settings.SITE_NAME
subject = u'%s: New messages' % site_name
site_base = settings.SITE_BASE
if site_base[-1] != '/':
site_base += '/'
url = site_base + 'home/my/'
notifications = UserNotification.objects.all()
d = {}
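# Count today's notifications per recipient.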
for e in notifications:
if today.year != e.timestamp.year or today.month != e.timestamp.month or \
today.day != e.timestamp.day:
continue
if d.has_key(e.to_user):
d[e.to_user] += 1
else:
d[e.to_user] = 1
for k in d.keys():
to_user = k
cnt = d[k]
template = string.Template(email_template)
content = template.substitute(username=to_user, cnt=cnt, msg_url=url, \
site_name=site_name)
send_mail(subject, content, settings.DEFAULT_FROM_EMAIL, [to_user], \
fail_silently=False)
|
Python
| 0.000001 |
@@ -451,16 +451,93 @@
+= '/'%0A
+site_root = settings.SITE_ROOT%0Aif site_root%5B-1%5D != '/':%0A site_root += '/'%0A
url = si
@@ -545,16 +545,28 @@
e_base +
+ site_root +
'home/m
|
6d8fb7d052dc7341ecd9fb3388b804b82f77fa0f
|
add example usage
|
examples/scores.py
|
examples/scores.py
|
Python
| 0 |
@@ -0,0 +1,914 @@
+%22%22%22Get a list of average scores for each professor in a department.%22%22%22%0Aimport sys%0Afrom collections import defaultdict%0A%0Aimport penncoursereview as pcr%0A%0A%0Adef prof_scores(dept):%0A professor_scores = defaultdict(list)%0A dept = pcr.Department(dept)%0A for review in dept.reviews.values:%0A instructor = review.instructor%0A rating = review.ratings.rInstructorQuality%0A professor_scores%5Binstructor.name%5D.append(float(rating))%0A return professor_scores%0A%0A%0Adef averages(dept):%0A professor_scores = prof_scores(dept)%0A for prof, scores in professor_scores.iteritems():%0A score = sum(scores) / len(scores)%0A yield prof, score%0A%0A%0Adef main(dept):%0A for prof, avg in sorted(averages(dept), key=lambda x: x%5B1%5D):%0A print %22%25s %25.2f%22 %25 (prof, avg)%0A%0A%0Aif __name__ == %22__main__%22:%0A if (len(sys.argv) %3C 2):%0A print %22usage: scores.py %3Cdepartment%3E%22%0A else:%0A main(sys.argv%5B1%5D)%0A
|
|
7dbc289897ecf35f0b709177ac3feacffd8691ca
|
add a test file
|
ch_04/testfile.py
|
ch_04/testfile.py
|
Python
| 0.000001 |
@@ -0,0 +1,32 @@
+#this is a test file for eclipse
|
|
2b1b1e1d5db7edf4350239b712d2e872e7769d84
|
add problem 24
|
euler024.py
|
euler024.py
|
Python
| 0.998228 |
@@ -0,0 +1,671 @@
+#!/usr/bin/env python%0A%0Adef nextperm(s):%0A '''%0A http://en.wikipedia.org/wiki/Permutation#Generation_in_lexicographic_order%0A '''%0A k = None%0A for i in range(len(s)-1):%0A if s%5Bi%5D %3C s%5Bi+1%5D:%0A k = i%0A if k is None:%0A # sequence in descending order, last permutation%0A return None%0A l = None%0A for i in range(k+1, len(s)):%0A if s%5Bi%5D %3E s%5Bk%5D:%0A l = i%0A hold = s%5Bl%5D%0A s%5Bl%5D = s%5Bk%5D%0A s%5Bk%5D = hold%0A # reverse s from k+1 to the end%0A t = s%5Bk+1:%5D%0A t.reverse()%0A s%5Bk+1:%5D = t%0A return s%0A%0Aif __name__==%22__main__%22:%0A debugging = False%0A s = range(10)%0A permutations = 10**6-1%0A for perm in xrange(permutations):%0A nextperm(s)%0A if debugging:%0A print s%0A print s%0A
|
|
03279bbc6193d3944dcd2542daa65701a1e0eded
|
Add solution for problem 26
|
euler026.py
|
euler026.py
|
Python
| 0.000159 |
@@ -0,0 +1,469 @@
+#!/usr/bin/python%0A%0A%22%22%22%0ATo resolve this, we have to find the maximum%0AFull Reptend Prime in the given limit. To do that, we need%0Ato check if 10 is a primitive root of p.%0A%0ASee http://mathworld.wolfram.com/FullReptendPrime.html for details%0A%22%22%22%0A%0Afrom sys import exit%0A%0Afor p in range(999, 7, -2):%0A    for k in range(1, p):%0A        if (10 ** k) %25 p == 1:%0A            if k != p - 1:%0A                break%0A    else:%0A        print(p)%0A        exit(0)%0A
|
|
8373611a9c5b035953aee208bc65f4be92890314
|
add the conversion script
|
scripts/conversionScripts/toTransformationNode.py
|
scripts/conversionScripts/toTransformationNode.py
|
Python
| 0.000001 |
@@ -0,0 +1,985 @@
+import xml.etree.ElementTree as ET%0Aimport xml.dom.minidom as pxml%0Aimport os%0A%0Adef convert(tree,fileName=None):%0A %22%22%22%0A Converts input files to be compatible with merge request ....%0A change the attribute of node %3CvariablesTransformation%3E from 'model' to 'distribution'%0A @ In, tree, xml.etree.ElementTree.ElementTree object, the contents of a RAVEN input file%0A @ Out, tree, xml.etree.ElementTree.ElementTree object, the modified RAVEN input file%0A %22%22%22%0A simulation = tree.getroot()%0A if simulation.tag!='Simulation': return tree #this isn't an input file%0A for distNode in simulation.iter('MultivariateNormal'):%0A distName = distNode.get('name')%0A break%0A for vtNode in simulation.iter('variablesTransformation'):%0A vtNode.set('distribution', distName)%0A modelName = vtNode.get('model')%0A if modelName != None:%0A del vtNode.attrib%5B'model'%5D%0A return tree%0A%0Aif __name__=='__main__':%0A import convert_utils%0A import sys%0A convert_utils.standardMain(sys.argv,convert)%0A
|
|
55a35c642b64a6bdb8314b9470c1f7fedb16478f
|
print results
|
tensorflow/python/keras/utils/np_utils.py
|
tensorflow/python/keras/utils/np_utils.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Numpy-related utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.utils.to_categorical')
def to_categorical(y, num_classes=None, dtype='float32'):
"""Converts a class vector (integers) to binary class matrix.
E.g. for use with categorical_crossentropy.
Usage Example:
>>> y = [0, 1, 2, 3]
>>> tf.keras.utils.to_categorical(y, num_classes=4)
array([...], dtype=float32)
Arguments:
y: class vector to be converted into a matrix
(integers from 0 to num_classes).
num_classes: total number of classes.
dtype: The data type expected by the input. Default: `'float32'`.
Returns:
A binary matrix representation of the input. The classes axis is placed
last.
"""
y = np.array(y, dtype='int')
input_shape = y.shape
if input_shape and input_shape[-1] == 1 and len(input_shape) > 1:
input_shape = tuple(input_shape[:-1])
y = y.ravel()
if not num_classes:
num_classes = np.max(y) + 1
n = y.shape[0]
categorical = np.zeros((n, num_classes), dtype=dtype)
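  # One-hot encode: row i gets a single 1 at column y[i].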
categorical[np.arange(n), y] = 1
output_shape = input_shape + (num_classes,)
categorical = np.reshape(categorical, output_shape)
return categorical
@keras_export('keras.utils.normalize')
def normalize(x, axis=-1, order=2):
"""Normalizes a Numpy array.
Arguments:
x: Numpy array to normalize.
axis: axis along which to normalize.
order: Normalization order (e.g. 2 for L2 norm).
Returns:
A normalized copy of the array.
"""
l2 = np.atleast_1d(np.linalg.norm(x, order, axis))
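  # Replace zero norms with 1 so all-zero slices don't cause division by zero.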
l2[l2 == 0] = 1
return x / np.expand_dims(l2, axis)
|
Python
| 0.000004 |
@@ -1229,11 +1229,111 @@
ay(%5B
-...
+%5B1., 0., 0., 0.%5D,%0A %5B0., 1., 0., 0.%5D,%0A %5B0., 0., 1., 0.%5D,%0A %5B0., 0., 0., 1.%5D
%5D, d
|
ae972cd7fe6856a1265981810ea1d03fc5efcf54
|
write test for django admin
|
tests/organisations/test_admin.py
|
tests/organisations/test_admin.py
|
Python
| 0.000001 |
@@ -0,0 +1,520 @@
+import pytest%0Afrom django.urls import reverse%0A%0A%[email protected]_db%0Adef test_organisation_admin_form(client, organisation,%0A admin, user_factory,%0A group_factory):%0A%0A client.login(username=admin, password='password')%0A url = reverse('admin:meinberlin_organisations_organisation_add')%0A response = client.get(url)%0A assert response.status_code == 200%0A%0A data = %7B'name': 'My Organisation'%7D%0A response = client.post(url, data)%0A%0A assert 1 == 2%0A
|
|
cce3b017f36de8fb8682971e13201c0143c524cf
|
add indexes to make deleting faster
|
aeromancer/db/alembic/versions/a3d002d161a_add_indexes.py
|
aeromancer/db/alembic/versions/a3d002d161a_add_indexes.py
|
Python
| 0.000001 |
@@ -0,0 +1,501 @@
+%22%22%22add indexes%0A%0ARevision ID: a3d002d161a%0ARevises: 22e0aa22ab8e%0ACreate Date: 2014-11-24 14:24:29.824147%0A%0A%22%22%22%0A%0A# revision identifiers, used by Alembic.%0Arevision = 'a3d002d161a'%0Adown_revision = '22e0aa22ab8e'%0A%0Afrom alembic import op%0Aimport sqlalchemy as sa%0A%0A%0Adef upgrade():%0A op.create_index('file_project_idx', 'file', %5B'project_id'%5D)%0A op.create_index('line_file_idx', 'line', %5B'file_id'%5D)%0A%0A%0Adef downgrade():%0A op.drop_index('line_file_idx', 'line')%0A op.drop_index('file_project_idx', 'file')%0A
|
|
61c2823b5da460001ca02db6b028fc770d204e32
|
Add initial test case
|
api_tests.py
|
api_tests.py
|
Python
| 0.00002 |
@@ -0,0 +1,1287 @@
+from flask import Flask, g%0Aimport unittest%0Aimport json%0Afrom simplekv.fs import FilesystemStore%0Afrom flaskext.kvsession import KVSessionExtension%0A%0Afrom api import app, db%0Afrom common.database import Database%0A%0Aclass APITest(unittest.TestCase):%0A%0A def setUp(self):%0A global db%0A store = FilesystemStore('session')%0A KVSessionExtension(store, app)%0A %0A # Load the debug config%0A app.config.from_pyfile('../config.defaults.py')%0A app.config.from_pyfile('../config_debug.py')%0A app.secret_key = app.config%5B'SECRET_KEY'%5D%0A db = Database(app.config)%0A%0A self._setup_database()%0A app.testing = True%0A self.app = app.test_client(use_cookies=True)%0A%0A self.csrf = ''%0A%0A %22%22%22Setup the database%0A by clearing it and loading the schema%22%22%22%0A def _setup_database(self):%0A con = db.get_connection()%0A cur = con.cursor()%0A%0A cur.execute(open('schema.sql', 'r').read())%0A con.commit()%0A %0A db.put_away_connection(con)%0A%0A def test_1_api_base(self):%0A rv = self.app.get('/api/')%0A data = json.loads(rv.data)%0A assert data%5B'status'%5D%5B'code'%5D is 0%0A assert data%5B'csrf_token'%5D%0A self.csrf = data%5B'csrf_token'%5D%0A%0Aif __name__ == '__main__':%0A unittest.main()%0A
|
|
72db299a3974b05f511420da5e5861f3bead0065
|
Create solution_1.py
|
problem301/Python/solution_1.py
|
problem301/Python/solution_1.py
|
Python
| 0.005726 |
@@ -0,0 +1,528 @@
+#!/usr/bin/env python%0A# coding=utf-8%0Adef nim():%0A binary_map = %5B0,1%5D%0A total = 3%0A for k in range(28):%0A binary_map_new = %5B%5D%0A for i in range(0, len(binary_map), 2):%0A if binary_map%5Bi:i+2%5D == %5B0,0%5D:%0A binary_map_new.extend(%5B0,0,0,1%5D)%0A total += 3%0A elif binary_map%5Bi:i+2%5D == %5B0,1%5D:%0A binary_map_new.extend(%5B0,0%5D)%0A total += 2%0A%0A binary_map = binary_map_new%0A %0A return total%0A%0Aif __name__ == %22__main__%22:%0A print(nim())%0A
|
|
9b9e76e08531dec4cca5a2eac1067b0a1c7730dc
|
Remove serializer include
|
project/apps/bhs/serializers.py
|
project/apps/bhs/serializers.py
|
# Third-Party
from dry_rest_permissions.generics import DRYPermissionsField
from rest_framework.validators import UniqueTogetherValidator
from rest_framework_json_api import serializers
from rest_framework.serializers import SerializerMethodField
# Local
from .fields import TimezoneField
from .models import Group
from .models import Member
from .models import Officer
from .models import Person
from .models import Chart
from .models import Repertory
class GroupSerializer(serializers.ModelSerializer):
permissions = DRYPermissionsField()
included_serializers = {
'repertories': 'apps.bhs.serializers.RepertorySerializer',
# 'members': 'apps.bhs.serializers.MemberSerializer',
'officers': 'apps.bhs.serializers.OfficerSerializer',
# 'entries': 'api.serializers.EntrySerializer',
}
image_id = serializers.SerializerMethodField()
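    # Resolves to the stored image name, or a placeholder when no image is set.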
def get_image_id(self, obj):
if obj.image:
return obj.image.name
else:
return 'missing_image'
class Meta:
model = Group
fields = [
'id',
'url',
'name',
'status',
'kind',
'gender',
'is_senior',
'is_youth',
'division',
'code',
'start_date',
'end_date',
'location',
'website',
'facebook',
'twitter',
'email',
'phone',
'image',
'image_id',
'description',
'participants',
'bhs_id',
'international',
'district',
'chapter',
'tree_sort',
'parent',
'children',
'awards',
'conventions',
'entries',
# 'members',
'officers',
'repertories',
'permissions',
]
class JSONAPIMeta:
included_resources = [
'repertories',
# 'members',
'officers',
# 'entries',
]
# def to_representation(self, instance):
# if instance.kind <= 30:
# self.fields.pop('members')
# return super().to_representation(instance)
class MemberSerializer(serializers.ModelSerializer):
permissions = DRYPermissionsField()
class Meta:
model = Member
fields = [
'id',
'url',
'status',
'part',
'start_date',
'end_date',
'group',
'person',
'permissions',
]
class OfficerSerializer(serializers.ModelSerializer):
permissions = DRYPermissionsField()
class Meta:
model = Officer
fields = [
'id',
'url',
'status',
'start_date',
'end_date',
'office',
'person',
'group',
'permissions',
]
validators = [
UniqueTogetherValidator(
queryset=Officer.objects.all(),
fields=('person', 'office'),
message='This person already holds this office.',
)
]
class PersonSerializer(serializers.ModelSerializer):
permissions = DRYPermissionsField()
included_serializers = {
# 'assignments': 'api.serializers.AssignmentSerializer',
'members': 'apps.bhs.serializers.MemberSerializer',
'officers': 'apps.bhs.serializers.OfficerSerializer',
# 'panelists': 'api.serializers.PanelistSerializer',
}
image_id = serializers.SerializerMethodField()
def get_image_id(self, obj):
if obj.image:
return obj.image.name
else:
return 'missing_image'
class Meta:
model = Person
fields = (
'id',
'url',
'first_name',
'middle_name',
'last_name',
'nick_name',
'status',
'birth_date',
'spouse',
'location',
'part',
'website',
'email',
'address',
'home_phone',
'work_phone',
'cell_phone',
'airports',
'image',
'image_id',
'description',
'gender',
'bhs_id',
'current_through',
'current_status',
'current_district',
'full_name',
'common_name',
'sort_name',
# 'assignments',
'members',
'officers',
# 'panelists',
'user',
'permissions',
)
read_only_fields = [
'common_name',
'full_name',
'sort_name',
]
class JSONAPIMeta:
included_resources = [
# 'assignments',
# 'members',
# 'officers',
# 'panelists',
]
class ChartSerializer(serializers.ModelSerializer):
permissions = DRYPermissionsField()
image_id = serializers.SerializerMethodField()
def get_image_id(self, obj):
if obj.image:
return obj.image.name
else:
return 'missing_image'
class Meta:
model = Chart
fields = (
'id',
'url',
'status',
'title',
'arrangers',
'composers',
'lyricists',
'description',
'notes',
'image',
'image_id',
'holders',
'repertories',
# 'songs',
'permissions',
)
class RepertorySerializer(serializers.ModelSerializer):
permissions = DRYPermissionsField()
class Meta:
model = Repertory
fields = (
'id',
'url',
'status',
'group',
'chart',
'permissions',
)
validators = [
UniqueTogetherValidator(
queryset=Repertory.objects.all(),
fields=('group', 'chart'),
message='This chart already exists in your repertory.',
)
]
|
Python
| 0 |
@@ -3387,33 +3387,43 @@
ssignments': 'ap
-i
+ps.cmanager
.serializers.Ass
@@ -3445,24 +3445,26 @@
er',%0A
+ #
'members':
@@ -3503,32 +3503,34 @@
alizer',%0A
+ #
'officers': 'ap
@@ -4597,34 +4597,32 @@
me',%0A
- #
'assignments',%0A
|
53bf5c12b77e19d54e3ab50ade8840843cca9649
|
add sql group_by unit tests
|
siuba/tests/test_verb_group_by.py
|
siuba/tests/test_verb_group_by.py
|
Python
| 0.000001 |
@@ -0,0 +1,1378 @@
+%22%22%22%0ANote: this test file was heavily influenced by its dbplyr counterpart.%0A%0Ahttps://github.com/tidyverse/dbplyr/blob/master/tests/testthat/test-verb-group_by.R%0A%22%22%22%0A %0Afrom siuba import _, group_by, ungroup, summarize%0Afrom siuba.dply.vector import row_number, n%0A%0Aimport pytest%0Afrom .helpers import assert_equal_query, data_frame, backend_notimpl%0Afrom string import ascii_lowercase %0A%0ADATA = data_frame(x = %5B1,2,3%5D, y = %5B9,8,7%5D, g = %5B'a', 'a', 'b'%5D)%0A%[email protected](scope = %22module%22)%0Adef df(backend):%0A return backend.load_df(DATA)%0A%0A%0Adef test_group_by_no_add(df):%0A gdf = group_by(df, _.x, _.y)%0A assert gdf.group_by == (%22x%22, %22y%22)%0A%0Adef test_group_by_override(df):%0A gdf = df %3E%3E group_by(_.x, _.y) %3E%3E group_by(_.g)%0A assert gdf.group_by == (%22g%22,)%0A%0Adef test_group_by_add(df):%0A gdf = group_by(df, _.x) %3E%3E group_by(_.y, add = True)%0A%0A assert gdf.group_by == (%22x%22, %22y%22)%0A%0Adef test_group_by_ungroup(df):%0A q1 = df %3E%3E group_by(_.g)%0A assert q1.group_by == (%22g%22,)%0A%0A q2 = q1 %3E%3E ungroup()%0A assert q2.group_by == tuple()%0A%0A%[email protected](%22TODO: need to test / validate joins first%22)%0Adef test_group_by_before_joins(df):%0A assert False%0A%[email protected](%22TODO: (#52)%22)%0Adef test_group_by_performs_mutate(df):%0A assert_equal_query(%0A df,%0A group_by(z = _.x + _.y) %3E%3E summarize(n = n(_)),%0A data_frame(z = 10, n = 4)%0A )%0A%0A
|
|
c0d0496eb2675ba2dbd5dbaa9d4b4c701409308f
|
Allow IHaskellPrelude.hs to not be formatting checked
|
verify_formatting.py
|
verify_formatting.py
|
#!/usr/bin/env python3
from __future__ import print_function
import sys
import os
import subprocess
def hindent(contents):
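    # Run the source through hindent (gibiansky style), using a temp file as stdin.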
with open(".tmp3", "w") as f:
f.write(contents)
with open(".tmp3", "r") as f:
output = subprocess.check_output(["hindent", "--style", "gibiansky"],
stdin=f)
return output.decode('utf-8')
def diff(src1, src2):
# Ignore trailing newlines
if src1[-1] == "\n":
src1 = src1[:-1]
if src2[-1] == "\n":
src2 = src2[:-1]
with open(".tmp1", "w") as f1:
f1.write(src1)
with open(".tmp2", "w") as f2:
f2.write(src2)
try:
output = subprocess.check_output(["diff", ".tmp1", ".tmp2"])
return output.decode('utf-8')
except subprocess.CalledProcessError as e:
return e.output.decode('utf-8')
# Verify that we're in the right directory
try:
open("ihaskell.cabal", "r").close()
except IOError:
    print(sys.argv[0], "must be run from the ihaskell directory",
          file=sys.stderr)
    sys.exit(1)
# Find all the source files
sources = []
for source_dir in ["src", "ipython-kernel", "ihaskell-display"]:
for root, dirnames, filenames in os.walk(source_dir):
# Skip cabal dist directories
if "dist" in root:
continue
for filename in filenames:
# Take Haskell files, but ignore the Cabal Setup.hs
if filename.endswith(".hs") and filename != "Setup.hs":
sources.append(os.path.join(root, filename))
hindent_outputs = {}
for source_file in sources:
print("Formatting file", source_file)
with open(source_file, "r") as f:
original_source = f.read()
formatted_source = hindent(original_source)
hindent_outputs[source_file] = (original_source, formatted_source)
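# A file is considered misformatted when hindent's output differs from the original.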
diffs = {filename: diff(original, formatted)
for (filename, (original, formatted)) in hindent_outputs.items()}
incorrect_formatting = False
for filename, diff in diffs.items():
if diff:
incorrect_formatting = True
print('Incorrect formatting in', filename)
print('=' * 10)
print(diff)
if incorrect_formatting:
sys.exit(1)
|
Python
| 0 |
@@ -1394,16 +1394,153 @@
etup.hs%0A
+ # Also ignore IHaskellPrelude.hs, it uses CPP in weird places%0A ignored_files = %5B%22Setup.hs%22, %22IHaskellPrelude.hs%22%5D%0A
@@ -1588,21 +1588,28 @@
ame
-!= %22Setup.hs%22
+not in ignored_files
:%0A
|
930a8b1a7c980183df5469627a734033ca39a444
|
Add functional tests for create_image
|
shade/tests/functional/test_image.py
|
shade/tests/functional/test_image.py
|
Python
| 0.000012 |
@@ -0,0 +1,1689 @@
+# -*- coding: utf-8 -*-%0A%0A# Licensed under the Apache License, Version 2.0 (the %22License%22); you may%0A# not use this file except in compliance with the License. You may obtain%0A# a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS, WITHOUT%0A# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the%0A# License for the specific language governing permissions and limitations%0A# under the License.%0A%0A%22%22%22%0Atest_compute%0A----------------------------------%0A%0AFunctional tests for %60shade%60 image methods.%0A%22%22%22%0A%0Aimport tempfile%0Aimport uuid%0A%0Afrom shade import openstack_cloud%0Afrom shade.tests import base%0Afrom shade.tests.functional.util import pick_image%0A%0A%0Aclass TestCompute(base.TestCase):%0A def setUp(self):%0A super(TestCompute, self).setUp()%0A # Shell should have OS-* envvars from openrc, typically loaded by job%0A self.cloud = openstack_cloud()%0A self.image = pick_image(self.cloud.nova_client.images.list())%0A%0A def test_create_image(self):%0A test_image = tempfile.NamedTemporaryFile(delete=False)%0A test_image.write('%5C0' * 1024 * 1024)%0A test_image.close()%0A image_name = 'test-image-%25s' %25 uuid.uuid4()%0A try:%0A self.cloud.create_image(name=image_name,%0A filename=test_image.name,%0A disk_format='raw',%0A container_format='bare',%0A wait=True)%0A finally:%0A self.cloud.delete_image(image_name, wait=True)%0A
|
|
59ef02377c41041fd8010231f2c86d1aba072c0f
|
Complete recur sol
|
lc0105_construct_binary_tree_from_preorder_and_inorder_traversal.py
|
lc0105_construct_binary_tree_from_preorder_and_inorder_traversal.py
|
Python
| 0.000007 |
@@ -0,0 +1,2369 @@
+%22%22%22Leetcode 105. Construct Binary Tree from Preorder and Inorder Traversal%0AMedium%0A%0AURL: https://leetcode.com/problems/construct-binary-tree-from-preorder-and-inorder-traversal/%0A%0AGiven preorder and inorder traversal of a tree, construct the binary tree.%0A%0ANote: You may assume that duplicates do not exist in the tree.%0A%0AFor example, given%0Apreorder = %5B3,9,20,15,7%5D%0Ainorder = %5B9,3,15,20,7%5D%0AReturn the following binary tree:%0A 3%0A / %5C%0A 9 20%0A / %5C%0A 15 7%0A%22%22%22%0A%0A# Definition for a binary tree node.%0Aclass TreeNode(object):%0A def __init__(self, val):%0A self.val = val%0A self.left = None%0A self.right = None%0A%0A%0Aclass SolutionRecur(object):%0A def _build(self, pre_start, pre_end, in_start, in_end,%0A inorder_d, preorder, inorder):%0A if pre_start %3E pre_end or in_start %3E in_end:%0A return None%0A%0A # Preorder's first is root.%0A root = TreeNode(preorder%5Bpre_start%5D)%0A%0A # Get root's pos in inorder.%0A in_root_pos = inorder_d%5Broot.val%5D%0A%0A # Compute the number of left from root.%0A n_left = in_root_pos - in_start%0A%0A # Build binary trees for root's left and right.%0A root.left = self._build(pre_start + 1, pre_start + n_left, %0A in_start, in_root_pos - 1,%0A inorder_d, preorder, inorder)%0A root.right = self._build(pre_start + n_left + 1, pre_end, %0A in_root_pos + 1, in_end,%0A inorder_d, preorder, inorder)%0A%0A return root%0A%0A%0A def buildTree(self, preorder, inorder):%0A %22%22%22%0A :type preorder: List%5Bint%5D%0A :type inorder: List%5Bint%5D%0A :rtype: TreeNode%0A %22%22%22%0A # Create dict for inorder value-%3Eindex.%0A inorder_d = %7Bv: i for (i, v) in enumerate(inorder)%7D%0A%0A # Build binary tree by recursion.%0A return self._build(0, len(preorder) - 1, 0, len(inorder) - 1, %0A inorder_d, preorder, inorder)%0A%0A%0Adef main():%0A # Ans:%0A # 3%0A # / %5C%0A # 9 20%0A # / %5C%0A # 15 7%0A preorder = %5B3, 9, 20, 15, 7%5D%0A inorder = %5B9, 3, 15, 20, 7%5D%0A root = SolutionRecur().buildTree(preorder, inorder)%0A %0A print root.val%0A print root.left.val%0A print root.right.val%0A print root.right.left.val%0A print root.right.right.val%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
9c045f7667e1bdc6c9137c3877292907f4623774
|
Add a management command to check if URNs are present in the database
|
make_a_plea/management/commands/check_urns_in_db.py
|
make_a_plea/management/commands/check_urns_in_db.py
|
Python
| 0 |
@@ -0,0 +1,1405 @@
+import csv%0Afrom django.core.management.base import BaseCommand%0A%0A%0Afrom apps.plea.models import DataValidation, Case%0Afrom apps.plea.standardisers import standardise_urn, format_for_region%0A%0A%0Aclass Command(BaseCommand):%0A help = %22Build weekly aggregate stats%22%0A%0A def add_arguments(self, parser):%0A parser.add_argument('csv_file', nargs='+')%0A%0A def handle(self, *args, **options):%0A with open(options%5B'csv_file'%5D%5B0%5D) as csvfile:%0A total_matched, total_missed, matched, missed = 0, 0, 0, 0%0A%0A for row in csvfile.readlines():%0A if not row.strip():%0A continue%0A elif row.startswith(%22#%22):%0A if matched %3E 0 or missed %3E 0:%0A print %22----------------%5CnMatched %7B%7D%5CnMissed %7B%7D%5Cn%5Cn%22.format(matched, missed)%0A total_matched += matched%0A total_missed += missed%0A matched = 0%0A missed = 0%0A print row%0A else:%0A urn = standardise_urn(row)%0A if Case.objects.filter(urn__iexact=urn).exists():%0A matched += 1%0A else:%0A missed += 1%0A print %22%7B%7D - failed%22.format(urn)%0A%0A print %22----------------%5CnTotal:%5CnMatched %7B%7D%5CnMissed %7B%7D%22.format(total_matched, total_missed)
|
|
4759cf1b058d1a1b5999882a8b44f84ad89a8a9a
|
Add tests file
|
arangodb/tests.py
|
arangodb/tests.py
|
Python
| 0.000001 |
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-%0A
|
|
64b572a4e1e8359d781591e22439fb432c5860b6
|
Create click_location.py
|
click_location.py
|
click_location.py
|
Python
| 0.000001 |
@@ -0,0 +1,188 @@
+from PIL import Image%0Afrom pylab import *%0A%0Aim = array(Image.open('img.jpg'))%0Ashow()%0Awhile(1):%0A imshow(im)%0A print %22Please click 3 points%22%0A x = ginput(1)%0A print 'you clicked:',x%0A
|
|
d6e9971ceefc69f0eefc7440cc5e7035e7dcc05d
|
Add the middleware for reporting errors to gcloud.
|
contentcuration/contentcuration/middleware/ErrorReportingMiddleware.py
|
contentcuration/contentcuration/middleware/ErrorReportingMiddleware.py
|
Python
| 0 |
@@ -0,0 +1,264 @@
+from google.cloud import error_reporting%0A%0A%0Aclass ErrorReportingMiddleware(object):%0A def __init__(self, *args, **kwargs):%0A self.client = error_reporting.Client()%0A%0A def process_exception(self, request, exception):%0A self.client.report_exception()%0A
|
|
e3ab7c126f808864f0458b52f36518e485f546ca
|
Add a session class to help tie everything together in a convenient way.
|
source/harmony/session.py
|
source/harmony/session.py
|
Python
| 0 |
@@ -0,0 +1,2373 @@
+# :coding: utf-8%0A# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips%0A# :license: See LICENSE.txt.%0A%0Aimport os%0A%0Afrom harmony.schema.collection import Collection%0Afrom harmony.schema.collector import FilesystemCollector%0Afrom harmony.schema.processor import MixinProcessor, ValidateProcessor%0Afrom harmony.schema.validator import Validator%0A%0A%0Aclass Session(object):%0A '''A configuration of the various components in a standard way.'''%0A%0A def __init__(self, collector=None, processors=None, validator_class=None):%0A '''Initialise session.%0A%0A *collector* is used to collect schemas for use in the session and%0A should conform to the :py:class:%60~harmony.schema.collector.Collector%60%0A interface. Defaults to a%0A :py:class:%60~harmony.schema.collector.FileSystemCollector%60 using the%0A environment variable :envvar:%60HARMONY_SCHEMA_PATH%60 to discover schemas.%0A%0A *processors* specifies a list of%0A :py:class:%60~harmony.schema.processor.Processor%60 instances that will%0A post-process any discovered schemas. If not specified will default to%0A %5B:py:class:%60~harmony.schema.processor.ValidateProcessor%60,%0A :py:class:%60~harmony.schema.processor.MixinProcessor%60%5D.%0A%0A *validator_class* should be the class to use for validation of schemas%0A and instances. Defaults to%0A :py:class:%60harmony.schema.validator.Validator%60.%0A%0A '''%0A self.schemas = Collection()%0A%0A self.collector = collector%0A if self.collector is None:%0A paths = os.environ.get('HARMONY_SCHEMA_PATH', '').split(os.pathsep)%0A self.collector = FilesystemCollector(paths)%0A%0A self.validator_class = validator_class%0A if self.validator_class is None:%0A self.validator_class = Validator%0A%0A self.processors = processors%0A if self.processors is None:%0A self.processors = %5B%0A ValidateProcessor(self.validator_class), MixinProcessor()%0A %5D%0A%0A self.refresh()%0A%0A def refresh(self):%0A '''Discover schemas and add to local collection.%0A%0A .. note::%0A%0A Collection will be processed with self.processors.%0A%0A '''%0A self.schemas.clear()%0A for schema in self.collector.collect():%0A self.schemas.add(schema)%0A%0A for processor in self.processors:%0A processor.process(self.schemas)%0A%0A
|
|
c2036cd7629b93bfc12069eaf174f2427d47e769
|
add another test
|
tests/monitoring/test_check_mesos_duplicate_frameworks.py
|
tests/monitoring/test_check_mesos_duplicate_frameworks.py
|
Python
| 0.000004 |
@@ -0,0 +1,2964 @@
+# Copyright 2015-2016 Yelp Inc.%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22);%0A# you may not use this file except in compliance with the License.%0A# You may obtain a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS,%0A# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0A# See the License for the specific language governing permissions and%0A# limitations under the License.%0Aimport mock%0Aimport pytest%0A%0Afrom paasta_tools.monitoring.check_mesos_duplicate_frameworks import check_mesos_no_duplicate_frameworks%0A%0A%0Adef test_check_mesos_no_duplicate_frameworks_ok(capfd):%0A with mock.patch(%0A 'paasta_tools.monitoring.check_mesos_duplicate_frameworks.parse_args', autospec=True,%0A ) as mock_parse_args, mock.patch(%0A 'paasta_tools.monitoring.check_mesos_duplicate_frameworks.get_mesos_master', autospec=True,%0A ) as mock_get_mesos_master:%0A mock_opts = mock.MagicMock()%0A mock_opts.check = 'marathon,chronos'%0A mock_parse_args.return_value = mock_opts%0A mock_master = mock.MagicMock()%0A mock_master.state = %7B%0A 'frameworks': %5B%0A %7B'name': 'marathon'%7D,%0A %7B'name': 'chronos'%7D,%0A %7B'name': 'foobar'%7D,%0A %7B'name': 'foobar'%7D,%0A %5D,%0A %7D%0A mock_get_mesos_master.return_value = mock_master%0A%0A with pytest.raises(SystemExit) as error:%0A check_mesos_no_duplicate_frameworks()%0A out, err = capfd.readouterr()%0A assert %22OK%22 in out%0A assert %22marathon%22 in out%0A assert %22chronos%22 in out%0A assert %22foobar%22 not in out%0A assert error.value.code == 0%0A%0A%0Adef test_check_mesos_no_duplicate_frameworks_critical(capfd):%0A with mock.patch(%0A 'paasta_tools.monitoring.check_mesos_duplicate_frameworks.parse_args', autospec=True,%0A ) as mock_parse_args, mock.patch(%0A 'paasta_tools.monitoring.check_mesos_duplicate_frameworks.get_mesos_master', autospec=True,%0A ) as mock_get_mesos_master:%0A mock_opts = mock.MagicMock()%0A mock_opts.check = 'marathon,chronos'%0A mock_parse_args.return_value = mock_opts%0A mock_master = mock.MagicMock()%0A mock_master.state = %7B%0A 'frameworks': %5B%0A %7B'name': 'marathon'%7D,%0A %7B'name': 'marathon'%7D,%0A %7B'name': 'chronos'%7D,%0A %7B'name': 'foobar'%7D,%0A %7B'name': 'foobar'%7D,%0A %5D,%0A %7D%0A mock_get_mesos_master.return_value = mock_master%0A%0A with pytest.raises(SystemExit) as error:%0A check_mesos_no_duplicate_frameworks()%0A out, err = capfd.readouterr()%0A assert %22CRITICAL%22 in out%0A assert %22marathon%22 in out%0A assert %22chronos%22 in out%0A assert %22foobar%22 not in out%0A assert error.value.code == 2%0A
|
|
21462355d04264fff88232a638f218de926061e8
|
send Bob's presence when joining the room
|
tests/twisted/muc/test-muc-invitation.py
|
tests/twisted/muc/test-muc-invitation.py
|
"""
Test MUC invitations.
"""
import dbus
from twisted.words.xish import domish, xpath
from gabbletest import exec_test, make_muc_presence
from servicetest import call_async, EventPattern
import constants as cs
def test(q, bus, conn, stream):
conn.Connect()
q.expect('dbus-signal', signal='StatusChanged', args=[0, 1])
# Bob has invited us to an activity.
message = domish.Element((None, 'message'))
message['from'] = '[email protected]'
message['to'] = 'test@localhost'
x = message.addElement(('http://jabber.org/protocol/muc#user', 'x'))
invite = x.addElement((None, 'invite'))
invite['from'] = 'bob@localhost'
reason = invite.addElement((None, 'reason'))
reason.addContent('No good reason')
stream.send(message)
event = q.expect('dbus-signal', signal='NewChannel')
assert event.args[1] == cs.CHANNEL_TYPE_TEXT
assert event.args[2] == 2 # handle type
assert event.args[3] == 1 # handle
room_handle = 1
text_chan = bus.get_object(conn.bus_name, event.args[0])
group_iface = dbus.Interface(text_chan, cs.CHANNEL_IFACE_GROUP)
members = group_iface.GetMembers()
local_pending = group_iface.GetLocalPendingMembers()
remote_pending = group_iface.GetRemotePendingMembers()
assert len(members) == 1
assert conn.InspectHandles(1, members)[0] == 'bob@localhost'
bob_handle = members[0]
assert len(local_pending) == 1
# FIXME: the username-part-is-nickname assumption
assert conn.InspectHandles(1, local_pending)[0] == \
'[email protected]/test'
assert len(remote_pending) == 0
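    # Our own MUC handle should still sit in local-pending until we accept the invite.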
room_self_handle = group_iface.GetSelfHandle()
assert room_self_handle == local_pending[0]
channel_props = text_chan.GetAll(
cs.CHANNEL, dbus_interface=dbus.PROPERTIES_IFACE)
assert channel_props['TargetID'] == '[email protected]', channel_props
assert channel_props['Requested'] == False
assert channel_props['InitiatorID'] == 'bob@localhost'
assert channel_props['InitiatorHandle'] == bob_handle
# set ourselves to away and back again, to check that we don't send any
# presence to the MUC before the invite has been accepted
conn.Presence.SetStatus({'away':{'message':'failure'}})
conn.Presence.SetStatus({'available':{'message':'success'}})
# accept the invitation
call_async(q, group_iface, 'AddMembers', [room_self_handle], 'Oh, OK then')
event, event2, _ = q.expect_many(
EventPattern('stream-presence', to='[email protected]/test'),
EventPattern('dbus-signal', signal='MembersChanged'),
EventPattern('dbus-return', method='AddMembers')
)
# check that the status we joined with was available / success
elem = event.stanza
show = [e for e in elem.elements() if e.name == 'show']
assert not show
status = [e for e in elem.elements() if e.name == 'status'][0]
assert status
assert status.children[0] == u'success'
# We are added as remote pending while joining the room. The inviter (Bob)
# is removed for now. It will be re-added with his channel specific handle
# once we have joined.
assert event2.args == ['', [], [bob_handle], [],
[room_self_handle], 0, cs.GC_REASON_INVITED]
# Send presence for own membership of room.
stream.send(make_muc_presence('owner', 'moderator', '[email protected]', 'test'))
event = q.expect('dbus-signal', signal='MembersChanged')
assert event.args == ['', [room_self_handle], [], [], [], 0, 0]
# Test sending an invitation
alice_handle = conn.RequestHandles(1, ['alice@localhost'])[0]
call_async(q, group_iface, 'AddMembers', [alice_handle],
'I want to test invitations')
event = q.expect('stream-message', to='[email protected]')
message = event.stanza
x = xpath.queryForNodes('/message/x', message)
assert (x is not None and len(x) == 1), repr(x)
assert x[0].uri == 'http://jabber.org/protocol/muc#user'
invites = xpath.queryForNodes('/x/invite', x[0])
assert (invites is not None and len(invites) == 1), repr(invites)
assert invites[0]['to'] == 'alice@localhost'
reasons = xpath.queryForNodes('/invite/reason', invites[0])
assert (reasons is not None and len(reasons) == 1), repr(reasons)
assert str(reasons[0]) == 'I want to test invitations'
conn.Disconnect()
q.expect('dbus-signal', signal='StatusChanged', args=[2, 1])
if __name__ == '__main__':
exec_test(test)
|
Python
| 0 |
@@ -3275,16 +3275,154 @@
VITED%5D%0A%0A
+ # Send presence for Bob's membership of room.%0A stream.send(make_muc_presence('owner', 'moderator', '[email protected]', 'bob'))%0A%0A
# Se
@@ -3453,32 +3453,32 @@
ership of room.%0A
-
stream.send(
@@ -3610,16 +3610,106 @@
hanged')
+%0A%0A room_bob_handle = conn.RequestHandles(cs.HT_CONTACT, %5B'[email protected]/bob'%5D)%5B0%5D
%0A ass
@@ -3744,24 +3744,41 @@
_self_handle
+, room_bob_handle
%5D, %5B%5D, %5B%5D, %5B
|
0b81997dd12f775fc9f814c19fb62ef35bde998e
|
Add ceres library
|
autoconf/ceres.py
|
autoconf/ceres.py
|
Python
| 0.000001 |
@@ -0,0 +1,286 @@
+from _external import *%0Afrom pthread import *%0Afrom amd import *%0Afrom gomp import *%0Afrom lapack import *%0Afrom suitesparse import *%0Afrom glog import *%0A%0Aceres = LibWithHeaderChecker('ceres', 'ceres/ceres.h', 'c++', name='ceres', dependencies = %5Bgomp,lapack,suitesparse,amd,pthread,glog%5D,)%0A
|
|
681f73490fd7d333883134a417477492744ce22a
|
Add project permissions
|
src/python/expedient/clearinghouse/project/permissions.py
|
src/python/expedient/clearinghouse/project/permissions.py
|
Python
| 0.000001 |
@@ -0,0 +1,1895 @@
+'''%0ACreated on Aug 3, 2010%0A%0A@author: jnaous%0A'''%0Afrom expedient.common.permissions.shortcuts import create_permission%0Afrom expedient.clearinghouse.permissionmgmt.utils import %5C%0A request_permission_wrapper%0A%0Acreate_permission(%0A %22can_create_project%22,%0A description=%5C%0A %22Owners of this permission can create projects in Expedient.%22,%0A view=request_permission_wrapper,%0A)%0A%0Acreate_permission(%0A %22can_edit_project%22,%0A description=%5C%0A %22Owners of this permission can edit basic project properties.%22,%0A view=request_permission_wrapper,%0A)%0A%0Acreate_permission(%0A %22can_delete_project%22,%0A description=%5C%0A %22Owners of this permission can edit basic project properties.%22,%0A view=request_permission_wrapper,%0A)%0A%0Acreate_permission(%0A %22can_view_project%22,%0A description=%5C%0A %22Owners of this permission can view the project. Without %22%0A %22other permissions, they are non-functional members.%22,%0A view=request_permission_wrapper,%0A)%0A%0Acreate_permission(%0A %22can_add_members%22,%0A description=%5C%0A %22Owners of this permission can add members to %22%0A %22the project and assign to them roles.%22,%0A view=request_permission_wrapper,%0A)%0A%0Acreate_permission(%0A %22can_remove_members%22,%0A description=%5C%0A %22Owners of this permission can remove members from %22%0A %22the project.%22,%0A view=request_permission_wrapper,%0A)%0A%0Acreate_permission(%0A %22can_create_slices%22,%0A description=%5C%0A %22Owners of this permission can create new slices.%22,%0A view=request_permission_wrapper,%0A)%0A%0Acreate_permission(%0A %22can_add_aggregates%22,%0A description=%5C%0A %22Owners of this permission can add aggregates %22%0A %22to the project.%22,%0A view=request_permission_wrapper,%0A)%0A%0Acreate_permission(%0A %22can_remove_aggregates%22,%0A description=%5C%0A %22Owners of this permission can remove aggregates %22%0A %22from the project.%22,%0A view=request_permission_wrapper,%0A)%0A
|
|
bd9fce88c235ea6be032a1d15a31bf41df14a444
|
Fix missing migration
|
djangocms_blog/migrations/0033_auto_20180226_1410.py
|
djangocms_blog/migrations/0033_auto_20180226_1410.py
|
Python
| 0.999773 |
@@ -0,0 +1,487 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations, models%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('djangocms_blog', '0032_auto_20180109_0023'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='blogcategorytranslation',%0A name='meta_description',%0A field=models.TextField(blank=True, verbose_name='category meta description', default=''),%0A ),%0A %5D%0A
|
|
07ef4a6440b59ed6ce207d3442f7ba950a8f9ec8
|
Python script to compute cost.
|
compute_cost.py
|
compute_cost.py
|
Python
| 0.999849 |
@@ -0,0 +1,1799 @@
+%22%22%22compute_cost.py: %0A%0A%22%22%22%0A %0A__author__ = %22Dilawar Singh%22%0A__copyright__ = %22Copyright 2016, Dilawar Singh%22%0A__credits__ = %5B%22NCBS Bangalore%22%5D%0A__license__ = %22GNU GPL%22%0A__version__ = %221.0.0%22%0A__maintainer__ = %22Dilawar Singh%22%0A__email__ = %[email protected]%22%0A__status__ = %22Development%22%0A%0Aimport sys%0Aimport os%0Aimport random%0Aimport datetime%0A%0A__fmt__ = '%25Y-%25m-%25d'%0A%0Adef computeCost( currentDate, lastDate, nAWS ):%0A ndays = ( lastDate - currentDate ).days %0A maxAWS = 5%0A nyears = ndays / 365.0%0A if ndays %3C 365.0 or nAWS %3E maxAWS:%0A return int( 100 * 20.0 )%0A%0A cost = 3 * nyears %0A if nAWS %3E 2:%0A cost += 3 * (maxAWS - nAWS) - (20.0/nAWS) * ( nyears - 1)%0A%0A return int( 100 * max( 0, cost ))%0A%0Adef random_date(start, end):%0A %22%22%22%0A This function will return a random datetime between two datetime %0A objects.%0A %22%22%22%0A delta = end - start%0A int_delta = (delta.days * 24.0 * 60 * 60) + delta.seconds%0A random_second = randrange(int_delta)%0A return start + datetime.timedelta(seconds=random_second)%0A%0Adef test( ):%0A import pylab%0A # Generate random test data.%0A start = datetime.datetime.strptime( '2017-03-18', __fmt__ )%0A end = datetime.datetime.strptime( '2021-03-18', __fmt__ )%0A for naws in range( 0, 5 ):%0A xval, yval = %5B %5D, %5B %5D%0A for i in range( 5* 54 ):%0A date = start + datetime.timedelta( days = i * 7 )%0A xval.append( (date - start).days / 365.0 )%0A yval.append( computeCost( start, date, naws ) )%0A pylab.xlabel( 'Year' )%0A pylab.ylabel( 'Cost' )%0A pylab.plot( xval, yval, alpha = 0.7, label = '%25s' %25 naws )%0A pylab.legend( )%0A%0A pylab.savefig( %22%25s.png%22 %25 sys.argv%5B0%5D )%0A%0A%0Aif __name__ == '__main__':%0A test()%0A
|
|
7a0bbdb2395ca1e8579e0f2cc6ccd43807c51161
|
Create 6kyu_alpha_to_numeric_and_numeric_to_alpha.py
|
Solutions/6kyu/6kyu_alpha_to_numeric_and_numeric_to_alpha.py
|
Solutions/6kyu/6kyu_alpha_to_numeric_and_numeric_to_alpha.py
|
Python
| 0.999268 |
@@ -0,0 +1,191 @@
+import re%0A%0Adef AlphaNum_NumAlpha(string):%0A return ''.join(swap(s) for s in re.findall('%5Cd%7B1,2%7D%7C%5Ba-z%5D', string))%0A%0A%0Adef swap(s):%0A return chr(int(s)+96) if s.isdigit() else str(ord(s)-96)%0A
|
|
20c51dbcd2d90bfa234efa5027254a4915995edd
|
add nmap_hosts migration
|
alembic/versions/13b7c3d4c802_create_nmap_hosts_table.py
|
alembic/versions/13b7c3d4c802_create_nmap_hosts_table.py
|
Python
| 0 |
@@ -0,0 +1,786 @@
+%22%22%22create nmap_hosts table%0A%0ARevision ID: 13b7c3d4c802%0ARevises: ecd5f49567a6%0ACreate Date: 2017-07-21 08:19:17.849112%0A%0A%22%22%22%0Afrom sqlalchemy.dialects import postgresql%0Afrom alembic import op%0Aimport sqlalchemy as sa%0Aimport datetime%0A%0A%0Adef _get_date():%0A return datetime.datetime.now()%0A%0A# revision identifiers, used by Alembic.%0Arevision = '13b7c3d4c802'%0Adown_revision = 'ecd5f49567a6'%0Abranch_labels = None%0Adepends_on = None%0A%0A%0Adef upgrade():%0A op.create_table('nmap_hosts',%0A sa.Column('id', sa.Integer, primary_key=True, nullable=False),%0A sa.Column('ip_addr', postgresql.INET, unique=True, nullable=False),%0A sa.Column('created_at', sa.TIMESTAMP(timezone=False), default=_get_date))%0A%0A%0Adef downgrade():%0A op.drop_table('nmap_hosts')%0A
|
|
941985a561d0bdce1a8aba2e57fc60f90b6164fb
|
Add jrun main module so "python jrun" works
|
jrun/__main__.py
|
jrun/__main__.py
|
Python
| 0.000036 |
@@ -0,0 +1,60 @@
+import jrun%0Aif __name__ == '__main__':%0A jrun.jrun_main()%0A
|
|
b27b3089f393a84c3d004e8d89be43165862be1d
|
add match matrix
|
matrix_match.py
|
matrix_match.py
|
Python
| 0.000002 |
@@ -0,0 +1,1296 @@
+import random%0A%0Aw1 = int(raw_input(%22%3E%3E%3E w1: %22))%0Ah1 = int(raw_input(%22%3E%3E%3E h1: %22))%0Aw2 = int(raw_input(%22%3E%3E%3E w2: %22))%0Ah2 = int(raw_input(%22%3E%3E%3E h2: %22))%0A%0Ar1 = int(raw_input(%22%3E%3E%3E r1: %22))%0Ar2 = int(raw_input(%22%3E%3E%3E r2: %22))%0A%0A# w1 = 20%0A# h1 = 20%0A# w2 = 3%0A# h2 = 3%0A%0Amatrix = %5B%5Brandom.randint(r1, r2) for x in range(w1)%5D for x in range(h1)%5D%0Apattern = %5B%5Brandom.randint(r1, r2) for x in range(w2)%5D for x in range(h2)%5D%0A%0A%0Adef matchMatrix(matrix1, matrix2):%0A print 'Match Matrix start:%5Cn '%0A results = %5B%5D%0A temp = %5B%5D%0A for x in matrix2:%0A for y in x:%0A temp.append(y)%0A%0A indexOfX = 0%0A for x in matrix1:%0A if indexOfX %3E= (h1-h2+1):%0A break%0A indexOfY = 0%0A for y in x:%0A if indexOfY %3E= (w1-w2+1):%0A break%0A count = 0%0A for z in matrix2:%0A subMatrix = matrix%5BindexOfX+count%5D%0A count+=1%0A size = len(z)%0A subX = subMatrix%5BindexOfY:indexOfY+size%5D %0A if z != subX:%0A break%0A if count == h2:%0A results.append((indexOfX, indexOfY))%0A indexOfY+=1%0A indexOfX+=1%0A return results%0A%0Afor x in pattern:%0A print x%0Afor x in matrix:%0A print x%0A%0Aprint 'Ans:%5Cn%25s' %25 (matchMatrix(matrix, pattern))
|
|
40f92e6293bb13ee1462b932be15f5f11ceeee74
|
Add initial implementation of TempType.
|
compiler/infer.py
|
compiler/infer.py
|
Python
| 0 |
@@ -0,0 +1,1115 @@
+%22%22%22%0A# ----------------------------------------------------------------------%0A# infer.py%0A#%0A# Type inference for Llama%0A# http://courses.softlab.ntua.gr/compilers/2012a/llama2012.pdf%0A#%0A# Authors: Nick Korasidis %[email protected]%3E%0A# Dimitris Koutsoukos %[email protected]%3E%0A# ----------------------------------------------------------------------%0A%22%22%22%0A%0A%0Aclass TempType:%0A %22%22%22A temporary type used during inference.%22%22%22%0A%0A _next_free = 1 # Next free papaki.%0A%0A @classmethod%0A def _get_next_tag(cls):%0A cls._next_free += 1%0A return cls._next_free%0A%0A def __init__(self, node, spec_type=None):%0A %22%22%22%0A Construct a new temporary type for node %60node%60.%0A%0A The user may optionally supply a type for this node;%0A such a specification is not binding but will improve%0A error reporting.%0A %22%22%22%0A self._node = node%0A self._spec_type = spec_type%0A self._inferred_type = None%0A%0A self._tag = self._get_next_tag()%0A%0A def write_back(self):%0A self._node.type = self._inferred_type%0A # TODO: Validate the type before returning.%0A
|
|
a2e27feff324d5aed7220a520df651f688cd1829
|
Add migration
|
bluebottle/assignments/migrations/0002_auto_20190529_1755.py
|
bluebottle/assignments/migrations/0002_auto_20190529_1755.py
|
Python
| 0.000002 |
@@ -0,0 +1,425 @@
+# -*- coding: utf-8 -*-%0A# Generated by Django 1.11.15 on 2019-05-29 15:45%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('assignments', '0001_initial'),%0A %5D%0A%0A operations = %5B%0A migrations.RenameField(%0A model_name='assignment',%0A old_name='end',%0A new_name='end_time',%0A ),%0A %5D%0A
|
|
9ca926d052edc754ca3b6f3663b1c00887b2965a
|
add migration with blank projects.Tag
|
brasilcomvc/projects/migrations/0004_tag_may_be_blank.py
|
brasilcomvc/projects/migrations/0004_tag_may_be_blank.py
|
Python
| 0.000001 |
@@ -0,0 +1,453 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import models, migrations%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('projects', '0003_project_tags'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='project',%0A name='tags',%0A field=models.ManyToManyField(to='projects.Tag', blank=True),%0A preserve_default=True,%0A ),%0A %5D%0A
|
|
a6d3ae8b27f6e97e7e5b4388a20836f25953c26d
|
Add example config file
|
config-example.py
|
config-example.py
|
Python
| 0.000001 |
@@ -0,0 +1,374 @@
+%22%22%22%0AMinimal config file for kahvibot. Just define values as normal Python code.%0A%22%22%22%0A%0A# put your bot token here as a string%0Abot_token = %22%22%0A%0A# the tg username of the bot's admin.%0Aadmin_username = %22%22%0A%0A# if a message contains any of these words, the bot responds%0Atrigger_words = %5B%0A %22kahvi%22,%0A %22%5Cu2615%22, # coffee emoji%0A %22tsufe%22,%0A %22kahavi%22,%0A #%22sima%22, # wappu mode%0A%5D%0A
|
|
bcda14f8258daaf3475dd9d3ca3eb7b25aa0496c
|
Add py-voluptuous (#13457)
|
var/spack/repos/builtin/packages/py-voluptuous/package.py
|
var/spack/repos/builtin/packages/py-voluptuous/package.py
|
Python
| 0 |
@@ -0,0 +1,614 @@
+# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other%0A# Spack Project Developers. See the top-level COPYRIGHT file for details.%0A#%0A# SPDX-License-Identifier: (Apache-2.0 OR MIT)%0A%0Afrom spack import *%0A%0A%0Aclass PyVoluptuous(PythonPackage):%0A %22%22%22Voluptous, despite the name, is a Python data validation library.%22%22%22%0A homepage = %22https://github.com/alecthomas/voluptuous%22%0A url = %22https://github.com/alecthomas/voluptuous/archive/0.11.5.tar.gz%22%0A%0A version('0.11.5', sha256='01adf0b6c6f61bd11af6e10ca52b7d4057dd0be0343eb9283c878cf3af56aee4')%0A%0A depends_on('py-setuptools', type='build')%0A
|
|
93000ab88c489f720d0f7e6a8921dc69342d61f1
|
Add migration
|
webapp/apps/dynamic/migrations/0012_auto_20160616_1908.py
|
webapp/apps/dynamic/migrations/0012_auto_20160616_1908.py
|
Python
| 0.000002 |
@@ -0,0 +1,408 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import models, migrations%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('dynamic', '0011_auto_20160614_1902'),%0A %5D%0A%0A operations = %5B%0A migrations.RenameField(%0A model_name='dynamicbehaviorsaveinputs',%0A old_name='BE_CG_per',%0A new_name='BE_cg',%0A ),%0A %5D%0A
|
|
662ad845a0ce729d8d8b72121a4c7c6f22e3eaa2
|
support for phonetic similarity added
|
src/indicnlp/script/phonetic_sim.py
|
src/indicnlp/script/phonetic_sim.py
|
Python
| 0 |
@@ -0,0 +1,2391 @@
+# Copyright Anoop Kunchukuttan 2014 - present%0A#%0A# This file is part of Indic NLP Library.%0A# %0A# Indic NLP Library is free software: you can redistribute it and/or modify%0A# it under the terms of the GNU General Public License as published by%0A# the Free Software Foundation, either version 3 of the License, or%0A# (at your option) any later version.%0A# %0A# Indic NLP Library is distributed in the hope that it will be useful,%0A# but WITHOUT ANY WARRANTY; without even the implied warranty of%0A# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the%0A# GNU General Public License for more details.%0A# %0A# You should have received a copy of the GNU General Public License%0A# along with Indic NLP Library. If not, see %3Chttp://www.gnu.org/licenses/%3E.%0A#%0A%0Afrom indicnlp import loader%0Afrom indicnlp import langinfo%0Afrom indicnlp.script.indic_scripts import * %0Aimport numpy as np%0Aimport gzip%0Aimport pandas as pd%0Aimport codecs,sys %0A%0Adef equal(v1,v2): %0A return 0.0 if np.sum( xor_vectors(v1, v2)) %3E 0 else 1.0%0A%0Adef dice(v1,v2):%0A dotprod=2*float(np.dot( v1, v2.T ))%0A return dotprod/float(len(v1)+len(v2))%0A%0Adef jaccard(v1,v2):%0A dotprod=float(np.dot( v1, v2.T ))%0A return dotprod/float(len(v1)+len(v2)-dotprod)%0A%0Adef cosine(v1,v2):%0A dotprod=float(np.dot( v1, v2.T ))%0A norm1=float(np.dot( v1, v1.T ))%0A norm2=float(np.dot( v2, v2.T ))%0A return ((dotprod)/(np.sqrt(norm1*norm2)+0.00001))%0A%0Adef dotprod(v1,v2): %0A return float(np.dot( v1, v2.T ))%0A%0Adef sim1(v1,v2,base=5.0): %0A return np.power(base,dotprod(v1,v2)) %0A%0Adef softmax(v1,v2): %0A return sim1(v1,v2,np.e)%0A%0Adef create_similarity_matrix(sim_func,slang,tlang,normalize=True):%0A%0A dim=langinfo.COORDINATED_RANGE_END_INCLUSIVE-langinfo.COORDINATED_RANGE_START_INCLUSIVE+1 %0A sim_mat=np.zeros((dim,dim)) %0A%0A for offset1 in xrange(langinfo.COORDINATED_RANGE_START_INCLUSIVE, langinfo.COORDINATED_RANGE_END_INCLUSIVE+1): %0A v1=get_phonetic_feature_vector(offset_to_char(offset1,slang),slang)%0A for offset2 in xrange(langinfo.COORDINATED_RANGE_START_INCLUSIVE, langinfo.COORDINATED_RANGE_END_INCLUSIVE+1): %0A v2=get_phonetic_feature_vector(offset_to_char(offset2,tlang),tlang)%0A sim_mat%5Boffset1,offset2%5D=sim_func(v1,v2)%0A%0A if normalize: %0A sums=np.sum(sim_mat, axis=1)%0A sim_mat=(sim_mat.transpose()/sums).transpose()%0A%0A return sim_mat%0A%0A
|
|
30ea7b5c77acc0af8826e3aef6155f9d329ed419
|
Create getCpuUsage2.py
|
mesosmetrics/getCpuUsage2.py
|
mesosmetrics/getCpuUsage2.py
|
Python
| 0 |
@@ -0,0 +1,1195 @@
+import urllib%0Aimport json%0Aimport time%0Aimport sys%0A%0A%0Aif __name__ == '__main__':%0A%0A%09agent=sys.argv%5B1%5D%0A%0A%09url = %22http://%22 + agent + %22:5051/monitor/statistics%22%0A%0A%0A%09executors = %7B%7D%0A%0A%0A%09response = urllib.urlopen(url)%0A%0A%09data = json.loads(response.read())%0A%0A%09for itm in data:%0A%09%09executor = %7B%7D%0A%09%09id = itm%5B%22executor_id%22%5D%0A%09%09executor%5B%22name%22%5D = itm%5B%22executor_name%22%5D%0A%0A%09%09a = %7B%7D%0A%09%09a%5B%22cpu_system%22%5D = itm%5B%22statistics%22%5D%5B%22cpus_system_time_secs%22%5D%0A%09%09a%5B%22cpu_user%22%5D = itm%5B%22statistics%22%5D%5B%22cpus_user_time_secs%22%5D%0A%09%09a%5B%22ts%22%5D = itm%5B%22statistics%22%5D%5B%22timestamp%22%5D%0A%09%09executor%5B%22a%22%5D = a%0A%0A%09%09executors%5Bid%5D = executor%0A%0A%09time.sleep(5)%0A%09response = urllib.urlopen(url)%0A%0A%09data = json.loads(response.read())%0A%0A %0A%09for itm in data:%0A%09%09id = itm%5B%22executor_id%22%5D%0A%0A%09%09b = %7B%7D%0A%09%09b%5B%22cpu_system%22%5D = itm%5B%22statistics%22%5D%5B%22cpus_system_time_secs%22%5D%0A%09%09b%5B%22cpu_user%22%5D = itm%5B%22statistics%22%5D%5B%22cpus_user_time_secs%22%5D%0A%09%09b%5B%22ts%22%5D = itm%5B%22statistics%22%5D%5B%22timestamp%22%5D%0A%0A%0A%09%09executors%5Bid%5D%5B%22b%22%5D = b%0A%0A%0A%09for id,itm in executors.items():%0A%09%09cpus_total_usage = ((itm%5B%22b%22%5D%5B%22cpu_system%22%5D-itm%5B%22a%22%5D%5B%22cpu_system%22%5D) + %5C%0A (itm%5B%22b%22%5D%5B%22cpu_user%22%5D-itm%5B%22a%22%5D%5B%22cpu_user%22%5D)) / %5C%0A (itm%5B%22b%22%5D%5B%22ts%22%5D-itm%5B%22a%22%5D%5B%22ts%22%5D)%0A%09%09print(str(id) + %22 : %22 + str(cpus_total_usage))%0A%0A%0A
|
|
93a41a7d406e5f7c264865d96c0f85b1181e5cb0
|
add basic test
|
tests/utils_tests/extension_tests/test_forward.py
|
tests/utils_tests/extension_tests/test_forward.py
|
Python
| 0.000022 |
@@ -0,0 +1,2054 @@
+import mock%0Aimport numpy as np%0Aimport unittest%0A%0Aimport chainer%0Afrom chainer import testing%0A%0Afrom chainercv.utils import forward%0A%0A%[email protected](*testing.product(%7B%0A 'in_shapes': %5B((3, 4),), ((3, 4), (5,))%5D,%0A 'out_shapes': %5B((3, 4),), ((3, 4), (5,))%5D,%0A 'variable': %5BTrue, False%5D,%0A%7D))%0Aclass TestForward(unittest.TestCase):%0A%0A def setUp(self):%0A self.xp = np%0A%0A self.mocked_model = mock.MagicMock()%0A self.mocked_model.xp = self.xp%0A%0A self.inputs = tuple(np.empty(shape) for shape in self.in_shapes)%0A if len(self.inputs) == 1:%0A self.inputs = self.inputs%5B0%5D%0A%0A self.outputs = tuple(%0A self.xp.array(np.empty(shape)) for shape in self.out_shapes)%0A if self.variable:%0A self.outputs = tuple(%0A chainer.Variable(output) for output in self.outputs)%0A if len(self.outputs) == 1:%0A self.outputs = self.outputs%5B0%5D%0A%0A def _check_inputs(self, inputs):%0A if isinstance(self.inputs, tuple):%0A orig_inputs = self.inputs%0A else:%0A orig_inputs = self.inputs,%0A%0A for orig, in_ in zip(orig_inputs, inputs):%0A self.assertIsInstance(in_, chainer.Variable)%0A self.assertEqual(chainer.cuda.get_array_module(in_.data), self.xp)%0A%0A in_ = chainer.cuda.to_cpu(in_.data)%0A np.testing.assert_equal(in_, orig)%0A%0A def _check_outputs(self, outputs):%0A if len(outputs) == 1:%0A outputs = outputs,%0A%0A for orig, out in zip(self.outputs, outputs):%0A self.assertIsInstance(out, np.ndarray)%0A%0A if self.variable:%0A orig = orig.data%0A orig = chainer.cuda.to_cpu(orig)%0A np.testing.assert_equal(out, orig)%0A%0A def test_forward(self):%0A def _call(*inputs):%0A self._check_inputs(inputs)%0A return self.outputs%0A self.mocked_model.side_effect = _call%0A%0A outputs = forward(self.mocked_model, self.inputs)%0A self._check_outputs(outputs)%0A%0A%0Atesting.run_module(__name__, __file__)%0A
|
|
4cc1c75356ac97632345c1900d45ac74521079cd
|
Find an average
|
ch03_03_p.py
|
ch03_03_p.py
|
Python
| 0.999994 |
@@ -0,0 +1,273 @@
+summation = 0%0Ainput_number = float(input()) %0Anumber_of_input = 0%0A%0Awhile input_number != -1:%0A number_of_input += 1%0A summation += input_number%0A input_number = float(input())%0A%0Aif 0 == number_of_input:%0A print(%22No Data%22)%0Aelse:%0A print(summation / number_of_input)%0A
|
|
133da92ed69aafc6c0a8d4466cf3b0266c5edc68
|
Add migration for change in profile model.
|
userprofile/migrations/0006_auto_20180309_2215.py
|
userprofile/migrations/0006_auto_20180309_2215.py
|
Python
| 0 |
@@ -0,0 +1,482 @@
+# -*- coding: utf-8 -*-%0A# Generated by Django 1.11 on 2018-03-09 22:15%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations, models%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('userprofile', '0005_auto_20171121_1923'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='profile',%0A name='image',%0A field=models.ImageField(default=None, upload_to='profilepictures'),%0A ),%0A %5D%0A
|
|
c0adf6df21e85c1a0462e59078d7a26af9c7dceb
|
Add error to see if flake8 catches it
|
checklist.py
|
checklist.py
|
#!/usr/bin/env python

# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------

from __future__ import absolute_import, division, print_function

import collections
import os
import os.path
import sys


def main():
    """Go on a power trip by nitpicking the scikit-bio repo.

    Attempts to find things that are wrong with the repo -- these are usually
    annoying details introduced by human error. The code goes out of its way
    to nitpick as much as possible in order to maximize the effectiveness of
    the power trip.

    Returns
    -------
    int
        Return code: 0 if there were no validation errors, 1 otherwise. Useful
        as an exit code (e.g. for use with ``sys.exit``).

    """
    root = 'skbio'
    validators = [InitValidator(), ExecPermissionValidator(),
                  GeneratedCythonValidator()]

    return_code = 0
    for validator in validators:
        success, msg = validator.validate(root)

        if not success:
            return_code = 1
            sys.stderr.write('\n'.join(msg))
            sys.stderr.write('\n\n')

    return return_code


class RepoValidator(object):
    """Abstract base class representing a repository validator.

    Subclasses must override and implement ``_validate`` (see its docstring for
    more details).

    Subclasses should also provide a ``reason``: this is a string describing
    the reason for a particular type of validation failure (see subclasses for
    examples). ``reason`` is included in the validation error message/report
    created by ``validate``.

    """
    reason = ''

    def validate(self, root):
        """Validate a directory tree recursively.

        Parameters
        ----------
        root : str
            Root directory to validate recursively.

        Returns
        -------
        tuple of (bool, list of str)
            First element is a ``bool`` indicating success status: ``True`` if
            `root` passed validation, ``False`` if there were any errors.
            Second element is a list of strings containing the validation error
            message.

        """
        invalids = []
        for root, dirs, files in os.walk(root):
            result = self._validate(root, dirs, files)
            invalids.extend(result)

        success = True
        msg = []
        if invalids:
            success = False
            msg.append(self.reason + ':')

            for invalid in invalids:
                msg.append(" %s" % invalid)

        return success, msg

    def _validate(self, root, dirs, files):
        """Validate a single directory.

        Subclasses must override and implement this method. The method is
        supplied with the three values yielded by ``os.walk``.

        Parameters
        ----------
        root : str
            Path to the current directory to be validated.
        dirs : list of str
            Directory names within `root`.
        files : list of str
            Filenames within `root`.

        Returns
        -------
        list of str
            List of filepaths or dirpaths to be considered invalid (i.e., that
            did not pass the validation checks).

        See Also
        --------
        os.walk

        """
        raise NotImplementedError("Subclasses must implement _validate.")


class InitValidator(RepoValidator):
    """Flag library code directories that are missing init files.

    This type of validation is important mainly because it is very easy to
    forget to add an __init__.py file to a new test directory. If this
    happens, nose will skip those tests unless it is run from the root of the
    source repository. Thus, the tests will be skipped if the package is
    pip-installed, e.g., as an end-user might install a release.

    Parameters
    ----------
    skip_dirs : iterable of str, optional
        Directory names to skip during validation. Defaults to skipping any
        directories named ``'data'`` or ``'__pycache__'`` (and anything
        contained within them).

    """
    reason = "Directories missing init files"

    def __init__(self, skip_dirs=None):
        if skip_dirs is None:
            skip_dirs = {'data', '__pycache__'}
        self.skip_dirs = set(skip_dirs)

    def _validate(self, root, dirs, files):
        # If any of the directories yet to be visited should be skipped, remove
        # them from ``dirs`` so that we don't visit them in a future iteration.
        # This guarantees that ``root`` is a valid directory that should not be
        # skipped (since we're doing a top-down walk).
        for skip_dir in self.skip_dirs:
            if skip_dir in dirs:
                dirs.remove(skip_dir)

        invalid_dirs = []
        if '__init__.py' not in files:
            invalid_dirs.append(root)
        return invalid_dirs


class ExecPermissionValidator(RepoValidator):
    """Flag code files that have execute permissions.

    Parameters
    ----------
    extensions : iterable of str, optional
        File extensions of files to validate. Defaults to Python, Cython, and
        C files (header and source files).

    """
    reason = "Library code with execute permissions"

    def __init__(self, extensions=None):
        if extensions is None:
            extensions = {'.py', '.pyx', '.h', '.c'}
        self.extensions = set(extensions)

    def _validate(self, root, dirs, files):
        invalid_fps = []
        for f in files:
            _, ext = os.path.splitext(f)

            if ext in self.extensions:
                fp = os.path.join(root, f)

                if os.access(fp, os.X_OK):
                    invalid_fps.append(fp)
        return invalid_fps


class GeneratedCythonValidator(RepoValidator):
    """Flag Cython files that are missing generated C files.

    Flags Cython files that aren't paired with a generated C file. The
    generated C file must be in the same directory as the Cython file, and its
    name (besides the file extension) must match. The validator also ensures
    that the generated C file is not empty.

    Parameters
    ----------
    cython_ext : str, optional
        File extension for Cython files.
    c_ext : str, optional
        File extension for generated C files.

    """
    reason = "Cython code missing generated C code"

    def __init__(self, cython_ext='.pyx', c_ext='.c'):
        self.cython_ext = cython_ext
        self.c_ext = c_ext

    def _validate(self, root, dirs, files):
        invalid_fps = []
        ext_to_base = collections.defaultdict(list)

        # Map from file extension to a list of basenames (without the
        # extension).
        for f in files:
            base, ext = os.path.splitext(f)
            ext_to_base[ext].append(base)

        # For each Cython file, try to find a matching C file. If we have a
        # match, make sure the C file isn't empty.
        for cython_base in ext_to_base[self.cython_ext]:
            cython_fp = os.path.join(root, cython_base + self.cython_ext)
            c_fp = os.path.join(root, cython_base + self.c_ext)

            if cython_base not in ext_to_base[self.c_ext]:
                invalid_fps.append(cython_fp)
            elif os.path.getsize(c_fp) <= 0:
                invalid_fps.append(cython_fp)

        return invalid_fps


if __name__ == '__main__':
    sys.exit(main())
|
Python
| 0 |
@@ -492,16 +492,40 @@
rt sys%0A%0A
+import skbio as biolopy%0A
%0Adef mai
|
4bce7685c39e7efbb674407184d0bf436cbdaec0
|
Create ftxproxy.py
|
ftxproxy.py
|
ftxproxy.py
|
Python
| 0 |
@@ -0,0 +1,3251 @@
+#!/usr/bin/python%0A# This is a simple port-forward / proxy, written using only the default python%0A# library. If you want to make a suggestion or fix something you can contact-me%0A# at voorloop_at_gmail.com%0A# Distributed over IDC(I Don't Care) license%0Aimport socket%0Aimport select%0Aimport time%0Aimport sys%0A%0A# Changing the buffer_size and delay, you can improve the speed and bandwidth.%0A# But when buffer get to high or delay go too down, you can broke things%0Abuffer_size = 4096%0Adelay = 0.0001%0Aforward_to = ('10.11.10.18', 8989)%0A%0Aclass Forward:%0A def __init__(self):%0A self.forward = socket.socket(socket.AF_INET, socket.SOCK_STREAM)%0A%0A def start(self, host, port):%0A try:%0A self.forward.connect((host, port))%0A return self.forward%0A except Exception, e:%0A print e%0A return False%0A%0Aclass TheServer:%0A input_list = %5B%5D%0A channel = %7B%7D%0A%0A def __init__(self, host, port):%0A self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)%0A self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)%0A self.server.bind((host, port))%0A self.server.listen(200)%0A%0A def main_loop(self):%0A self.input_list.append(self.server)%0A while 1:%0A time.sleep(delay)%0A ss = select.select%0A inputready, outputready, exceptready = ss(self.input_list, %5B%5D, %5B%5D)%0A for self.s in inputready:%0A if self.s == self.server:%0A self.on_accept()%0A break%0A%0A self.data = self.s.recv(buffer_size)%0A if len(self.data) == 0:%0A self.on_close()%0A break%0A else:%0A self.on_recv()%0A%0A def on_accept(self):%0A forward = Forward().start(forward_to%5B0%5D, forward_to%5B1%5D)%0A clientsock, clientaddr = self.server.accept()%0A if forward:%0A print clientaddr, %22has connected%22%0A self.input_list.append(clientsock)%0A self.input_list.append(forward)%0A self.channel%5Bclientsock%5D = forward%0A self.channel%5Bforward%5D = clientsock%0A else:%0A print %22Can't establish connection with remote server.%22,%0A print %22Closing connection with client side%22, clientaddr%0A clientsock.close()%0A%0A def on_close(self):%0A print self.s.getpeername(), %22has disconnected%22%0A #remove objects from input_list%0A self.input_list.remove(self.s)%0A self.input_list.remove(self.channel%5Bself.s%5D)%0A out = self.channel%5Bself.s%5D%0A # close the connection with client%0A self.channel%5Bout%5D.close() # equivalent to do self.s.close()%0A # close the connection with remote server%0A self.channel%5Bself.s%5D.close()%0A # delete both objects from channel dict%0A del self.channel%5Bout%5D%0A del self.channel%5Bself.s%5D%0A%0A def on_recv(self):%0A data = self.data%0A # here we can parse and/or modify the data before send forward%0A print data%0A self.channel%5Bself.s%5D.send(data)%0A%0Aif __name__ == '__main__':%0A server = TheServer('', 8002)%0A try:%0A server.main_loop()%0A except KeyboardInterrupt:%0A print %22Ctrl C - Stopping server%22%0A sys.exit(1)%0A
|
|
ec8f6fdba200fcb4816e170c1517899f1c03db04
|
added vowels
|
vowels.py
|
vowels.py
|
Python
| 0.999665 |
@@ -0,0 +1,182 @@
+# Copyright %C2%A9 2014 Bart Massey%0A# Print the hex value of the set of vowels.%0A%0An = 0%0Afor c in ('a', 'e', 'i', 'o', 'u'):%0A b = 2**(ord(c) - ord('a'))%0A n += b%0Aprint(format(n, %22x%22))%0A
|
|
0f1475eddf3f9237a1f746784b090a4f65d96226
|
add import script for Swindon
|
polling_stations/apps/data_collection/management/commands/import_swindon.py
|
polling_stations/apps/data_collection/management/commands/import_swindon.py
|
Python
| 0 |
@@ -0,0 +1,394 @@
+from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter%0A%0Aclass Command(BaseXpressDemocracyClubCsvImporter):%0A council_id = 'E06000030'%0A addresses_name = 'parl.2017-06-08/Version 1/Democracy_Club__08June2017 (11).tsv'%0A stations_name = 'parl.2017-06-08/Version 1/Democracy_Club__08June2017 (11).tsv'%0A elections = %5B'parl.2017-06-08'%5D%0A csv_delimiter = '%5Ct'%0A
|
|
9366fe261b2f13f81678851fd5ae4a0035a811c7
|
Add new package: py-walinuxagent (#18961)
|
var/spack/repos/builtin/packages/py-walinuxagent/package.py
|
var/spack/repos/builtin/packages/py-walinuxagent/package.py
|
Python
| 0 |
@@ -0,0 +1,862 @@
+# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other%0A# Spack Project Developers. See the top-level COPYRIGHT file for details.%0A#%0A# SPDX-License-Identifier: (Apache-2.0 OR MIT)%0A%0Afrom spack import *%0A%0A%0Aclass PyWalinuxagent(PythonPackage):%0A %22%22%22Microsoft Azure Linux Guest Agent.%22%22%22%0A%0A homepage = %22https://github.com/Azure/WALinuxAgent%22%0A url = %22https://github.com/Azure/WALinuxAgent/archive/pre-v2.2.52.tar.gz%22%0A%0A version('2.2.52', sha256='02c26af75827bd7042aa2285c78dee86ddb25a6a8f6bb0a85679a2df9ba56a3a')%0A version('2.2.50', sha256='3b2b99552e3b35dfcbb4cabf476d0113d701eb23d2e0e61f35f0fa33cabde0a1')%0A%0A depends_on('[email protected]:', type=('build', 'run'))%0A depends_on('py-setuptools', type='build')%0A depends_on('py-pyasn1', type=('build', 'run'))%0A depends_on('py-distro', type=('build', 'run'), when='%[email protected]:')%0A
|
|
f3db6608c2b4afeb214c3f1b94e0175609ad0b88
|
Add migration file for event slug changes
|
cs4teachers/events/migrations/0018_auto_20170706_0803.py
|
cs4teachers/events/migrations/0018_auto_20170706_0803.py
|
Python
| 0 |
@@ -0,0 +1,1289 @@
+# -*- coding: utf-8 -*-%0A# Generated by Django 1.11.2 on 2017-07-06 08:03%0Afrom __future__ import unicode_literals%0A%0Aimport autoslug.fields%0Afrom django.db import migrations%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('events', '0017_auto_20170705_0952'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='event',%0A name='slug',%0A field=autoslug.fields.AutoSlugField(editable=False, populate_from='name'),%0A ),%0A migrations.AlterField(%0A model_name='location',%0A name='slug',%0A field=autoslug.fields.AutoSlugField(editable=False, populate_from='name'),%0A ),%0A migrations.AlterField(%0A model_name='resource',%0A name='slug',%0A field=autoslug.fields.AutoSlugField(editable=False, populate_from='name'),%0A ),%0A migrations.AlterField(%0A model_name='session',%0A name='slug',%0A field=autoslug.fields.AutoSlugField(editable=False, populate_from='name', unique_with=%5B'event__slug'%5D),%0A ),%0A migrations.AlterField(%0A model_name='thirdpartyevent',%0A name='slug',%0A field=autoslug.fields.AutoSlugField(editable=False, populate_from='name'),%0A ),%0A %5D%0A
|
|
7ee468a779d2cfb558ebf89af59ac2f8d12218fa
|
Add better help output
|
src/sentry/runner/commands/backup.py
|
src/sentry/runner/commands/backup.py
|
"""
sentry.runner.commands.backup
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2015 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import click
from sentry.runner.decorators import configuration
@click.command(name='import')
@click.argument('src', type=click.File('rb'))
@configuration
def import_(src):
"Imports data from a Sentry export."
from django.core import serializers
for obj in serializers.deserialize("json", src, stream=True, use_natural_keys=True):
obj.save()
def sort_dependencies(app_list):
"""
Similar to Django's except that we discard the important of natural keys
when sorting dependencies (i.e. it works without them).
"""
from django.db.models import get_model, get_models
# Process the list of models, and get the list of dependencies
model_dependencies = []
models = set()
for app, model_list in app_list:
if model_list is None:
model_list = get_models(app)
for model in model_list:
models.add(model)
# Add any explicitly defined dependencies
if hasattr(model, 'natural_key'):
deps = getattr(model.natural_key, 'dependencies', [])
if deps:
deps = [get_model(*d.split('.')) for d in deps]
else:
deps = []
# Now add a dependency for any FK relation with a model that
# defines a natural key
for field in model._meta.fields:
if hasattr(field.rel, 'to'):
rel_model = field.rel.to
if rel_model != model:
deps.append(rel_model)
# Also add a dependency for any simple M2M relation with a model
# that defines a natural key. M2M relations with explicit through
# models don't count as dependencies.
for field in model._meta.many_to_many:
rel_model = field.rel.to
if rel_model != model:
deps.append(rel_model)
model_dependencies.append((model, deps))
model_dependencies.reverse()
# Now sort the models to ensure that dependencies are met. This
# is done by repeatedly iterating over the input list of models.
# If all the dependencies of a given model are in the final list,
# that model is promoted to the end of the final list. This process
# continues until the input list is empty, or we do a full iteration
# over the input models without promoting a model to the final list.
# If we do a full iteration without a promotion, that means there are
# circular dependencies in the list.
model_list = []
while model_dependencies:
skipped = []
changed = False
while model_dependencies:
model, deps = model_dependencies.pop()
# If all of the models in the dependency list are either already
# on the final model list, or not on the original serialization list,
# then we've found another model with all it's dependencies satisfied.
found = True
for candidate in ((d not in models or d in model_list) for d in deps):
if not candidate:
found = False
if found:
model_list.append(model)
changed = True
else:
skipped.append((model, deps))
if not changed:
raise RuntimeError("Can't resolve dependencies for %s in serialized app list." %
', '.join('%s.%s' % (model._meta.app_label, model._meta.object_name)
for model, deps in sorted(skipped, key=lambda obj: obj[0].__name__))
)
model_dependencies = skipped
return model_list
@click.command()
@click.argument('dest', default='-', type=click.File('wb'))
@click.option('--silent', '-q', default=False, is_flag=True)
@click.option('--indent', default=2)
@click.option('--exclude', default=None)
@configuration
def export(dest, silent, indent, exclude):
"Exports core metadata for the Sentry installation."
if exclude is None:
exclude = ()
else:
exclude = exclude.lower().split(',')
from django.db.models import get_apps
from django.core import serializers
def yield_objects():
app_list = [(a, None) for a in get_apps()]
# Collate the objects to be serialized.
for model in sort_dependencies(app_list):
if (
not getattr(model, '__core__', True) or
model.__name__.lower() in exclude or
model._meta.proxy
):
if not silent:
click.echo(">> Skipping model <%s>" % (model.__name__,), err=True)
continue
queryset = model._base_manager.order_by(model._meta.pk.name)
for obj in queryset.iterator():
yield obj
if not silent:
click.echo('>> Beginning export', err=True)
serializers.serialize("json", yield_objects(), indent=indent, stream=dest,
use_natural_keys=True)
|
Python
| 0.999999 |
@@ -4036,16 +4036,50 @@
lag=True
+, help='Silence all debug output.'
)%0A@click
@@ -4111,49 +4111,175 @@
lt=2
-)%[email protected]('--exclude', default=None
+, help='Number of spaces to indent for the JSON output. (default: 2)')%[email protected]('--exclude', default=None, help='Models to exclude from export.', metavar='MODELS'
)%0A@c
|