commit (string, 40 chars) | subject (string, 1–3.25k chars) | old_file (string, 4–311 chars) | new_file (string, 4–311 chars) | old_contents (string, 0–26.3k chars) | lang (string, 3 classes) | proba (float64, 0–1) | diff (string, 0–7.82k chars)
---|---|---|---|---|---|---|---|
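Cell text in this dump is stored percent-encoded: newlines appear as %0A, tabs as %09, quotes as %22, percent signs as %25, and brackets/braces as %5B, %5D, %7B, %7D. Below is a minimal sketch for recovering plain source text from a cell using only the Python standard library, assuming the cells are ordinary UTF-8 percent-encoding; the `encoded` value is a shortened, hypothetical example, not a real row.

```python
from urllib.parse import unquote

def decode_cell(cell: str) -> str:
    """Turn a percent-encoded dataset cell back into plain text."""
    # unquote() maps %0A -> newline, %09 -> tab, %5B -> '[', and so on;
    # it leaves '+' diff markers untouched (unlike unquote_plus).
    return unquote(cell)

# Shortened, hypothetical 'diff' cell:
encoded = "+def f(A, x, y):%0A%09return A%5Bx%5D %3C A%5By%5D"
print(decode_cell(encoded))
# +def f(A, x, y):
# 	return A[x] < A[y]
```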
8c82465a08f5a601e6a43a8eb675136fc3678954
|
Create lc960.py
|
LeetCode/lc960.py
|
LeetCode/lc960.py
|
Python
| 0.000001 |
@@ -0,0 +1,815 @@
def createArray(dims) :
	if len(dims) == 1:
		return [0 for _ in range(dims[0])]
	return [createArray(dims[1:]) for _ in range(dims[0])]


def f(A, x, y):
    m = len(A)
    for i in range(m):
        if A[i][x] > A[i][y]:
            return 0
    return 1

class Solution(object):
    def minDeletionSize(self, A):
        """
        :type A: List[str]
        :rtype: int
        """
        n = len(A[0])
        g = createArray([n, n])
        for i in range(n):
            for j in range(i+1, n):
                g[i][j] = f(A, i, j)
        dp = createArray([n])
        for i in range(0, n):
            dp[i] = 1
            for j in range(0, i):
                if g[j][i] == 1:
                    if dp[i] < dp[j] + 1:
                        dp[i] = dp[j] + 1
        return n - max(dp)
|
|
3ebae0f57ae3396213eb28b6fc7a23ff3e3c4980
|
Create file and add pseudocode
|
uml-to-cpp.py
|
uml-to-cpp.py
|
Python
| 0.000001 |
@@ -0,0 +1,1995 @@
# Copyright (C) 2017 Bran Seals. All rights reserved.
# Created: 2017-06-05

print("== UML to CPP ==")
print("Create or modify C++ header and implementation files by plaintext UML.")
#print("Enter a UML filename: ") # file import currently disabled
# check if file isn't too bonkers
#uml = [] # pull UML into memory as string list
# check if file is properly formatted

classList = [] # list of classes that will be created, along with members
noteList = [] # if weird things happen, this list will show potential errors
              # will be displayed after files are created for user info

# while uml list items exist:
    # get class name
    # while } not reached:
        # if +, put into hppPub
        # if -, put into hppPriv
        # if neither, put into hppPriv and add message to noteList
    # use these to create UmlClass object and append to classList

# for each in classList:
    # build hpp list using hpp, hppPublic, hppPrivate
    # checkForLibs()
    # while hpp list item exists:
        # if isFunction, append to functions list
    # while functions list item exists:
        # format function and append to cpp list
    # create name.hpp file and write using hpp list
    # create name.cpp file and write using cpp list
    # remove object from classList?

class UmlClass:
    def __init__(self, className, hppPub, hppPriv):
        self.name = className
        self.hppPublic = list(hppPub)
        self.hppPrivate = list(hppPriv)

    functions = [] # list of functions used to build cpp file
    hpp = [] # will contain final hpp template, built from hppPub, hppPriv
    cpp = [] # same as hpp, but with implementation file

    #def isFunction(): # looks for function syntax
        # used when creating cpp file from hpp list

    #def checkForLibs(): # include libraries for data types that need them

    #def formatFunc(): # formats function from hpp to cpp style
        # also takes into account return type and variable names
|
|
b5f3a984b068d0bf09f63380b365436114e54591
|
Switch to a cleaner method of getting version number from server.
|
flexget/options.py
|
flexget/options.py
|
from __future__ import unicode_literals, division, absolute_import
import sys
import subprocess
import re
from argparse import ArgumentParser as ArgParser, Action, ArgumentError, SUPPRESS, _VersionAction
import flexget
from flexget.utils.tools import console
from flexget.utils import requests
def required_length(nmin, nmax):
"""Generates a custom Action to validate an arbitrary range of arguments."""
class RequiredLength(Action):
def __call__(self, parser, args, values, option_string=None):
if not nmin <= len(values) <= nmax:
raise ArgumentError(self, 'requires between %s and %s arguments' % (nmin, nmax))
setattr(args, self.dest, values)
return RequiredLength
class VersionAction(_VersionAction):
"""
Action to print the current version.
Also attempts to get more information from git describe if on git checkout.
"""
def __call__(self, parser, namespace, values, option_string=None):
if self.version == '{git}':
# Attempt to get version from git
version = ''
try:
subprocess.call(['git', 'fetch', '--tags'], stdout=subprocess.PIPE)
p = subprocess.Popen(['git', 'describe'], stdout=subprocess.PIPE)
version = p.stdout.read().strip()
except Exception:
pass
if version.startswith('1.0'):
self.version += version
# Print the version number
console('%s' % self.version)
# Check for latest version from server
try:
page = requests.get('http://download.flexget.com')
except requests.RequestException:
console('Error getting latest version number from download.flexget.com')
else:
ver = re.search(r'FlexGet-([\d\.]*)\.tar\.gz', page.text).group(1)
if self.version.lstrip('{git}') == ver:
console('You are on the latest release.')
else:
console('Latest release: %s' % ver)
parser.exit()
class ArgumentParser(ArgParser):
"""Contains all the options that both the core and webui should have"""
def __init__(self, **kwargs):
# Do this early, so even option processing stuff is caught
if '--bugreport' in sys.argv:
self._debug_tb_callback()
ArgParser.__init__(self, **kwargs)
self.add_argument('-V', '--version', action=VersionAction, version=flexget.__version__,
help='Print FlexGet version and exit.')
# This option is already handled above.
self.add_argument('--bugreport', action='store_true', dest='debug_tb',
help='Use this option to create a detailed bug report, '
'note that the output might contain PRIVATE data, so edit that out')
self.add_argument('--logfile', default='flexget.log',
help='Specify a custom logfile name/location. '
'Default is flexget.log in the config directory.')
self.add_argument('--debug', action='store_true', dest='debug',
help=SUPPRESS)
self.add_argument('--debug-trace', action='store_true', dest='debug_trace',
help=SUPPRESS)
self.add_argument('--loglevel', default='verbose',
choices=['none', 'critical', 'error', 'warning', 'info', 'verbose', 'debug', 'trace'],
help=SUPPRESS)
self.add_argument('--debug-sql', action='store_true', default=False,
help=SUPPRESS)
self.add_argument('-c', dest='config', default='config.yml',
help='Specify configuration file. Default is config.yml')
self.add_argument('--experimental', action='store_true', default=False,
help=SUPPRESS)
self.add_argument('--del-db', action='store_true', dest='del_db', default=False,
help=SUPPRESS)
self.add_argument('--profile', action='store_true', default=False, help=SUPPRESS)
def add_argument(self, *args, **kwargs):
if isinstance(kwargs.get('nargs'), basestring) and '-' in kwargs['nargs']:
# Handle a custom range of arguments
min, max = kwargs['nargs'].split('-')
min, max = int(min), int(max)
kwargs['action'] = required_length(min, max)
# Make the usage string a bit better depending on whether the first argument is optional
if min == 0:
kwargs['nargs'] = '*'
else:
kwargs['nargs'] = '+'
super(ArgumentParser, self).add_argument(*args, **kwargs)
def parse_args(self, args=None, namespace=None):
if args is None:
args = [unicode(arg, sys.getfilesystemencoding()) for arg in sys.argv[1:]]
args = super(ArgumentParser, self).parse_args(args, namespace)
if args.debug_trace:
args.debug = True
args.loglevel = 'trace'
elif args.debug:
args.loglevel = 'debug'
return args
def _debug_tb_callback(self, *dummy):
import cgitb
cgitb.enable(format="text")
class CoreArgumentParser(ArgumentParser):
"""Contains all the options that should only be used when running without a ui"""
def __init__(self, unit_test=False, **kwargs):
ArgumentParser.__init__(self, **kwargs)
self._unit_test = unit_test
self.add_argument('--log-start', action='store_true', dest='log_start', default=0,
help=SUPPRESS)
self.add_argument('--test', action='store_true', dest='test', default=0,
help='Verbose what would happen on normal execution.')
self.add_argument('--check', action='store_true', dest='validate', default=0,
help='Validate configuration file and print errors.')
self.add_argument('--learn', action='store_true', dest='learn', default=0,
help='Matches are not downloaded but will be skipped in the future.')
self.add_argument('--no-cache', action='store_true', dest='nocache', default=0,
help='Disable caches. Works only in plugins that have explicit support.')
self.add_argument('--reset', action='store_true', dest='reset', default=0,
help='DANGEROUS. Obliterates the database and runs with learn '
'in order to to regain useful state.')
# TODO: rename dest to cron, since this does more than just quiet
self.add_argument('--cron', action='store_true', dest='quiet', default=False,
help='Disables stdout and stderr output, log file used. Reduces logging level slightly.')
self.add_argument('--db-cleanup', action='store_true', dest='db_cleanup', default=False,
help='Forces the database cleanup event to run right now.')
# Plugins should respect this flag and retry where appropriate
self.add_argument('--retry', action='store_true', dest='retry', default=0, help=SUPPRESS)
self.add_argument('--validate', action='store_true', dest='validate', default=False,
help=SUPPRESS)
self.add_argument('--migrate', action='store', dest='migrate', default=None,
help=SUPPRESS)
# provides backward compatibility to --cron and -d
self.add_argument('-q', '--quiet', action='store_true', dest='quiet', default=False,
help=SUPPRESS)
def parse_args(self, args=None, namespace=None):
args = super(CoreArgumentParser, self).parse_args(args or self._unit_test and ['--reset'] or None, namespace)
if args.test and (args.learn or args.reset):
self.error('--test and %s are mutually exclusive' % ('--learn' if args.learn else '--reset'))
# reset and migrate should be executed with learn
if (args.reset and not self._unit_test) or args.migrate:
args.learn = True
# Lower the log level when executed with --cron
if args.quiet:
args.loglevel = 'info'
return args
|
Python
| 0 |
@@ -93,18 +93,8 @@
ess%0A
-import re%0A
from
@@ -1618,32 +1618,46 @@
load.flexget.com
+/latestversion
')%0A excep
@@ -1805,49 +1805,8 @@
er =
- re.search(r'FlexGet-([\d\.]*)\.tar\.gz',
pag
@@ -1815,17 +1815,15 @@
text
-).grou
+.stri
p(
-1
)%0A
|
e2ed635fb3289a5b45f5f15cd1eb543d87fb93d7
|
Add test for posting a review through the view
|
wafer/talks/tests/test_review_views.py
|
wafer/talks/tests/test_review_views.py
|
Python
| 0 |
@@ -0,0 +1,1919 @@
"""Tests for wafer.talk review form behaviour."""

from django.test import Client, TestCase
from django.urls import reverse

from reversion import revisions
from reversion.models import Version

from wafer.talks.models import (SUBMITTED, UNDER_CONSIDERATION,
                                ReviewAspect, Review)
from wafer.talks.forms import ReviewForm, make_aspect_key

from wafer.tests.utils import create_user
from wafer.talks.tests.fixtures import create_talk


class ReviewFormTests(TestCase):
    def setUp(self):
        self.reviewer_a = create_user('reviewer_a', perms=('add_review',))
        self.talk_a = create_talk('Talk A', SUBMITTED, "author_a")
        with revisions.create_revision():
            # Ensure we have an initial revision
            self.talk_a.save()
        self.aspect_1 = ReviewAspect.objects.create(name='General')
        self.aspect_2 = ReviewAspect.objects.create(name='Other')
        self.client = Client()

    def test_review_submission(self):
        """Test that submitting a review works"""
        self.client.login(username='reviewer_a', password='reviewer_a_password')
        self.assertTrue(Version.objects.get_for_object(self.talk_a), 1)
        response = self.client.post(reverse('wafer_talk_review', kwargs={'pk': self.talk_a.pk}),
                                    data={'notes': 'Review notes',
                                          make_aspect_key(self.aspect_1): '1',
                                          make_aspect_key(self.aspect_2): '2'})
        self.assertEqual(response.status_code, 302)
        review = Review.objects.get(talk=self.talk_a, reviewer=self.reviewer_a)
        self.assertEqual(review.avg_score, 1.5)
        self.talk_a.refresh_from_db()
        self.assertEqual(self.talk_a.status, UNDER_CONSIDERATION)
        self.assertTrue(Version.objects.get_for_object(self.talk_a), 2)
        self.assertTrue(review.is_current())
|
|
466410249867b3eadbe5e2b59c46c95ecd288c6c
|
Add script for word counts
|
python_scripts/solr_query_fetch_all.py
|
python_scripts/solr_query_fetch_all.py
|
Python
| 0.000001 |
@@ -0,0 +1,935 @@
#!/usr/bin/python

import requests
import ipdb
import time
import csv
import sys
import pysolr

def fetch_all( solr, query ) :
    documents = []
    num_matching_documents = solr.search( query ).hits

    start = 0
    rows = num_matching_documents

    sys.stderr.write( ' starting fetch for ' + query )
    while ( len( documents ) < num_matching_documents ) :
        results = solr.search( query, **{
            'start': start,
            'rows': rows,
            # 'fl' : 'media_id',
        })
        documents.extend( results.docs )
        start += rows

    assert len( documents ) <= num_matching_documents

    assert len( documents ) == num_matching_documents
    return documents


solr = pysolr.Solr('http://localhost:8983/solr/')

queries = [ '*:*',
          ]

for query in queries:
    print query
    results = fetch_all( solr, query )
    print "got " + query
    print results
|
|
3f69fae4f15efff515b82f216de36dd6d57807e9
|
add ci_test.py file for ci
|
settings/ci_test.py
|
settings/ci_test.py
|
Python
| 0.000001 |
@@ -0,0 +1,182 @@
__author__ = 'quxl'
from base import *

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
|
|
50f0e040f363e52a390efc6acd1bc0bc0ddcabcc
|
Add test funcs in report_reader for DB reading
|
report_reader.py
|
report_reader.py
|
Python
| 0.000001 |
@@ -0,0 +1,466 @@
import pymongo as pm

def connectDB():
	conn = pm.MongoClient('localhost', 27017)
	db = conn.get_database('report_db')
	return db

def getColList(db):
	return db.collection_names()

def getDocNum(col):
	return col.find().count()

def match(col, matchDict):
	return list(col.find(matchDict))

def main():
	db = connectDB()
	print(getColList(db))
	col = db['col20170503']
	print(getDocNum(col))
	print(match(col, {'school':'HFUT'}))

if __name__ == '__main__':
	main()
|
|
1c9d398be7f99f15fb550adca31f3366870930e3
|
Set debug to false in prod, otherwise true
|
wazimap_np/settings.py
|
wazimap_np/settings.py
|
# pull in the default wazimap settings
from wazimap.settings import * # noqa
# install this app before Wazimap
INSTALLED_APPS = ['wazimap_np'] + INSTALLED_APPS
DATABASE_URL = os.environ.get('DATABASE_URL', 'postgresql://wazimap_np:wazimap_np@localhost/wazimap_np')
DATABASES['default'] = dj_database_url.parse(DATABASE_URL)
DATABASES['default']['ATOMIC_REQUESTS'] = True
SCHEME = 'http' if (os.environ.get('APP_ENV', 'dev') == 'dev') else 'https'
URL = SCHEME+'://'+'nepalmap.org'
# Localise this instance of Wazimap
WAZIMAP['name'] = 'NepalMap'
# NB: this must be https if your site supports HTTPS.
WAZIMAP['url'] = URL
WAZIMAP['country_code'] = 'NP'
WAZIMAP['profile_builder'] = 'wazimap_np.profiles.get_census_profile'
WAZIMAP['levels'] = {
'country': {
'plural': 'countries',
'children': ['district']
},
'district': {
'plural': 'districts',
'children': ['vdc']
},
'vdc': {
'plural': 'vdcs',
'children': []
}
}
WAZIMAP['comparative_levels'] = ['country', 'district', 'vdc']
WAZIMAP['geometry_data'] = {
'country': 'geo/country.topojson',
'district': 'geo/district.topojson',
'vdc': 'geo/vdc.topojson'
}
WAZIMAP['ga_tracking_id'] = os.environ.get('GA_TRACKING_ID')
WAZIMAP['twitter'] = '@codefornepal'
WAZIMAP['map_centre'] = [28.229651, 83.8165328]
WAZIMAP['map_zoom'] = 7
# Custom Settings
WAZIMAP['email'] = '[email protected]'
WAZIMAP['github'] = 'https://github.com/Code4Nepal/nepalmap_app'
WAZIMAP['tagline'] = 'Explore and understand Nepal using data'
WAZIMAP['facebook'] = 'codefornepal'
WAZIMAP['twittercard'] = True
|
Python
| 0.004374 |
@@ -72,16 +72,89 @@
# noqa%0A%0A
+DEBUG = False if (os.environ.get('APP_ENV', 'dev') == 'prod') else True%0A%0A
# instal
@@ -229,17 +229,16 @@
D_APPS%0A%0A
-%0A
DATABASE
|
22578771d9812a21361ec959d16e3eaacba998e3
|
Add APData Info collector
|
APData/APInfo.py
|
APData/APInfo.py
|
Python
| 0 |
@@ -0,0 +1,830 @@
#
#
#
#

class APInfo:
	"""..."""

	# Protected members
	__IPAddress = ""
	__MACAddress = ""
	__Channel = 0
	__Region = 0
	__Localization = ""
	__TxPowerList = []
	__CurrentPowerIndex = -1
	__UnderloadLimit = -1
	__OverloadLimit = -1
	__Reachable = False
	__Enabled = False
	__EMailSent = False
	__SupportedOS = ""

	# Public members mirrors

	# Class initialization
	def __init__(self):
		#

	# Set the AP transmission power (Tx)
	def updateTxPowerIndex(self, newTxPower):
		#
		if newTxPower < 0:
			self.CurrentPowerIndex = len(self.TxPowerList) - 1
		else:
			for powerTxIndex in self.TxPowerList:
				if newTxPower > self.TxPowerList[powerTxIndex]:
					break
			self.CurrentPowerIndex = powerTxIndex


# Heroku: User: [email protected] / Senha: w4cpvX3DWw

# wget -qO- https://toolbelt.heroku.com/install-ubuntu.sh | sh
|
|
bd1e135a6ffd9186451ec02fcbcaab7f9066e40f
|
Add breakpad fetch recipe.
|
recipes/breakpad.py
|
recipes/breakpad.py
|
Python
| 0.001108 |
@@ -0,0 +1,1091 @@
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import sys

import recipe_util  # pylint: disable=F0401


# This class doesn't need an __init__ method, so we disable the warning
# pylint: disable=W0232
class Breakpad(recipe_util.Recipe):
  @staticmethod
  def fetch_spec(props):
    url = 'https://chromium.googlesource.com/breakpad/breakpad.git'
    solution = {
        'name': 'src',
        'url': url,
        'managed': False,
        'custom_deps': {},
        'safesync_url': '',
    }
    spec = {
        'solutions': [solution],
    }
    if props.get('target_os'):
      spec['target_os'] = props['target_os'].split(',')
    if props.get('target_os_only'):
      spec['target_os_only'] = props['target_os_only']
    return {
        'type': 'gclient_git',
        'gclient_git_spec': spec,
    }

  @staticmethod
  def expected_root(_props):
    return 'src'


def main(argv=None):
  return Breakpad().handle_args(argv)


if __name__ == '__main__':
  sys.exit(main(sys.argv))
|
|
4996ddddc14ad0d20759abbcf4d54e6132b7b028
|
Add the dj_redis_url file
|
dj_redis_url.py
|
dj_redis_url.py
|
Python
| 0.000001 |
@@ -0,0 +1,951 @@
# -*- coding: utf-8 -*-

import os

try:
    import urlparse
except ImportError:
    import urllib.parse as urlparse


# Register database schemes in URLs.
urlparse.uses_netloc.append("redis")

DEFAULT_ENV = "REDIS_URL"


def config(env=DEFAULT_ENV, default=None, **overrides):
    """Returns configured REDIS dictionary from REDIS_URL."""

    config = {}

    s = os.environ.get(env, default)

    if s:
        config = parse(s)

    overrides = dict([(k.upper(), v) for k, v in overrides.items()])

    config.update(overrides)

    return config


def parse(url):
    """Parses a database URL."""

    config = {}

    url = urlparse.urlparse(url)

    # Remove query strings.
    path = url.path[1:]
    path = path.split('?', 2)[0]

    # Update with environment configuration.
    config.update({
        "DB": int(path or 0),
        "PASSWORD": url.password,
        "HOST": url.hostname,
        "PORT": url.port,
    })

    return config
|
|
1487722c0431fce19d54b1b020c3af0ab411cc8a
|
Add sample config.py file
|
rename_to_config.py
|
rename_to_config.py
|
Python
| 0.000001 |
@@ -0,0 +1,126 @@
account_sid = "ACXXXXXXXXXXXXXXXXX"
auth_token = "XXXXXXXXXXXXXXXX"
from_number = "+441111222333"
to_number = "+447777222333"
|
|
74dcd072efabe20137e32fcfa0560a41a532a2ba
|
reverse Integer
|
python/math/reverseInteger.py
|
python/math/reverseInteger.py
|
Python
| 0.99998 |
@@ -0,0 +1,1060 @@
class Solution:
    # @return an integer
    def reverse(self, x):
        INT_MAX = 2147483647
        INT_MIN = -2147483648
        result = 0
        negative = 1

        if x < 0:
            negative = -1
            x = 0 - x

        temp = x / 10
        ten = 1
        while temp > 0:
            temp = temp / 10
            ten = ten * 10

        i = 0
        while ten > 0:
            curr = x / ten

            if negative == 1:
                if INT_MAX / (10 ** i) < curr or (INT_MAX - result - curr * 10 ** i) < 0:
                    return 0
            else:
                if (0-INT_MIN) / (10 ** i) < curr or (0 - INT_MIN - result - curr * 10 ** i) < 0:
                    return 0

            result = result + curr * (10 ** i)
            x = x % ten
            ten = ten / 10
            i += 1

        return negative * result

if __name__ == "__main__":
    solution = Solution()
    print solution.reverse(123)
    print solution.reverse(1563847412)
|
|
0cc3aafced65d2f128a8036aad62edb5ee19f566
|
Add brume main script
|
scripts/brume.py
|
scripts/brume.py
|
Python
| 0 |
@@ -0,0 +1,2689 @@
#!/usr/bin/env python

import os
import click
import yaml

from glob import glob
from subprocess import check_output
from brume.template import CfnTemplate
from brume.stack import Stack


def load_configuration(config_file='brume.yml'):
    """Return the YAML configuration for a project based on the `config_file` template."""
    from jinja2 import Template

    def env(key):
        """Return the value of the `key` environment variable."""
        return os.getenv(key, None)

    def git_commit():
        """Return the SHA1 of the latest Git commit (HEAD)."""
        return check_output(['git', 'rev-parse', '--short', 'HEAD']).strip()

    def git_branch():
        """Return the name of the current Git branch."""
        return check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD']).strip()

    template = Template(open(config_file, 'r').read())
    return yaml.load(template.render(env=env, git_commit=git_commit(), git_branch=git_branch()))


conf = load_configuration()
s3_config = conf['templates']
cf_config = conf['stack']


def collect_templates():
    return [CfnTemplate(t) for t in glob('*.cform')]


@click.command()
def config():
    """Print the current stack confguration."""
    print(yaml.dump(conf))


@click.command()
def create():
    """Create a new CloudFormation stack."""
    stack = Stack(cf_config)
    stack.create()
    stack.tail()


@click.command()
def update():
    """Update an existing CloudFormation stack."""
    stack = Stack(cf_config)
    stack.update()
    stack.tail()


@click.command()
def deploy():
    """Create or update a CloudFormation stack."""
    stack = Stack(cf_config)
    stack.create_or_update()
    stack.tail()


@click.command()
def delete():
    """Delete a CloudFormation stack."""
    stack = Stack(cf_config)
    stack.delete()
    stack.tail()


@click.command()
def validate():
    """Validate CloudFormation templates."""
    templates = collect_templates()
    return map(lambda t: t.validate(), templates)


@click.command()
def events():
    """Tail the events of the stack."""
    Stack(cf_config).tail()


@click.command()
@click.option('--bucket', required=True, help='Name of the bucket')
@click.option('--prefix', required=True, help='Prefix to the file name')
def upload(templates, bucket, path_prefix):
    """Upload CloudFormation templates to S3."""
    [t.upload(bucket, path_prefix) for t in templates]
    return templates


@click.group()
def cli():
    pass

cli.add_command(create)
cli.add_command(update)
cli.add_command(deploy)
cli.add_command(upload)
cli.add_command(delete)
cli.add_command(validate)
cli.add_command(config)
cli.add_command(events)

if __name__ == '__main__':
    cli()
|
|
bf02019c8b97d8dc35e3e186b31cb57adac6a8ec
|
Create a measurement
|
shrugd-create.py
|
shrugd-create.py
|
Python
| 0.998877 |
@@ -0,0 +1,1373 @@
+import ripe.atlas.cousteau%0Afrom atlaskeys import create_key%0A%0A# DNS query properties%0Aquery_argument = %22wide.ad.jp%22%0Aquery_type = %22AAAA%22%0Adnssec_ok = True%0Aset_nsid_bit = True%0A%0A# IP addresses to start from%0Adns_server_ips = %5B %0A %22199.7.91.13%22, %222001:500:2d::d%22, # D.ROOT-SERVERS.NET%0A %22192.203.230.10%22, # E.ROOT-SERVERS.NET%0A%5D%0A%0Adef ip_address_family(ip_addr):%0A %22%22%22Return whether an IP address is IPv4 or IPv6%22%22%22%0A if ':' in ip_addr:%0A return 6%0A else:%0A return 4%0A%0Adns_measurements = %5B%5D%0Afor ip_addr in dns_server_ips:%0A dns_query = ripe.atlas.cousteau.Dns(%0A target=ip_addr,%0A af=ip_address_family(ip_addr),%0A query_argument=query_argument,%0A query_type=query_type,%0A query_class=%22IN%22,%0A set_nsid_bit=set_nsid_bit,%0A udp_payload_size=4096,%0A description=%22shrugd %22 + query_argument + %22/%22 %0A )%0A dns_measurements.append(dns_query)%0A break%0A%0A# XXX: possibly should at least pick good IPv6 servers when querying over IPv6%0Asource = ripe.atlas.cousteau.AtlasSource(type=%22area%22, value=%22WW%22, requested=1)%0A%0Aatlas_request = ripe.atlas.cousteau.AtlasCreateRequest(%0A key=create_key,%0A measurements=dns_measurements,%0A sources=%5Bsource%5D,%0A is_oneoff=True%0A)%0A(is_success, response) = atlas_request.create()%0Aif is_success:%0A print(%22worked, IDs: %25s%22 %25 response)%0Aelse:%0A print(%22did not work%22)%0A%0A
|
|
69d358fa08652e44dc37974bb735cfdc40ccf1db
|
increase UPDATE_INTERVAL (#18429)
|
homeassistant/components/edp_redy.py
|
homeassistant/components/edp_redy.py
|
"""
Support for EDP re:dy.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/edp_redy/
"""
import logging
from datetime import timedelta
import voluptuous as vol
from homeassistant.const import (CONF_USERNAME, CONF_PASSWORD,
EVENT_HOMEASSISTANT_START)
from homeassistant.core import callback
from homeassistant.helpers import discovery, dispatcher, aiohttp_client
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_point_in_time
from homeassistant.util import dt as dt_util
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'edp_redy'
EDP_REDY = 'edp_redy'
DATA_UPDATE_TOPIC = '{0}_data_update'.format(DOMAIN)
UPDATE_INTERVAL = 30
REQUIREMENTS = ['edp_redy==0.0.2']
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string
})
}, extra=vol.ALLOW_EXTRA)
async def async_setup(hass, config):
"""Set up the EDP re:dy component."""
from edp_redy import EdpRedySession
session = EdpRedySession(config[DOMAIN][CONF_USERNAME],
config[DOMAIN][CONF_PASSWORD],
aiohttp_client.async_get_clientsession(hass),
hass.loop)
hass.data[EDP_REDY] = session
platform_loaded = False
async def async_update_and_sched(time):
update_success = await session.async_update()
if update_success:
nonlocal platform_loaded
# pylint: disable=used-before-assignment
if not platform_loaded:
for component in ['sensor', 'switch']:
await discovery.async_load_platform(hass, component,
DOMAIN, {}, config)
platform_loaded = True
dispatcher.async_dispatcher_send(hass, DATA_UPDATE_TOPIC)
# schedule next update
async_track_point_in_time(hass, async_update_and_sched,
time + timedelta(seconds=UPDATE_INTERVAL))
async def start_component(event):
_LOGGER.debug("Starting updates")
await async_update_and_sched(dt_util.utcnow())
# only start fetching data after HA boots to prevent delaying the boot
# process
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, start_component)
return True
class EdpRedyDevice(Entity):
"""Representation a base re:dy device."""
def __init__(self, session, device_id, name):
"""Initialize the device."""
self._session = session
self._state = None
self._is_available = True
self._device_state_attributes = {}
self._id = device_id
self._unique_id = device_id
self._name = name if name else device_id
async def async_added_to_hass(self):
"""Subscribe to the data updates topic."""
dispatcher.async_dispatcher_connect(
self.hass, DATA_UPDATE_TOPIC, self._data_updated)
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return self._unique_id
@property
def available(self):
"""Return True if entity is available."""
return self._is_available
@property
def should_poll(self):
"""Return the polling state. No polling needed."""
return False
@property
def device_state_attributes(self):
"""Return the state attributes."""
return self._device_state_attributes
@callback
def _data_updated(self):
"""Update state, trigger updates."""
self.async_schedule_update_ha_state(True)
def _parse_data(self, data):
"""Parse data received from the server."""
if "OutOfOrder" in data:
try:
self._is_available = not data['OutOfOrder']
except ValueError:
_LOGGER.error(
"Could not parse OutOfOrder for %s", self._id)
self._is_available = False
|
Python
| 0 |
@@ -826,9 +826,9 @@
L =
-3
+6
0%0A%0AR
|
bbdc1961271acf0dd0ad8818d41b84eea4a5aec4
|
Use entity_id attribute
|
homeassistant/components/influxdb.py
|
homeassistant/components/influxdb.py
|
"""
homeassistant.components.influxdb
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
InfluxDB component which allows you to send data to an Influx database.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/influxdb/
"""
import logging
import homeassistant.util as util
from homeassistant.helpers import validate_config
from homeassistant.const import (EVENT_STATE_CHANGED, STATE_ON, STATE_OFF,
STATE_UNLOCKED, STATE_LOCKED, STATE_UNKNOWN)
from homeassistant.components.sun import (STATE_ABOVE_HORIZON,
STATE_BELOW_HORIZON)
_LOGGER = logging.getLogger(__name__)
DOMAIN = "influxdb"
DEPENDENCIES = []
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 8086
DEFAULT_DATABASE = 'home_assistant'
DEFAULT_SSL = False
DEFAULT_VERIFY_SSL = False
REQUIREMENTS = ['influxdb==2.11.0']
CONF_HOST = 'host'
CONF_PORT = 'port'
CONF_DB_NAME = 'database'
CONF_USERNAME = 'username'
CONF_PASSWORD = 'password'
CONF_SSL = 'ssl'
CONF_VERIFY_SSL = 'verify_ssl'
def setup(hass, config):
""" Setup the InfluxDB component. """
from influxdb import InfluxDBClient, exceptions
if not validate_config(config, {DOMAIN: ['host']}, _LOGGER):
return False
conf = config[DOMAIN]
host = conf[CONF_HOST]
port = util.convert(conf.get(CONF_PORT), int, DEFAULT_PORT)
database = util.convert(conf.get(CONF_DB_NAME), str, DEFAULT_DATABASE)
username = util.convert(conf.get(CONF_USERNAME), str)
password = util.convert(conf.get(CONF_PASSWORD), str)
ssl = util.convert(conf.get(CONF_SSL), bool, DEFAULT_SSL)
verify_ssl = util.convert(conf.get(CONF_VERIFY_SSL), bool,
DEFAULT_VERIFY_SSL)
try:
influx = InfluxDBClient(host=host, port=port, username=username,
password=password, database=database,
ssl=ssl, verify_ssl=verify_ssl)
influx.query("select * from /.*/ LIMIT 1;")
except exceptions.InfluxDBClientError as exc:
_LOGGER.error("Database host is not accessible due to '%s', please "
"check your entries in the configuration file and that"
" the database exists and is READ/WRITE.", exc)
return False
def influx_event_listener(event):
""" Listen for new messages on the bus and sends them to Influx. """
state = event.data.get('new_state')
if state is None or state.state in (STATE_UNKNOWN, ''):
return
if state.state in (STATE_ON, STATE_LOCKED, STATE_ABOVE_HORIZON):
_state = 1
elif state.state in (STATE_OFF, STATE_UNLOCKED, STATE_BELOW_HORIZON):
_state = 0
else:
try:
_state = float(state.state)
except ValueError:
_state = state.state
measurement = state.attributes.get('unit_of_measurement')
if measurement in (None, ''):
measurement = '{}.{}'.format(state.domain, state.object_id)
json_body = [
{
'measurement': measurement,
'tags': {
'domain': state.domain,
'entity_id': state.object_id,
},
'time': event.time_fired,
'fields': {
'value': _state,
}
}
]
try:
influx.write_points(json_body)
except exceptions.InfluxDBClientError:
_LOGGER.exception('Error saving event "%s" to InfluxDB', json_body)
hass.bus.listen(EVENT_STATE_CHANGED, influx_event_listener)
return True
|
Python
| 0.000002 |
@@ -3035,53 +3035,23 @@
t =
-'{}.{}'.format(state.domain, state.object
+state.entity
_id
-)
%0A%0A
|
323176a9749d37d05e87339fe34b50b90cc6b663
|
add solution for Maximum Product Subarray
|
src/maximumProductSubarray.py
|
src/maximumProductSubarray.py
|
Python
| 0 |
@@ -0,0 +1,509 @@
class Solution:
    # @param A, a list of integers
    # @return an integer

    def maxProduct(self, A):
        if not A:
            return 0
        if len(A) == 1:
            return A[0]

        maxV, minV = A[0], A[0]
        res = maxV
        for val in A[1:]:
            if val > 0:
                maxV, minV = max(val, maxV * val), min(val, minV * val)
            else:
                maxV, minV = max(val, minV * val), min(val, maxV * val)
            res = max(res, maxV)
        return res
|
|
abd23cbc80149d4f2985eb8aef5d893714cca717
|
add a script to reset the db
|
scripts/reset_db.py
|
scripts/reset_db.py
|
Python
| 0 |
@@ -0,0 +1,115 @@
from scraper import clean

def run():
    if raw_input("Are you sure? Then write 'yes'") == "yes":
        clean()
|
|
43e5727d4091e0b6cb11e0e13ea9f7daf69628fc
|
Add corpusPreProcess.
|
corpusPreProcess.py
|
corpusPreProcess.py
|
Python
| 0 |
@@ -0,0 +1,266 @@
#! /usr/share/env python
# -*- coding=utf-8 -*-

resultFile = open('corpus/BigCorpusPre.txt', 'w')
with open('corpus/BigCorpus.txt', 'r') as f:
    for line in f:
        line = line[line.find(':')+1:]
        resultFile.write(line.strip()+'\n')
resultFile.close()
|
|
82089ad5e5c0d597cfdd16575b4fa5a9a09415ff
|
introduce plumbery from the command line -- no python coding, yeah!
|
plumbery/__main__.py
|
plumbery/__main__.py
|
Python
| 0 |
@@ -0,0 +1,2652 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Runs plumbery from the command line

Example::

    $ python -m plumbery fittings.yaml build web


To get some help, you can type::

    $ python -m plumbery -h

"""

import sys
import argparse

from engine import PlumberyEngine

parser = argparse.ArgumentParser(
    prog='plumbery',
    description='Plumbing infrastructure with Apache Libcloud.')

parser.add_argument(
    'fittings',
    nargs=1,
    help='File that is containing fittings plan')

parser.add_argument(
    'action',
    nargs=1,
    help="Either 'build', 'start', 'polish', 'stop' or 'destroy'")

parser.add_argument(
    'blueprint',
    nargs='?',
    help='Name of the selected blueprint. '
         'If omitted, all blueprints will be considered',
    default=None)

args = parser.parse_args()

engine = PlumberyEngine(args.fittings[0])

verb = args.action[0].lower()
if verb == 'build':
    if args.blueprint is None:
        engine.build_all_blueprints()
    else:
        engine.build_blueprint(args.blueprint)

elif verb == 'start':
    if args.blueprint is None:
        engine.start_all_nodes()
    else:
        engine.start_nodes(args.blueprint)

elif verb == 'polish':
    if args.blueprint is None:
        engine.polish_all_blueprints()
    else:
        engine.polish_blueprint(args.blueprint)

elif verb == 'stop':
    if args.blueprint is None:
        engine.stop_all_nodes()
    else:
        engine.stop_node(args.blueprint)

elif verb == 'destroy':
    if args.blueprint is None:
        engine.destroy_all_blueprints()
    else:
        engine.destroy_blueprint(args.blueprint)

else:
    print("{}: error: unrecognised action '{}'".format('plumbery', args.action[0]))
    parser.print_help()
    sys.exit(2)
|
|
7182af317116db7eb3f7a278b3487ad91a3b3331
|
Add example for a clunky 3D high resolution loupe for napari
|
high-res-slider.py
|
high-res-slider.py
|
Python
| 0.000001 |
@@ -0,0 +1,2236 @@
import functools
import numpy as np
import dask.array as da
from magicgui.widgets import Slider, Container
import napari

# stack = ... # your dask array
# stack2 = stack[::2, ::2, ::2]
# stack4 = stack2[::2, ::2, ::2]
# 👆 quick and easy multiscale pyramid, don't do this really
# see https://github.com/dask/dask-image/issues/136
# for better ways
# and, specifically, stack4 will be small but will still need
# to access full data. You should save all data sizes as
# their own arrays on disk and load those. I recommend
# using dask.array.Array.to_zarr.
# You can also read about NGFF:
# https://ngff.openmicroscopy.org/latest/

# example with some example data from Liu et al, Science, 2018

stack, stack2, stack4 = [
    da.from_zarr(f'/Users/jni/data/gokul-lls/{i}.zarr')[0]
    for i in range(3)
]

# a list of arrays of decreasing size is interpreted as
# a multiscale dataset by napari
multiscale_data = [stack, stack2, stack4]

viewer = napari.Viewer(ndisplay=3)
multiscale_layer = viewer.add_image(
    multiscale_data,
    colormap='magenta',
    scale=[3, 1, 1],
)

crop_sizes = (30, 256, 256)
cropz, cropy, cropx = crop_sizes
shapez, shapey, shapex = stack.shape
ends = np.asarray(stack.shape) - np.asarray(crop_sizes) + 1
stepsizes = ends // 100

highres_crop_layer = viewer.add_image(
    stack[:cropz, :cropy, :cropx],
    name='cropped',
    blending='additive',
    colormap='green',
    scale=multiscale_layer.scale,
)

def set_slice(axis, value):
    idx = int(value)
    scale = np.asarray(highres_crop_layer.scale)
    translate = np.asarray(highres_crop_layer.translate)
    izyx = translate // scale
    izyx[axis] = idx
    i, j, k = izyx
    highres_crop_layer.data = stack[i:i + cropz, j:j + cropy, k:k + cropx]
    highres_crop_layer.translate = scale * izyx
    highres_crop_layer.refresh()


sliders = [
    Slider(name=axis, min=0, max=end, step=step)
    for axis, end, step in zip('zyx', ends, stepsizes)
]
for axis, slider in enumerate(sliders):
    slider.changed.connect(
        lambda event, axis=axis: set_slice(axis, event.value)
    )

container_widget = Container(layout='vertical')
container_widget.extend(sliders)
viewer.window.add_dock_widget(container_widget, area='right')

napari.run()
|
|
a9cdf3c5a1911586530fb825a7a55d2ef7c88a41
|
remove old import statement
|
pelican/contents.py
|
pelican/contents.py
|
# -*- coding: utf-8 -*-
import copy
import locale
import logging
import functools
from datetime import datetime
from os import getenv
from sys import platform, stdin
from pelican.settings import _DEFAULT_CONFIG
from pelican.utils import slugify, truncate_html_words
from pelican import signals
logger = logging.getLogger(__name__)
class Page(object):
"""Represents a page
Given a content, and metadata, create an adequate object.
:param content: the string to parse, containing the original content.
"""
mandatory_properties = ('title',)
default_template = 'page'
def __init__(self, content, metadata=None, settings=None,
filename=None):
# init parameters
if not metadata:
metadata = {}
if not settings:
settings = copy.deepcopy(_DEFAULT_CONFIG)
self.settings = settings
self._content = content
self.translations = []
local_metadata = dict(settings.get('DEFAULT_METADATA', ()))
local_metadata.update(metadata)
# set metadata as attributes
for key, value in local_metadata.items():
setattr(self, key.lower(), value)
# also keep track of the metadata attributes available
self.metadata = local_metadata
#default template if it's not defined in page
self.template = self._get_template()
# default author to the one in settings if not defined
if not hasattr(self, 'author'):
if 'AUTHOR' in settings:
self.author = Author(settings['AUTHOR'], settings)
# manage languages
self.in_default_lang = True
if 'DEFAULT_LANG' in settings:
default_lang = settings['DEFAULT_LANG'].lower()
if not hasattr(self, 'lang'):
self.lang = default_lang
self.in_default_lang = (self.lang == default_lang)
# create the slug if not existing, from the title
if not hasattr(self, 'slug') and hasattr(self, 'title'):
self.slug = slugify(self.title)
if filename:
self.filename = filename
# manage the date format
if not hasattr(self, 'date_format'):
if hasattr(self, 'lang') and self.lang in settings['DATE_FORMATS']:
self.date_format = settings['DATE_FORMATS'][self.lang]
else:
self.date_format = settings['DEFAULT_DATE_FORMAT']
if isinstance(self.date_format, tuple):
locale.setlocale(locale.LC_ALL, self.date_format[0])
self.date_format = self.date_format[1]
if hasattr(self, 'date'):
encoded_date = self.date.strftime(
self.date_format.encode('ascii', 'xmlcharrefreplace'))
if platform == 'win32':
self.locale_date = encoded_date.decode(stdin.encoding)
else:
self.locale_date = encoded_date.decode('utf')
# manage status
if not hasattr(self, 'status'):
self.status = settings['DEFAULT_STATUS']
if not settings['WITH_FUTURE_DATES']:
if hasattr(self, 'date') and self.date > datetime.now():
self.status = 'draft'
# store the summary metadata if it is set
if 'summary' in metadata:
self._summary = metadata['summary']
signals.content_object_init.send(self.__class__, instance=self)
def check_properties(self):
"""test that each mandatory property is set."""
for prop in self.mandatory_properties:
if not hasattr(self, prop):
raise NameError(prop)
@property
def url_format(self):
return {
'slug': getattr(self, 'slug', ''),
'lang': getattr(self, 'lang', 'en'),
'date': getattr(self, 'date', datetime.now()),
'author': getattr(self, 'author', ''),
'category': getattr(self, 'category',
self.settings['DEFAULT_CATEGORY']),
}
def _expand_settings(self, key):
fq_key = ('%s_%s' % (self.__class__.__name__, key)).upper()
return self.settings[fq_key].format(**self.url_format)
def get_url_setting(self, key):
key = key if self.in_default_lang else 'lang_%s' % key
return self._expand_settings(key)
@property
def content(self):
if hasattr(self, "_get_content"):
content = self._get_content()
else:
content = self._content
return content
def _get_summary(self):
"""Returns the summary of an article, based on the summary metadata
if it is set, else truncate the content."""
if hasattr(self, '_summary'):
return self._summary
else:
if self.settings['SUMMARY_MAX_LENGTH']:
return truncate_html_words(self.content, self.settings['SUMMARY_MAX_LENGTH'])
return self.content
def _set_summary(self, summary):
"""Dummy function"""
pass
summary = property(_get_summary, _set_summary, "Summary of the article."
"Based on the content. Can't be set")
url = property(functools.partial(get_url_setting, key='url'))
save_as = property(functools.partial(get_url_setting, key='save_as'))
def _get_template(self):
if hasattr(self, 'template') and self.template is not None:
return self.template
else:
return self.default_template
class Article(Page):
mandatory_properties = ('title', 'date', 'category')
default_template = 'article'
class Quote(Page):
base_properties = ('author', 'date')
class URLWrapper(object):
def __init__(self, name, settings):
self.name = unicode(name)
self.slug = slugify(self.name)
self.settings = settings
def as_dict(self):
return self.__dict__
def __hash__(self):
return hash(self.name)
def __eq__(self, other):
return self.name == unicode(other)
def __str__(self):
return str(self.name.encode('utf-8', 'replace'))
def __unicode__(self):
return self.name
def _from_settings(self, key, get_page_name=False):
"""Returns URL information as defined in settings.
When get_page_name=True returns URL without anything after {slug}
e.g. if in settings: CATEGORY_URL="cat/{slug}.html" this returns "cat/{slug}"
Useful for pagination."""
setting = "%s_%s" % (self.__class__.__name__.upper(), key)
value = self.settings[setting]
if not isinstance(value, basestring):
logger.warning(u'%s is set to %s' % (setting, value))
return value
else:
if get_page_name:
return unicode(value[:value.find('{slug}') + len('{slug}')]).format(**self.as_dict())
else:
return unicode(value).format(**self.as_dict())
page_name = property(functools.partial(_from_settings, key='URL', get_page_name=True))
url = property(functools.partial(_from_settings, key='URL'))
save_as = property(functools.partial(_from_settings, key='SAVE_AS'))
class Category(URLWrapper):
pass
class Tag(URLWrapper):
def __init__(self, name, *args, **kwargs):
super(Tag, self).__init__(unicode.strip(name), *args, **kwargs)
class Author(URLWrapper):
pass
def is_valid_content(content, f):
try:
content.check_properties()
return True
except NameError, e:
logger.error(u"Skipping %s: impossible to find informations about '%s'"\
% (f, e))
return False
|
Python
| 0.999254 |
@@ -110,30 +110,8 @@
ime%0A
-from os import getenv%0A
from
@@ -307,16 +307,17 @@
ame__)%0A%0A
+%0A
class Pa
|
2cadad76c2756852b94948088e92b9191abebbb7
|
make one pickle file with all metadata (for faster loading)
|
generate_metadata_pkl.py
|
generate_metadata_pkl.py
|
Python
| 0 |
@@ -0,0 +1,2951 @@
import argparse
from dicom.sequence import Sequence
import glob
import re
from log import print_to_file
import cPickle as pickle


def read_slice(path):
    return pickle.load(open(path))['data']


def convert_to_number(value):
    value = str(value)
    try:
        if "." in value:
            return float(value)
        else:
            return int(value)
    except:
        pass
    return value


def clean_metadata(metadatadict):
    # Do cleaning
    keys = sorted(list(metadatadict.keys()))
    for key in keys:
        value = metadatadict[key]
        if key == 'PatientAge':
            metadatadict[key] = int(value[:-1])
        if key == 'PatientSex':
            metadatadict[key] = 1 if value == 'F' else -1
        else:
            if isinstance(value, Sequence):
                #convert to list
                value = [i for i in value]
            if isinstance(value, (list,)):
                metadatadict[key] = [convert_to_number(i) for i in value]
            else:
                metadatadict[key] = convert_to_number(value)
    return metadatadict


def read_metadata(path):
    d = pickle.load(open(path))['metadata'][0]
    metadata = clean_metadata(d)
    return metadata


def get_patient_data(patient_data_path):
    patient_data = []
    spaths = sorted(glob.glob(patient_data_path + r'/*.pkl'),
                    key=lambda x: int(re.search(r'/*_(\d+)\.pkl$', x).group(1)))
    pid = re.search(r'/(\d+)/study$', patient_data_path).group(1)
    for s in spaths:
        slice_id = re.search(r'/(.*_\d+\.pkl)$', s).group(1)
        metadata = read_metadata(s)
        patient_data.append({'metadata': metadata,
                             'slice_id': slice_id})
    return patient_data, pid


def get_metadata(data_path):
    patient_paths = sorted(glob.glob(data_path + '/*/study'))
    metadata_dict = {}
    for p in patient_paths:
        patient_data, pid = get_patient_data(p)
        print "patient", pid
        metadata_dict[pid] = dict()
        for pd in patient_data:
            metadata_dict[pid][pd['slice_id']] = pd['metadata']

    filename = data_path.split('/')[-1] + '_metadata.pkl'
    with open(filename, 'w') as f:
        pickle.dump(metadata_dict, f)
    print 'saved to ', filename
    return metadata_dict


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description=__doc__)
    required = parser.add_argument_group('required arguments')
    #required.add_argument('-c', '--config',
    #                      help='configuration to run',
    #                      required=True)
    args = parser.parse_args()

    data_paths = ['/mnt/storage/data/dsb15_pkl/pkl_train', '/mnt/storage/data/dsb15_pkl/pkl_validate']
    with print_to_file("/mnt/storage/metadata/kaggle-heart/logs/generate_metadata.log"):
        for d in data_paths:
            get_metadata(d)
        print "log saved to '%s'" % ("/mnt/storage/metadata/kaggle-heart/logs/generate_metadata.log")
|
|
8939e873f4ea61169f9384eded5b8c603cfde988
|
Add crypto pre-submit that will add the openssl builder to the default try-bot list.
|
crypto/PRESUBMIT.py
|
crypto/PRESUBMIT.py
|
Python
| 0.000006 |
@@ -0,0 +1,478 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Chromium presubmit script for src/net.

See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""

def GetPreferredTrySlaves(project, change):
  # Changes in crypto often need a corresponding OpenSSL edit.
  return ['linux_redux']
|
|
27ed31c7a21c4468bc86aaf220e30315e366c425
|
add message to SearxParameterException - fixes #1722
|
searx/exceptions.py
|
searx/exceptions.py
|
'''
searx is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
searx is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with searx. If not, see < http://www.gnu.org/licenses/ >.
(C) 2017- by Alexandre Flament, <[email protected]>
'''
class SearxException(Exception):
pass
class SearxParameterException(SearxException):
def __init__(self, name, value):
if value == '' or value is None:
message = 'Empty ' + name + ' parameter'
else:
message = 'Invalid value "' + value + '" for parameter ' + name
super(SearxParameterException, self).__init__(message)
self.parameter_name = name
self.parameter_value = value
|
Python
| 0 |
@@ -1041,16 +1041,47 @@
essage)%0A
+ self.message = message%0A
|
a230bb1b2f1c96c7f9764ee2bf759ea9fe39e801
|
add populations tests
|
isochrones/tests/test_populations.py
|
isochrones/tests/test_populations.py
|
Python
| 0 |
@@ -0,0 +1,1770 @@
import unittest

from pandas.testing import assert_frame_equal
from scipy.stats import uniform, norm
from isochrones import get_ichrone
from isochrones.priors import ChabrierPrior, FehPrior, GaussianPrior, SalpeterPrior, DistancePrior, AVPrior
from isochrones.populations import StarFormationHistory, StarPopulation, BinaryDistribution, deredden


class PopulationTest(unittest.TestCase):
    def setUp(self):
        mist = get_ichrone("mist")
        sfh = StarFormationHistory()  # Constant SFR for 10 Gyr; or, e.g., dist=norm(3, 0.2)
        imf = SalpeterPrior(bounds=(0.4, 10))  # bounds on solar masses
        binaries = BinaryDistribution(fB=0.4, gamma=0.3)
        feh = GaussianPrior(-0.2, 0.2)
        distance = DistancePrior(max_distance=3000)  # pc
        AV = AVPrior(bounds=[0, 2])
        pop = StarPopulation(
            mist, sfh=sfh, imf=imf, feh=feh, distance=distance, binary_distribution=binaries, AV=AV
        )

        self.pop = pop
        self.mist = mist
        self.df = pop.generate(1000)
        self.dereddened_df = deredden(mist, self.df)

    def test_mags(self):
        """Check no total mags are null
        """
        mags = [f"{b}_mag" for b in self.mist.bands]
        assert self.df[mags].isnull().sum().sum() == 0

    def test_dereddening(self):
        """Check mass, age, feh the same when dereddened
        """

        cols = ["initial_mass", "initial_feh", "requested_age"]
        assert_frame_equal(self.df[cols], self.dereddened_df[cols])

        # Check de-reddening vis-a-vis A_x
        for b in self.mist.bands:
            diff = (self.dereddened_df[f"{b}_mag"] + self.df[f"A_{b}"]) - self.df[f"{b}_mag"]
            is_binary = self.df.mass_B > 0
            assert diff.loc[~is_binary].std() < 0.0001
|
|
6ebed7a2a6488a857fc6878c2d39d26ce9bc72f5
|
Add release 9.1.0 recognition to the Dyninst API package file.
|
var/spack/repos/builtin/packages/dyninst/package.py
|
var/spack/repos/builtin/packages/dyninst/package.py
|
##############################################################################
# Copyright (c) 2013, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License (as published by
# the Free Software Foundation) version 2.1 dated February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Dyninst(Package):
"""API for dynamic binary instrumentation. Modify programs while they
are executing without recompiling, re-linking, or re-executing."""
homepage = "https://paradyn.org"
url = "http://www.dyninst.org/sites/default/files/downloads/dyninst/8.1.2/DyninstAPI-8.1.2.tgz"
list_url = "http://www.dyninst.org/downloads/dyninst-8.x"
version('8.2.1', 'abf60b7faabe7a2e4b54395757be39c7',
url="http://www.paradyn.org/release8.2/DyninstAPI-8.2.1.tgz")
version('8.1.2', 'bf03b33375afa66fe0efa46ce3f4b17a',
url="http://www.paradyn.org/release8.1.2/DyninstAPI-8.1.2.tgz")
version('8.1.1', 'd1a04e995b7aa70960cd1d1fac8bd6ac',
url="http://www.paradyn.org/release8.1/DyninstAPI-8.1.1.tgz")
depends_on("libelf")
depends_on("libdwarf")
depends_on("[email protected]:")
# new version uses cmake
def install(self, spec, prefix):
libelf = spec['libelf'].prefix
libdwarf = spec['libdwarf'].prefix
with working_dir('spack-build', create=True):
cmake('..',
'-DBoost_INCLUDE_DIR=%s' % spec['boost'].prefix.include,
'-DBoost_LIBRARY_DIR=%s' % spec['boost'].prefix.lib,
'-DBoost_NO_SYSTEM_PATHS=TRUE',
'-DLIBELF_INCLUDE_DIR=%s' % join_path(libelf.include, 'libelf'),
'-DLIBELF_LIBRARIES=%s' % join_path(libelf.lib, 'libelf.so'),
'-DLIBDWARF_INCLUDE_DIR=%s' % libdwarf.include,
'-DLIBDWARF_LIBRARIES=%s' % join_path(libdwarf.lib, 'libdwarf.so'),
*std_cmake_args)
make()
make("install")
@when('@:8.1')
def install(self, spec, prefix):
configure("--prefix=" + prefix)
make()
make("install")
|
Python
| 0 |
@@ -1597,16 +1597,149 @@
t-8.x"%0A%0A
+ version('9.1.0', '5c64b77521457199db44bec82e4988ac',%0A url="http://www.paradyn.org/release9.1.0/DyninstAPI-9.1.0.tgz")%0A
vers
|
f982cd78ae79f77c2ca59440de20de37002d6658
|
Add a pcakge: libzip. (#3656)
|
var/spack/repos/builtin/packages/libzip/package.py
|
var/spack/repos/builtin/packages/libzip/package.py
|
Python
| 0.001174 |
@@ -0,0 +1,1531 @@
##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *


class Libzip(AutotoolsPackage):
    """libzip is a C library for reading, creating,
    and modifying zip archives."""

    homepage = "https://nih.at/libzip/index.html"
    url = "https://nih.at/libzip/libzip-1.2.0.tar.gz"

    version('1.2.0', '5c3372ab3a7897295bfefb27f745cf69')
|
|
9877c21c502b27460f70e6687ed3fd6a2d3fd0d5
|
add new package at v8.3.0 (#27446)
|
var/spack/repos/builtin/packages/racket/package.py
|
var/spack/repos/builtin/packages/racket/package.py
|
Python
| 0 |
@@ -0,0 +1,1030 @@
+# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other%0A# Spack Project Developers. See the top-level COPYRIGHT file for details.%0A#%0A# SPDX-License-Identifier: (Apache-2.0 OR MIT)%0A%0Afrom spack import *%0A%0A%0Aclass Racket(Package):%0A %22%22%22The Racket programming language.%22%22%22%0A%0A homepage = %22https://www.racket-lang.org%22%0A url = %22https://download.racket-lang.org/releases/8.3/installers/racket-src.tgz%22%0A%0A maintainers = %5B'arjunguha'%5D%0A%0A version('8.3.0', 'c4af1a10b957e5fa0daac2b5ad785cda79805f76d11482f550626fa68f07b949')%0A%0A depends_on('libffi', type=('build', 'link', 'run'))%0A depends_on('patchutils')%0A%0A phases = %5B'configure', 'build', 'install'%5D%0A%0A def configure(self, spec, prefix):%0A with working_dir('src'):%0A configure = Executable('./configure')%0A configure(%22--prefix%22, prefix)%0A%0A def build(self, spec, prefix):%0A with working_dir('src'):%0A make()%0A%0A def install(self, spec, prefix):%0A with working_dir('src'):%0A make('install')%0A
|
|
27b1695a09694e86ce985e8dc2ff9fd1802204bd
|
Remove the print statement
|
addons/stock_location/mrp_pull.py
|
addons/stock_location/mrp_pull.py
|
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import fields,osv
import tools
import ir
import pooler
import netsvc
from mx import DateTime
import time
from tools.translate import _
class procurement_order(osv.osv):
_inherit = 'procurement.order'
def check_buy(self, cr, uid, ids, context=None):
for procurement in self.browse(cr, uid, ids):
for line in procurement.product_id.flow_pull_ids:
print line.location_src_id.name, line.location_id.name, line.type_proc
if line.location_id==procurement.location_id:
return line.type_proc=='buy'
return super(procurement_order, self).check_buy(cr, uid, ids)
def check_produce(self, cr, uid, ids, context=None):
for procurement in self.browse(cr, uid, ids):
for line in procurement.product_id.flow_pull_ids:
if line.location_id==procurement.location_id:
return line.type_proc=='produce'
return super(procurement_order, self).check_produce(cr, uid, ids)
def check_move(self, cr, uid, ids, context=None):
for procurement in self.browse(cr, uid, ids):
for line in procurement.product_id.flow_pull_ids:
if line.location_id==procurement.location_id:
if not line.location_src_id:
self.write(cr, uid, procurement.id, {'message': _('No source location defined to generate the picking !')})
return (line.type_proc=='move') and (line.location_src_id)
return False
def action_move_create(self, cr, uid, ids,context=None):
proc_obj = self.pool.get('procurement.order')
move_obj = self.pool.get('stock.move')
location_obj = self.pool.get('stock.location')
wf_service = netsvc.LocalService("workflow")
res_user=self.pool.get('res.users').browse(cr,uid,uid)
for proc in proc_obj.browse(cr, uid, ids, context=context):
line = None
for line in proc.product_id.flow_pull_ids:
if line.location_id==proc.location_id:
break
assert line, 'Line can not be False if we are on this state of the workflow'
origin = (proc.origin or proc.name or '').split(':')[0] +':'+line.name
picking_id = self.pool.get('stock.picking').create(cr, uid, {
'origin': origin,
'company_id': res_user.company_id and res_user.company_id.id or False,
'type': line.picking_type,
'stock_journal_id': line.journal_id and line.journal_id.id or False,
'move_type': 'one',
'address_id': line.partner_address_id.id,
'note': line.name, # TODO: note on procurement ?
'invoice_state': line.invoice_state,
})
move_id = self.pool.get('stock.move').create(cr, uid, {
'name': line.name,
'picking_id': picking_id,
'company_id': res_user.company_id and res_user.company_id.id or False,
'product_id': proc.product_id.id,
'date_planned': proc.date_planned,
'product_qty': proc.product_qty,
'product_uom': proc.product_uom.id,
'product_uos_qty': (proc.product_uos and proc.product_uos_qty)\
or proc.product_qty,
'product_uos': (proc.product_uos and proc.product_uos.id)\
or proc.product_uom.id,
'address_id': line.partner_address_id.id,
'location_id': line.location_src_id.id,
'location_dest_id': line.location_id.id,
'move_dest_id': proc.move_id and proc.move_id.id or False, # to verif, about history ?
'tracking_id': False,
'cancel_cascade': line.cancel_cascade,
'state': 'confirmed',
'note': line.name, # TODO: same as above
})
if proc.move_id and proc.move_id.state in ('confirmed'):
self.pool.get('stock.move').write(cr,uid, [proc.move_id.id], {
'state':'waiting'
}, context=context)
proc_id = self.pool.get('procurement.order').create(cr, uid, {
'name': line.name,
'origin': origin,
'company_id': res_user.company_id and res_user.company_id.id or False,
'date_planned': proc.date_planned,
'product_id': proc.product_id.id,
'product_qty': proc.product_qty,
'product_uom': proc.product_uom.id,
'product_uos_qty': (proc.product_uos and proc.product_uos_qty)\
or proc.product_qty,
'product_uos': (proc.product_uos and proc.product_uos.id)\
or proc.product_uom.id,
'location_id': line.location_src_id.id,
'procure_method': line.procure_method,
'move_id': move_id,
})
wf_service = netsvc.LocalService("workflow")
wf_service.trg_validate(uid, 'stock.picking', picking_id, 'button_confirm', cr)
wf_service.trg_validate(uid, 'procurement.order', proc_id, 'button_confirm', cr)
if proc.move_id:
self.pool.get('stock.move').write(cr, uid, [proc.move_id.id],
{'location_id':proc.location_id.id})
self.write(cr, uid, [proc.id], {'state':'running','message':_('Moved from other location')})
return False
procurement_order()
|
Python
| 0.004301 |
@@ -1360,78 +1360,8 @@
-print line.location_src_id.name, line.location_id.name, line.type_proc
%0A
|
bd49a4c82e011d7c5025abc15324220b1496f8c8
|
add deepspeech.py to support DeepSpeech
|
deepspeech.py
|
deepspeech.py
|
Python
| 0.000001 |
@@ -0,0 +1,1008 @@
+import subprocess%0A%0Aclass DeepSpeechRecognizer():%0A def __init__(self, model=None, alphabet=None, lm=None, trie=None):%0A self.model = model%0A self.alphabet = alphabet%0A self.lm = lm%0A self.trie = trie%0A%0A def recognize(self, audio_file):%0A %22%22%22recognize audio file%0A%0A args:%0A audio_file (str)%0A%0A return:%0A result (str/False)%0A %22%22%22%0A output = subprocess.getoutput(%22deepspeech --model %7B%7D --alphabet %7B%7D --lm %7B%7D --trie %7B%7D --audio %7B%7D%22.format(self.model, self.alphabet, self.lm, self.trie, audio_file))%0A for index, line in enumerate(output.split(%22%5Cn%22)):%0A if line.startswith(%22Inference took %22):%0A return output.split(%22%5Cn%22)%5Bindex + 1%5D%0A return None%0A%0Aif __name__==%22__main__%22:%0A recognizer = DeepSpeechRecognizer(r%22models/output_graph.pbmm%22, r%22models/alphabet.txt%22, r%22models/lm.binary%22, r%22models/trie%22)%0A result = recognizer.recognize(%22audio/8455-210777-0068.wav%22)%0A print(result)%0A %0A%0A
|
|
ca09dc0b9d555f10aafb17380a9a8592727d0a0f
|
Add dp/SPOJ-ROCK.py
|
dp/SPOJ-ROCK.py
|
dp/SPOJ-ROCK.py
|
Python
| 0.000001 |
@@ -0,0 +1,772 @@
+def compute_zero_counts(rock_desc):%0A zero_counts = %5B0 for i in xrange(N+1)%5D%0A for i in xrange(1, N+1):%0A zero_counts%5Bi%5D = zero_counts%5Bi-1%5D%0A if rock_desc%5Bi-1%5D == '0':%0A zero_counts%5Bi%5D += 1%0A%0A return zero_counts%0A%0Adef score(zero_counts, start, end):%0A length = end - start + 1%0A zeroes = zero_counts%5Bend%5D - zero_counts%5Bstart-1%5D%0A ones = length - zeroes%0A if ones %3E zeroes:%0A return length%0A return 0%0A%0At = int(raw_input())%0Afor case in xrange(t):%0A N = int(raw_input())%0A rock_desc = raw_input()%0A%0A zero_counts = compute_zero_counts(rock_desc)%0A dp = %5B0 for i in xrange(N+1)%5D%0A%0A for i in xrange(1,N+1):%0A for j in xrange(0,i):%0A dp%5Bi%5D = max(dp%5Bi%5D, dp%5Bj%5D + score(zero_counts, j+1, i))%0A%0A print dp%5BN%5D%0A%0A%0A%0A%0A
|
|
50d05aabc2eb1d5bcb20d457dd05d2882b983afa
|
Add installation script for profiler.
|
install_and_run.py
|
install_and_run.py
|
Python
| 0 |
@@ -0,0 +1,2006 @@
+# Copyright 2020 The TensorFlow Authors. All Rights Reserved.%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22);%0A# you may not use this file except in compliance with the License.%0A# You may obtain a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS,%0A# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0A# See the License for the specific language governing permissions and%0A# limitations under the License.%0A# ==============================================================================%0A%22%22%22Install and run the TensorBoard plugin for performance analysis.%0A%0A Usage: python3 install_and_run.py --envdir ENVDIR --logdir LOGDIR%0A%22%22%22%0A%0A# Lint as: python3%0A%0Aimport argparse%0Aimport os%0Aimport subprocess%0A%0A%0Adef run(*args):%0A %22%22%22Runs a shell command.%22%22%22%0A subprocess.run(' '.join(args), shell=True, check=True)%0A%0A%0Aclass VirtualEnv(object):%0A %22%22%22Creates and runs programs in a virtual environment.%22%22%22%0A%0A def __init__(self, envdir):%0A self.envdir = envdir%0A run('virtualenv', '--system-site-packages', '-p', 'python3', self.envdir)%0A%0A def run(self, program, *args):%0A run(os.path.join(self.envdir, 'bin', program), *args)%0A%0A%0Adef main():%0A parser = argparse.ArgumentParser(description=__doc__)%0A parser.add_argument('--envdir', help='Virtual environment', required=True)%0A parser.add_argument('--logdir', help='TensorBoard logdir', required=True)%0A args = parser.parse_args()%0A venv = VirtualEnv(args.envdir)%0A venv.run('pip3', 'uninstall', '-q', '-y', 'tensorboard')%0A venv.run('pip3', 'uninstall', '-q', '-y', 'tensorflow')%0A venv.run('pip3', 'install', '-q', '-U', 'tf-nightly')%0A venv.run('pip3', 'install', '-q', '-U', 'tb-nightly')%0A venv.run('pip3', 'install', '-q', '-U', 'tensorboard_plugin_profile')%0A venv.run('tensorboard', '--logdir=' + args.logdir, '--bind_all')%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
734967196c8f0577b218802c16d9eab31c9e9054
|
Add problem 36, palindrome binaries
|
problem_36.py
|
problem_36.py
|
Python
| 0.999978 |
@@ -0,0 +1,460 @@
+from time import time%0A%0A%0Adef is_palindrome(s):%0A for idx in range(len(s)/2):%0A if s%5Bidx%5D != s%5B-1*idx - 1%5D:%0A return False%0A return True%0A%0A%0Adef main():%0A palindrom_nums = %5Bnum for num in range(int(1e6)) if is_palindrome(str(num)) and is_palindrome(str(bin(num))%5B2:%5D)%5D%0A print 'Palindroms:', palindrom_nums%0A print 'Palindrom sum:', sum(palindrom_nums)%0A%0A%0Aif __name__ == '__main__':%0A t = time()%0A main()%0A print 'Time:', time() - t%0A
|
|
ad9a9df8e144c41456aeded591081a3a339853f3
|
Create RLU_forward_backward.py
|
Neural-Networks/RLU_forward_backward.py
|
Neural-Networks/RLU_forward_backward.py
|
Python
| 0.000019 |
@@ -0,0 +1,575 @@
+%0Afrom numpy import *%0Afrom RLU_neural_forward import *%0Afrom RLU_back_propagation import *%0A%0Adef forwardBackward(xi, x, y, MT, time_queue, good_queue, DELTA_queue):%0A%09A = neural_forward(xi, x, MT)%0A%09check = argmax(A%5B-xi%5B-1%5D:%5D)%0A%09# send back some progress statistic%0A %09if y%5Bcheck%5D-1 == 0:%0A%09%09good = good_queue.get()%0A%09%09good += 1%0A%09%09good_queue.put(good)%0A%09%09good_queue.task_done()%0A%0A%09time = time_queue.get()%0A%09time += 1%0A%09time_queue.put(time)%0A%09time_queue.task_done()%0A%0A%09DELTA = DELTA_queue.get()%0A%09DELTA = DELTA + back_propagation(y, A, MT, xi)%0A%09DELTA_queue.put(DELTA)%0A%09DELTA_queue.task_done()%0A
|
|
1ecb4a0711304af13f41ae1aae67792057783334
|
Create ScaperUtils.py
|
data/ScaperUtils.py
|
data/ScaperUtils.py
|
Python
| 0 |
@@ -0,0 +1,2581 @@
+class ScraperUtil (object) :%0A class Base :%0A def __init__(self,data_get,data_parse, data_formatter=None) :%0A self.get_data = data_get%0A self.parse_data = data_parse%0A self.data_formatter = data_formatter%0A%0A class Yahoo(Base) :%0A def __init__(self,data_get,data_format,data_parse) :%0A ScraperUtil.Base.__init__( self,data_get,data_parse,data_format)%0A def __call__(self,symbol) :%0A ret = self.get_data(symbol)%0A if self.data_formatter is not None :%0A ret = self.data_formatter(ret)%0A for token in self.parse_data(ret) :%0A yield token%0A%0A class Nasdaq(Base) :%0A def __init__(self,data_get,data_parse,data_formatter,exchange_list=None,unwanted_keys_list=None) : %0A ScraperUtil.Base.__init__( self,data_get,data_parse,data_formatter)%0A self.exchanges=%5B%22nyse%22, %22nasdaq%22%5D%0A self.unwanted_keys=%5B'Summary Quote','MarketCap','LastSale','IPOyear','Unnamed: 8'%5D%0A if exchange_list is not None : self.exchanges = exchange_list%0A if unwanted_keys_list is not None : self.unwanted_keys = unwanted_keys_list%0A def __call__(self,exchange_list=None,unwanted_keys_list=None) :%0A exchanges = self.exchanges%0A unwanted_keys = self.unwanted_keys%0A if exchange_list is not None : exchanges = exchange_list%0A if unwanted_keys_list is not None : unwanted_keys = unwanted_keys_list%0A ret = None%0A for exchange in exchanges : %0A if ret is None : ret = self.get_data(exchange)%0A else : ret = b%22%22.join(%5Bret, self.get_data(exchange)%5D)%0A ret = self.parse_data(ret)%0A if self.data_formatter is not None :%0A ret = self.data_formatter(ret,unwanted_keys,exchange)%0A return ret.reindex()%0A class NasdaqService() :%0A def __init__(self,service) :%0A self.service = service%0A self.fresh = None%0A self.cache = None%0A def __call__(self) :%0A if self.cache is None or not self.fresh(): %0A self.cache = self.service()%0A self.fresh = TimeUtil.ExpireTimer(24*60) %0A return self.cache%0A class StockService() :%0A def __init__(self) :%0A self.fresh = %7B%7D%0A self.cache = %7B%7D%0A def __call__(self,stock) :%0A if stock not in self.cache.keys() or not self.fresh%5Bstock%5D(): %0A y1,y2,r = get_year_parameters()%0A self.cache%5Bstock%5D = get_yahoo_historical(stock,y1)%0A self.fresh%5Bstock%5D = TimeUtil.ExpireTimer(24*60) %0A return self.cache%5Bstock%5D%0A
|
|
6d33ed73adeea4808ed4b3b9bd8642ad83910dfc
|
add ridgeline example (#1519)
|
altair/examples/ridgeline_plot.py
|
altair/examples/ridgeline_plot.py
|
Python
| 0 |
@@ -0,0 +1,1636 @@
+%22%22%22%0ARidgeline plot (Joyplot) Example%0A--------------------------------%0AA %60Ridgeline plot %3Chttps://serialmentor.com/blog/2017/9/15/goodbye-joyplots%3E%60_%0Achart is a chart that lets you visualize distribution of a numeric value for %0Aseveral groups.%0A%0ASuch a chart can be created in Altair by first transforming the data into a%0Asuitable representation.%0A%0A%22%22%22%0A# category: other charts%0Aimport altair as alt%0Afrom vega_datasets import data%0A%0Asource = data.seattle_weather.url%0A%0Astep = 20%0Aoverlap = 1%0A%0Aridgeline = alt.Chart(source).transform_timeunit(%0A Month='month(date)'%0A).transform_joinaggregate(%0A mean_temp='mean(temp_max)', groupby=%5B'Month'%5D%0A).transform_bin(%0A %5B'bin_max', 'bin_min'%5D, 'temp_max'%0A).transform_aggregate(%0A value='count()', groupby=%5B'Month', 'mean_temp', 'bin_min', 'bin_max'%5D%0A).transform_impute(%0A impute='value', groupby=%5B'Month', 'mean_temp'%5D, key='bin_min', value=0%0A).mark_line(%0A interpolate='monotone',%0A fillOpacity=0.8,%0A stroke='lightgray',%0A strokeWidth=0.5%0A).encode(%0A alt.X('bin_min:Q', bin='binned', title='Maximum Daily Temperature (C)'),%0A alt.Y(%0A 'value:Q',%0A scale=alt.Scale(range=%5Bstep, -step * overlap%5D),%0A axis=None%0A ),%0A alt.Fill(%0A 'mean_temp:Q',%0A legend=None,%0A scale=alt.Scale(domain=%5B30, 5%5D, scheme='redyellowblue')%0A ),%0A alt.Row(%0A 'Month:T',%0A title=None,%0A header=alt.Header(labelAngle=0, labelAlign='right', format='%25B')%0A )%0A).properties(%0A bounds='flush', title='Seattle Weather', height=step%0A).configure_facet(%0A spacing=0%0A).configure_view(%0A stroke=None%0A).configure_title(%0A anchor='end'%0A)%0Aridgeline%0A%0A
|
|
dfe65e6839a4347c7acfc011f052db6ec4ee1d9d
|
test Task
|
tests/unit/test_task.py
|
tests/unit/test_task.py
|
Python
| 0.999998 |
@@ -0,0 +1,1563 @@
+import sys%0Afrom zorn import tasks%0Afrom io import StringIO%0A%0Adef test_task():%0A task = tasks.Task()%0A assert task.verbosity == 1%0A%0Adef test_parse_verbosity_standard():%0A silent = False%0A verbose = False%0A verbosity = tasks.Task.parse_verbosity(verbose, silent)%0A assert verbosity == 1%0A%0Adef test_parse_verbosity_silent():%0A silent = True%0A verbose = False%0A verbosity = tasks.Task.parse_verbosity(verbose, silent)%0A assert verbosity == 0%0A silent = True%0A verbose = True%0A verbosity = tasks.Task.parse_verbosity(verbose, silent)%0A assert verbosity == 0%0A%0Adef test_parse_verbosity_verbose():%0A silent = False%0A verbose = True%0A verbosity = tasks.Task.parse_verbosity(verbose, silent)%0A assert verbosity == 2%0A%0Adef test_comunicate_standard_verbosity():%0A task = tasks.Task(1)%0A stdout_ = sys.stdout%0A stream = StringIO()%0A sys.stdout = stream%0A task.communicate('standard')%0A task.communicate('verbose', False)%0A sys.stdout = stdout_%0A assert stream.getvalue() == 'standard%5Cn'%0A%0Adef test_comunicate_silent():%0A task = tasks.Task(0)%0A stdout_ = sys.stdout%0A stream = StringIO()%0A sys.stdout = stream%0A task.communicate('standard')%0A task.communicate('verbose', False)%0A sys.stdout = stdout_%0A assert stream.getvalue() == ''%0A%0Adef test_comunicate_verbose():%0A task = tasks.Task(2)%0A stdout_ = sys.stdout%0A stream = StringIO()%0A sys.stdout = stream%0A task.communicate('standard')%0A task.communicate('verbose', False)%0A sys.stdout = stdout_%0A assert stream.getvalue() == 'standard%5Cnverbose%5Cn'%0A
|
|
02156d3e9140b7f8f61b79816891ede2fff2cc49
|
rename models to properties
|
properties.py
|
properties.py
|
Python
| 0.000005 |
@@ -0,0 +1,207 @@
+import ConfigParser%0Aimport os%0Aimport sys%0A%0Asubreddit = 'taigeilove'%0Auser_agent = 'Python:whalehelpbot:v1.0 (by /u/Noperative)'%0A%0Ageneral_words = %5B%5D%0Afirst_time_words = %5B%5D%0Aexpedition_words = %5B%5D%0Aquest_words = %5B%5D%0A
|
|
e74c3273f840afbca25936083abdfb6577b4fdd0
|
Returns a list of tags and attributes
|
smallsmilhandler.py
|
smallsmilhandler.py
|
Python
| 0 |
@@ -0,0 +1,1226 @@
+#!/usr/bin/python%0A# -*- coding: utf-8 -*-%0A#CELIA GARCIA FERNANDEz%0A%0Afrom xml.sax import make_parser%0Afrom xml.sax.handler import ContentHandler%0A%0Aclass SmallSMILHandler(ContentHandler):%0A%0A def __init__ (self):%0A %0A self.lista = %5B%5D%0A self.etiquetas = %5B'root-layout', 'region', 'img', 'audio', 'textstream'%5D%0A self.attributosD = %7B%0A 'root-layout': %5B'width', 'height'%5D,%0A 'region': %5B'id','top','left'%5D,%0A 'img': %5B'scr','region','begin','dur'%5D,%0A 'audio': %5B'src','begin','dur'%5D,%0A 'textstream': %5B'src', 'region'%5D%0A %7D%0A %0A def startElement(self, name, attrs):%0A%0A diccionario = %7B%7D%0A%0A if name in self.etiquetas:%0A diccionario%5B%22name%22%5D = name%0A for key in self.attributosD%5Bname%5D:%0A diccionario%5Bkey%5D = attrs.get(key, %22%22)%0A self.lista.append(diccionario)%0A %0A def get_tags(self):%0A return self.lista%0A %0A%0Aif __name__ == %22__main__%22:%0A%0A parser = make_parser()%0A small = SmallSMILHandler()%0A parser.setContentHandler(small)%0A parser.parse(open('karaoke.smil'))%0A %0A print small.get_tags() %0A %0A %0A %0A %0A %0A %0A %0A %0A
|
|
024e7fe473a19a16b7e34203aef2841af7a3aad4
|
add markreads script
|
etc/markreads.py
|
etc/markreads.py
|
Python
| 0 |
@@ -0,0 +1,459 @@
+#!/usr/bin/env python%0A%0Aimport pysam%0Aimport sys%0A%0A%0Adef markreads(bamfn, outfn):%0A bam = pysam.AlignmentFile(bamfn, 'rb')%0A out = pysam.AlignmentFile(outfn, 'wb', template=bam)%0A%0A for read in bam.fetch(until_eof=True):%0A tags = read.tags%0A tags.append(('BS',1))%0A read.tags = tags%0A out.write(read)%0A%0Aif len(sys.argv) == 3:%0A markreads(*sys.argv%5B1:%5D)%0A%0Aelse:%0A print 'usage:', sys.argv%5B0%5D, '%3Cinput BAM%3E %3Coutput BAM%3E'%0A %0A
|
|
0bd69e17d75cf1ecaa53153fd07abf2e139f57b7
|
add function0-input.py
|
input/function0-input.py
|
input/function0-input.py
|
Python
| 0.000021 |
@@ -0,0 +1,180 @@
+# -*- coding: utf-8 -*-%0A# Author Frank Hu%0A# iDoulist Function 0 - input%0A%0Aimport urllib2%0A%0Aresponse = urllib2.urlopen(%22http://www.douban.com/doulist/38390646/%22)%0Aprint response.read()
|
|
0b840dadc03d7d4256e4969620413090d5808868
|
fix g18/zx arc preview
|
lib/python/rs274/interpret.py
|
lib/python/rs274/interpret.py
|
# This is a component of AXIS, a front-end for emc
# Copyright 2004, 2005, 2006 Jeff Epler <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import math
class Translated:
offset_x = offset_y = offset_z = offset_a = offset_b = offset_c = 0
def translate(self, x,y,z,a,b,c):
return [x+self.offset_x, y+self.offset_y, z+self.offset_z,
a+self.offset_a, b+self.offset_b, c+self.offset_c]
def straight_traverse(self, *args):
self.straight_traverse_translated(*self.translate(*args))
def straight_feed(self, *args):
self.straight_feed_translated(*self.translate(*args))
def set_origin_offsets(self, offset_x, offset_y, offset_z, offset_a, offset_b, offset_c):
self.offset_x = offset_x #- (self.ox - self.offset_x)
self.offset_y = offset_y #- (self.oy - self.offset_y)
self.offset_z = offset_z #- (self.oz - self.offset_z)
class ArcsToSegmentsMixin:
plane = 1
def set_plane(self, plane):
self.plane = plane
def arc_feed(self, x1, y1, cx, cy, rot, z1, a, b, c, u, v, w):
if self.plane == 1:
f = n = [x1+self.offset_x,y1+self.offset_y,z1+self.offset_z, a, b, c, 0, 0, 0]
cx=cx+self.offset_x
cy=cy+self.offset_y
xyz = [0,1,2]
elif self.plane == 3:
f = n = [y1+self.offset_x,z1+self.offset_y,x1+self.offset_z, a, b, c, 0, 0, 0]
cx=cx+self.offset_x
cy=cy+self.offset_z
xyz = [2,0,1]
else:
f = n = [z1+self.offset_x,x1+self.offset_y,y1+self.offset_z, a, b, c, 0, 0, 0]
cx=cx+self.offset_y
cy=cy+self.offset_z
xyz = [1,2,0]
ox, oy, oz = self.lo
o = [ox, oy, oz, 0, 0, 0, 0, 0, 0]
theta1 = math.atan2(o[xyz[1]]-cy, o[xyz[0]]-cx)
theta2 = math.atan2(n[xyz[1]]-cy, n[xyz[0]]-cx)
rad = math.hypot(o[xyz[0]]-cx, o[xyz[1]]-cy)
if rot < 0:
if theta2 >= theta1: theta2 -= math.pi * 2
else:
if theta2 <= theta1: theta2 += math.pi * 2
def interp(low, high):
return low + (high-low) * i / steps
steps = max(8, int(128 * abs(theta1 - theta2) / math.pi))
p = [0] * 9
for i in range(1, steps):
theta = interp(theta1, theta2)
p[xyz[0]] = math.cos(theta) * rad + cx
p[xyz[1]] = math.sin(theta) * rad + cy
p[xyz[2]] = interp(o[xyz[2]], n[xyz[2]])
p[3] = interp(o[3], n[3])
p[4] = interp(o[4], n[4])
p[5] = interp(o[5], n[5])
p[6] = interp(o[6], n[6])
p[7] = interp(o[7], n[7])
p[8] = interp(o[8], n[8])
self.straight_arcsegment(*p)
self.straight_arcsegment(*n)
class PrintCanon:
def set_origin_offsets(self, *args):
print "set_origin_offsets", args
def next_line(self, state):
print "next_line", state.sequence_number
self.state = state
def set_plane(self, plane):
print "set plane", plane
def set_feed_rate(self, arg):
print "set feed rate", arg
def comment(self, arg):
print "#", arg
def straight_traverse(self, *args):
print "straight_traverse %.4g %.4g %.4g %.4g %.4g %.4g" % args
def straight_feed(self, *args):
print "straight_feed %.4g %.4g %.4g %.4g %.4g %.4g" % args
def dwell(self, arg):
if arg < .1:
print "dwell %f ms" % (1000 * arg)
else:
print "dwell %f seconds" % arg
def arc_feed(self, *args):
print "arc_feed %.4g %.4g %.4g %.4g %.4g %.4g %.4g %.4g %.4g" % args
# vim:ts=8:sts=4:et:
|
Python
| 0 |
@@ -2127,33 +2127,33 @@
=cx+self.offset_
-x
+z
%0A cy=
@@ -2159,33 +2159,33 @@
=cy+self.offset_
-z
+x
%0A xyz
|
70d5b47a66d883187574c409ac08ece24277d292
|
Add the test.py example that is cited in the cytomine.org documentation
|
examples/test.py
|
examples/test.py
|
Python
| 0 |
@@ -0,0 +1,1728 @@
+# -*- coding: utf-8 -*-%0A%0A# * Copyright (c) 2009-2020. Authors: see NOTICE file.%0A# *%0A# * Licensed under the Apache License, Version 2.0 (the %22License%22);%0A# * you may not use this file except in compliance with the License.%0A# * You may obtain a copy of the License at%0A# *%0A# * http://www.apache.org/licenses/LICENSE-2.0%0A# *%0A# * Unless required by applicable law or agreed to in writing, software%0A# * distributed under the License is distributed on an %22AS IS%22 BASIS,%0A# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0A# * See the License for the specific language governing permissions and%0A# * limitations under the License.%0A%0A# * This script is just a simple example used to verify if the %0A# * Cytomine Python Client is correctly installed.%0A# * Using a correct Cytomine instance URL, and keys of a user, it will just fetch his username.%0A%0Aimport sys%0Afrom argparse import ArgumentParser%0A%0A__author__ = %22Renaud Hoyoux %[email protected]%3E%22%0A%0Aif __name__ == '__main__':%0A from cytomine import Cytomine%0A from cytomine.models.user import *%0A%0A parser = ArgumentParser(prog=%22Cytomine Python client example%22)%0A%0A # Cytomine%0A parser.add_argument('--cytomine_host', dest='host', default='demo.cytomine.be', help=%22The Cytomine host%22)%0A parser.add_argument('--cytomine_public_key', dest='public_key', help=%22The Cytomine public key%22)%0A parser.add_argument('--cytomine_private_key', dest='private_key', help=%22The Cytomine private key%22)%0A params, other = parser.parse_known_args(sys.argv%5B1:%5D)%0A%0A with Cytomine(host=params.host, public_key=params.public_key, private_key=params.private_key) as cytomine:%0A # Get the connected user%0A user = CurrentUser().fetch()%0A print(user)
|
|
1c2bde23ffc6188fe839b36011775663f86c8919
|
Create config.py
|
config.py
|
config.py
|
Python
| 0.000002 |
@@ -0,0 +1,347 @@
+# -*- coding: utf-8 -*-%0Aimport configparser%0A%0Aclass Config:%0A%09_cp = None%0A%0A%09def load():%0A%09%09Config._cp = configparser.ConfigParser()%0A%09%09Config._cp.read(%22config.ini%22)%0A%09%09for category in Config._cp.sections():%0A%09%09%09temp = %7B%7D%0A%09%09%09for op in Config._cp.options(category):%0A%09%09%09%09temp%5Bop%5D = Config._cp%5Bcategory%5D%5Bop%5D%0A%09%09%09%09setattr(Config, category, temp)%0AConfig.load()%0A
|
|
0712d78cf76c1d3f699317fcc64db3fe60dc6266
|
Add utility functions for generating documentation
|
docs/utils.py
|
docs/utils.py
|
Python
| 0.000001 |
@@ -0,0 +1,158 @@
+def cleanup_docstring(docstring):%0A doc = %22%22%0A stripped = %5Bline.strip() for line in docstring.split(%22%5Cn%22)%5D%0A doc += '%5Cn'.join(stripped)%0A return doc%0A%0A
|
|
1bf634bd24d94a7d7ff358cea3215bba5b59d014
|
Create power_of_two.py in bit manipulation
|
bit_manipulation/power_of_two/python/power_of_two.py
|
bit_manipulation/power_of_two/python/power_of_two.py
|
Python
| 0.000011 |
@@ -0,0 +1,336 @@
+# Check if given number is power of 2 or not %0A %0A# Function to check if x is power of 2 %0Adef isPowerOfTwo (x): %0A %0A # First x in the below expression is for the case when x is 0 %0A return (x and (not(x & (x - 1))) ) %0A %0A# Driver code %0Ax = int(input(%22Enter a no:%22))%0Aif(isPowerOfTwo(x)): %0A print('Yes') %0Aelse: %0A print('No') %0A
|
|
00fa30068b36385c8b9b574074743af01aedff1f
|
find best parameters
|
mkTargeted/find_parameters.py
|
mkTargeted/find_parameters.py
|
Python
| 0.999989 |
@@ -0,0 +1,1694 @@
+%0Adef common_elements(list1, list2):%0A return %5Belement for element in list1 if element in list2%5D%0A%0A%0Angap_best = 0 %0Aglimit_best = 0%0Afit_best = -1%0A%0Afor ngap in range(5,50):%0A for glimit in range(100,1500,100):%0A data = t2%0A data = updateArray(data)%0A #data = findClusterRedshift(data)%0A data%5B'CLUSZ'%5D = tZ%0A data = findSeperationSpatial(data, center)%0A data = findLOSV(data)%0A # make initial cuts%0A mask = abs(data%5B'LOSV'%5D) %3C 5000%0A data = data%5Bmask%5D%0A while True:%0A try:%0A if size == data.size:%0A break%0A except NameError:%0A pass%0A%0A size = data.size%0A #print 'size', data.size%0A%0A #data = rejectInterlopers(data)%0A flag = False%0A try:%0A x = shifty_gapper(data%5B'SEP'%5D, data%5B'Z'%5D, tZ, ngap=ngap,%0A glimit=glimit)%0A except:%0A flag = True%0A break%0A data = data%5Bx%5D%0A #data = findLOSVD(data)%0A data = findLOSVDgmm(data)%0A data%5B'LOSVD'%5D = data%5B'LOSVDgmm'%5D%0A%0A data = findR200(data)%0A mask = data%5B'SEP'%5D %3C data%5B'R200'%5D%5B0%5D%0A data = data%5Bmask%5D%0A%0A data = findClusterRedshift(data)%0A data = findSeperationSpatial(data, center)%0A data = findLOSV(data)%0A%0A if not flag:%0A matched = len(common_elements(t%5B'HALOID'%5D, data%5B'HALOID'%5D))%0A fit = matched/t.size + 1/data.size%0A if fit %3E fit_best:%0A fit_best = fit%0A ngap_best = ngap%0A glimit_best = glimit%0A else:%0A pass%0A%0A%0A
|
|
5124d27adbaac0304b2b9a318461257ed9d678fc
|
valid number
|
python/valid_num.py
|
python/valid_num.py
|
Python
| 0.999385 |
@@ -0,0 +1,644 @@
+#! /usr/bin/python%0A%0A%0A'''%0AValidate if a given string is numeric.%0A%0ASome examples:%0A%220%22 =%3E true%0A%22 0.1 %22 =%3E true%0A%22abc%22 =%3E false%0A%221 a%22 =%3E false%0A%222e10%22 =%3E true%0ANote: It is intended for the problem statement to be ambiguous. You should gather all requirements up front before implementing one.%0A'''%0A%0Aclass Solution:%0A # @param %7Bstring%7D s%0A # @return %7Bboolean%7D%0A def isNumber(self, s):%0A try:%0A float(s)%0A return True%0A except ValueError:%0A return False%0A%0Aif __name__ =='__main__':%0A solution = Solution()%0A a = %5B'0', '0.1', 'abc', '1 a', '2e10'%5D%0A print %5B solution.isNumber(string) for string in a %5D%0A
|
|
8fddde260af6ea1e6de8491dd99dca671634327c
|
Add test for the matrix representation function.
|
test/operator/utility_test.py
|
test/operator/utility_test.py
|
Python
| 0 |
@@ -0,0 +1,2030 @@
+# Copyright 2014, 2015 The ODL development group%0A#%0A# This file is part of ODL.%0A#%0A# ODL is free software: you can redistribute it and/or modify%0A# it under the terms of the GNU General Public License as published by%0A# the Free Software Foundation, either version 3 of the License, or%0A# (at your option) any later version.%0A#%0A# ODL is distributed in the hope that it will be useful,%0A# but WITHOUT ANY WARRANTY; without even the implied warranty of%0A# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the%0A# GNU General Public License for more details.%0A#%0A# You should have received a copy of the GNU General Public License%0A# along with ODL. If not, see %3Chttp://www.gnu.org/licenses/%3E.%0A%0A%0A# Imports for common Python 2/3 codebase%0Afrom __future__ import print_function, division, absolute_import%0Afrom future import standard_library%0Astandard_library.install_aliases()%0Afrom builtins import str, super%0A%0A# External module imports%0Aimport pytest%0Aimport numpy as np%0A%0A# ODL imports%0Aimport odl%0Afrom odl.operator.utility import matrix_representation%0Afrom odl.util.testutils import almost_equal%0A%0A%0Aclass MultiplyOp(odl.Operator):%0A%0A %22%22%22Multiply with matrix.%0A %22%22%22%0A%0A def __init__(self, matrix, domain=None, range=None):%0A domain = (odl.Rn(matrix.shape%5B1%5D)%0A if domain is None else domain)%0A range = (odl.Rn(matrix.shape%5B0%5D)%0A if range is None else range)%0A self.matrix = matrix%0A%0A super().__init__(domain, range, linear=True)%0A%0A def _apply(self, rhs, out):%0A np.dot(self.matrix, rhs.data, out=out.data)%0A%0A @property%0A def adjoint(self):%0A return MultiplyOp(self.matrix.T, self.range, self.domain)%0A%0A%0Adef test_matrix_representation():%0A # Verify that the matrix representation function returns the correct matrix%0A%0A A = np.random.rand(3, 3)%0A%0A Aop = MultiplyOp(A)%0A%0A the_matrix = matrix_representation(Aop)%0A%0A assert almost_equal(np.sum(np.abs(A - the_matrix)), 1e-6)%0A%0A%0Aif __name__ == '__main__':%0A pytest.main(str(__file__.replace('%5C%5C', '/')) + ' -v')%0A
|
|
8b92e55fa202723f7859cd1ea22e835e5c693807
|
Add some time handling functions
|
Instanssi/kompomaatti/misc/awesometime.py
|
Instanssi/kompomaatti/misc/awesometime.py
|
Python
| 0.000015 |
@@ -0,0 +1,1822 @@
+# -*- coding: utf-8 -*-%0A%0Afrom datetime import datetime, timedelta%0A%0Adef todayhelper():%0A today = datetime.today()%0A return datetime(day=today.day, year=today.year, month=today.month)%0A%0Adef format_single_helper(t):%0A now = datetime.now()%0A today = todayhelper()%0A tomorrow = today + timedelta(days=1)%0A the_day_after_tomorrow = today + timedelta(days=2) # Must honor the movie!%0A %0A if t %3C now:%0A return %22p%C3%A4%C3%A4ttynyt%22%0A elif t %3E= now and t %3C tomorrow:%0A return %22t%C3%A4n%C3%A4%C3%A4n klo. %22 + t.strftime(%22%25H:%25M%22)%0A elif t %3E= tomorrow and t %3C the_day_after_tomorrow:%0A return %22huomenna klo. %22 + t.strftime(%22%25H:%25M%22)%0A elif t %3E= the_day_after_tomorrow and t %3C today+timedelta(days=3):%0A return %22ylihuomenna klo. %22 + t.strftime(%22%25H:%25M%22)%0A else:%0A return t.strftime(%22%25d.%25m.%25Y klo. %25H:%25M%22)%0A%0Adef format_single(t):%0A return format_single_helper(t).capitalize()%0A%0Adef format_between(t1, t2):%0A now = datetime.now()%0A today = todayhelper()%0A tomorrow = today + timedelta(days=1)%0A the_day_after_tomorrow = today + timedelta(days=2) # Must honor the movie!%0A %0A if t1 %3C now and t2 %3E now:%0A left = t2-now%0A l_hours = int(left.total_seconds() / timedelta(hours=1).total_seconds())%0A l_minutes = int((left.total_seconds() - timedelta(hours=l_hours).total_seconds()) / 60)%0A if(l_hours == 0):%0A return %22Menossa, aikaa j%C3%A4ljell%C3%A4 %22 + str(l_minutes) + %22 minuuttia%22%0A else:%0A return %22Menossa, aikaa j%C3%A4ljell%C3%A4 %22 + str(l_hours) + %22 tuntia ja %22 + str(l_minutes) + %22 minuuttia%22%0A elif t1 %3E now and t1 %3C today+timedelta(days=3):%0A return %22Alkaa %22 + format_single_helper(t1) + %22 ja p%C3%A4%C3%A4ttyy %22 + format_single_helper(t2)%0A else:%0A return %22Alkaa %22 + t1.strftime(%22%25d.%25m.%25Y %25H:%25M%22) + %22 ja p%C3%A4%C3%A4ttyy %22 + t2.strftime(%22%25d.%25m.%25Y %25H:%25M%22) + %22.%22%0A
|
|
bca4a0a0dda95306fe126191166e733c7ccea3ee
|
Add staff permissions for backup models
|
nodeconductor/backup/perms.py
|
nodeconductor/backup/perms.py
|
Python
| 0 |
@@ -0,0 +1,231 @@
+from nodeconductor.core.permissions import StaffPermissionLogic%0A%0A%0APERMISSION_LOGICS = (%0A ('backup.BackupSchedule', StaffPermissionLogic(any_permission=True)),%0A ('backup.Backup', StaffPermissionLogic(any_permission=True)),%0A)%0A
|
|
8f2d421242da11ab2b4fc3482ce6de5480b20070
|
Improve documentation
|
bears/c_languages/ClangComplexityBear.py
|
bears/c_languages/ClangComplexityBear.py
|
from clang.cindex import Index, CursorKind
from coalib.bears.LocalBear import LocalBear
from coalib.results.Result import Result
from coalib.results.SourceRange import SourceRange
from bears.c_languages.ClangBear import clang_available
class ClangComplexityBear(LocalBear):
"""
Calculates cyclomatic complexity of each function and displays it to the
user.
"""
check_prerequisites = classmethod(clang_available)
decisive_cursor_kinds = {
CursorKind.IF_STMT, CursorKind.WHILE_STMT, CursorKind.FOR_STMT,
CursorKind.DEFAULT_STMT, CursorKind.CASE_STMT}
def function_key_points(self, cursor, top_function_level=False):
"""
Calculates number of function's decision points and exit points.
:param top_function_level: Whether cursor is in the top level of
the function.
"""
decisions, exits = 0, 0
for child in cursor.get_children():
if child.kind in self.decisive_cursor_kinds:
decisions += 1
elif child.kind == CursorKind.RETURN_STMT:
exits += 1
if top_function_level:
# There is no point to move forward, so just return.
return decisions, exits
child_decisions, child_exits = self.function_key_points(child)
decisions += child_decisions
exits += child_exits
if top_function_level:
# Implicit return statement.
exits += 1
return decisions, exits
def complexities(self, cursor, filename):
"""
Calculates cyclomatic complexities of functions.
"""
file = cursor.location.file
if file is not None and file.name != filename:
# There is nothing to do in another file.
return
if cursor.kind == CursorKind.FUNCTION_DECL:
child = next((child for child in cursor.get_children()
if child.kind != CursorKind.PARM_DECL),
None)
if child:
decisions, exits = self.function_key_points(child, True)
complexity = max(1, decisions - exits + 2)
yield cursor, complexity
else:
for child in cursor.get_children():
yield from self.complexities(child, filename)
def run(self, filename, file, max_complexity: int=8):
"""
Calculates cyclomatic complexity of functions in file.
:param max_complexity: Maximum cyclomatic complexity that is
considered to be normal. The value of 10 had
received substantial corroborating evidence.
But the general recommendation: "For each
module, either limit cyclomatic complexity to
[the agreed-upon limit] or provide a written
explanation of why the limit was exceeded."
"""
root = Index.create().parse(filename).cursor
for cursor, complexity in self.complexities(root, filename):
if complexity > max_complexity:
affected_code = (SourceRange.from_clang_range(cursor.extent),)
yield Result(
self,
"The function '{function}' should be simplified. Its "
"cyclomatic complexity is {complexity} which exceeds "
"maximal recommended value "
"of {rec_value}.".format(
function=cursor.displayname,
complexity=complexity,
rec_value=max_complexity),
affected_code=affected_code,
additional_info=(
"The cyclomatic complexity is a metric that measures "
"how complicated a function is by counting branches "
"and exits of each function.\n\n"
"Your function seems to be complicated and should be "
"refactored so that it can be understood by other "
"people easily.\n\nSee "
"<http://www.wikiwand.com/en/Cyclomatic_complexity>"
" for more information."))
|
Python
| 0 |
@@ -229,16 +229,27 @@
vailable
+, ClangBear
%0A%0A%0Aclass
@@ -380,24 +380,62 @@
er.%0A %22%22%22%0A
+%0A LANGUAGES = ClangBear.LANGUAGES%0A%0A
check_pr
@@ -2511,33 +2511,84 @@
%22%22%0A C
-alculates
+heck for all functions if they are too complicated using the
cyclomatic
@@ -2578,32 +2578,40 @@
g the cyclomatic
+%0A
complexity of f
@@ -2610,28 +2610,123 @@
ity
-of functions in file
+metric.%0A%0A You can read more about this metric at%0A %3Chttps://www.wikiwand.com/en/Cyclomatic_complexity%3E
.%0A%0A
|
54b66e132137eb6abea0a5ae6571dbc52e309b59
|
change all libraries to have module_main of 'index', and add an index.js if it doesn't have one
|
migrations/011-ensure_library_main_module.py
|
migrations/011-ensure_library_main_module.py
|
Python
| 0.000001 |
@@ -0,0 +1,618 @@
+from jetpack.models import PackageRevision%0A%0ALIB_MODULE_MAIN = 'index'%0A%0Alibs = PackageRevision.objects.filter(package__type='l', module_main='main')%0A .select_related('package', 'modules')%0A%0Alibs.update(module_main=LIB_MODULE_MAIN)%0A%0Amain_per_package = %7B%7D%0A%0Afor revision in libs:%0A if revision.modules.filter(filename=LIB_MODULE_MAIN).count() == 0:%0A mod = main_per_package.get(revision.package_id)%0A if not mod:%0A mod = Module(filename=LIB_MODULE_MAIN, author=revision.author)%0A mod.save()%0A main_per_package%5Brevision.package_id%5D = mod%0A%0A revision.modules.add(mod)%0A
|
|
2fda10a83aa5a4d3080a0ce8751e28a18fc9a3e0
|
Add two-point example to serve as a regression test for gridline/plot distinguishing
|
examples/two_point.py
|
examples/two_point.py
|
Python
| 0.000004 |
@@ -0,0 +1,567 @@
+%22%22%22%0ADemonstrates plotting multiple linear features with a single %60%60ax.pole%60%60 call.%0A%0AThe real purpose of this example is to serve as an implicit regression test for%0Asome oddities in the way axes grid lines are handled in matplotlib and%0Amplstereonet. A 2-vertex line can sometimes be confused for an axes grid line,%0Aand they need different handling on a stereonet.%0A%22%22%22%0Aimport matplotlib.pyplot as plt%0Aimport mplstereonet%0A%0Afig, ax = mplstereonet.subplots(figsize=(7,7))%0Astrike = %5B200, 250%5D%0Adip = %5B50, 60%5D%0Aax.pole(strike, dip, 'go', markersize=10)%0Aax.grid()%0Aplt.show()%0A%0A
|
|
ee85acb7f9f3af91db3bfb4bf766636883f07685
|
Add an extra test for the OpalSerializer
|
opal/tests/test_core_views.py
|
opal/tests/test_core_views.py
|
Python
| 0.000001 |
@@ -0,0 +1,297 @@
+%22%22%22%0AUnittests for opal.core.views%0A%22%22%22%0Afrom opal.core import test%0A%0Afrom opal.core import views%0A%0Aclass SerializerTestCase(test.OpalTestCase):%0A%0A def test_serializer_default_will_super(self):%0A s = views.OpalSerializer()%0A with self.assertRaises(TypeError):%0A s.default(None)%0A
|
|
1fdffc42c7ff7ea4339a58e8a19ffa07253e4149
|
Add script to resolve conflicts
|
resolveconflicts.py
|
resolveconflicts.py
|
Python
| 0.000001 |
@@ -0,0 +1,2905 @@
+# Copyright (C) 2014 Igor Tkach%0A#%0A# This Source Code Form is subject to the terms of the Mozilla Public%0A# License, v. 2.0. If a copy of the MPL was not distributed with this%0A# file, You can obtain one at http://mozilla.org/MPL/2.0/.%0A%0Aimport argparse%0Aimport couchdb%0Afrom urlparse import urlparse%0A%0A%0Adef parse_args():%0A argparser = argparse.ArgumentParser()%0A argparser.add_argument('couch_url')%0A argparser.add_argument('-s', '--start')%0A return argparser.parse_args()%0A%0A%0Adef mkclient(couch_url):%0A parsed_url = urlparse(couch_url)%0A couch_db = parsed_url.path.lstrip('/')%0A server_url = parsed_url.scheme + '://'+ parsed_url.netloc%0A server = couchdb.Server(server_url)%0A username = parsed_url.username%0A password = parsed_url.password%0A print %22User %25s%25s at %25s, database %25s%22 %25 (%0A username,%0A '' if password else ' (no password)',%0A server.resource.url,%0A couch_db)%0A if password:%0A server.resource.credentials = (username, password)%0A return server%5Bcouch_db%5D%0A%0A%0Adef main():%0A args = parse_args()%0A db = mkclient(args.couch_url)%0A viewoptions = %7B%7D%0A if args.start:%0A viewoptions%5B'startkey'%5D = args.start%0A viewoptions%5B'startkey_docid'%5D = args.start%0A for row in db.iterview('_all_docs', 100, **viewoptions):%0A doc = db.get(row.id, conflicts=True)%0A conflicts = doc.get('_conflicts')%0A if conflicts:%0A best_mw_revid = doc%5B'parse'%5D%5B'revid'%5D%0A docs = %5Bdoc%5D%0A best_doc = doc%0A print row.id, '%5Cn', doc.rev, best_mw_revid, conflicts%0A all_aliases = set(doc.get('aliases', ()))%0A aliase_count = len(all_aliases)%0A for conflict_rev in conflicts:%0A conflict_doc = db.get(row.id, rev=conflict_rev)%0A docs.append(conflict_doc)%0A conflict_mw_revid = conflict_doc%5B'parse'%5D%5B'revid'%5D%0A #print 'conflict mw revid:', conflict_mw_revid%0A if conflict_mw_revid %3E best_mw_revid:%0A best_mw_revid = conflict_mw_revid%0A best_doc = conflict_doc%0A aliases = set(doc.get('aliases', ()))%0A all_aliases.update(aliases)%0A #print all_aliases%0A new_aliases_count = len(all_aliases) - aliase_count%0A #print 'New aliases found in conflict:', new_aliases_count%0A #print 'Best doc: ', best_doc.rev%0A if new_aliases_count %3E 0:%0A print '+A', doc.id%0A if best_doc.rev != doc.rev %3E 0:%0A print '+R', doc.id%0A%0A for doc in docs:%0A if doc.rev == best_doc.rev:%0A print 'Keeping ', doc.rev%0A doc%5B'aliases'%5D = list(all_aliases)%0A db.save(doc)%0A else:%0A print 'Discarding ', doc.rev%0A db.delete(doc)%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
ff6040e00aaa0b0b93f96e0d03caabbc02d08f00
|
Fix import order in openstack/common/lockutils.py
|
openstack/common/lockutils.py
|
openstack/common/lockutils.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import functools
import os
import shutil
import tempfile
import time
import weakref
from eventlet import semaphore
from openstack.common import cfg
from openstack.common.gettextutils import _
from openstack.common import fileutils
from openstack.common import log as logging
LOG = logging.getLogger(__name__)
util_opts = [
cfg.BoolOpt('disable_process_locking', default=False,
help='Whether to disable inter-process locks'),
cfg.StrOpt('lock_path',
default=os.path.abspath(os.path.join(os.path.dirname(__file__),
'../')),
help='Directory to use for lock files')
]
CONF = cfg.CONF
CONF.register_opts(util_opts)
class _InterProcessLock(object):
"""Lock implementation which allows multiple locks, working around
issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
not require any cleanup. Since the lock is always held on a file
descriptor rather than outside of the process, the lock gets dropped
automatically if the process crashes, even if __exit__ is not executed.
There are no guarantees regarding usage by multiple green threads in a
single process here. This lock works only between processes. Exclusive
access between local threads should be achieved using the semaphores
in the @synchronized decorator.
Note these locks are released when the descriptor is closed, so it's not
safe to close the file descriptor while another green thread holds the
lock. Just opening and closing the lock file can break synchronisation,
so lock files must be accessed only using this abstraction.
"""
def __init__(self, name):
self.lockfile = None
self.fname = name
def __enter__(self):
self.lockfile = open(self.fname, 'w')
while True:
try:
# Using non-blocking locks since green threads are not
# patched to deal with blocking locking calls.
# Also upon reading the MSDN docs for locking(), it seems
# to have a laughable 10 attempts "blocking" mechanism.
self.trylock()
return self
except IOError, e:
if e.errno in (errno.EACCES, errno.EAGAIN):
# external locks synchronise things like iptables
# updates - give it some time to prevent busy spinning
time.sleep(0.01)
else:
raise
def __exit__(self, exc_type, exc_val, exc_tb):
try:
self.unlock()
self.lockfile.close()
except IOError:
LOG.exception(_("Could not release the acquired lock `%s`"),
self.fname)
def trylock(self):
raise NotImplementedError()
def unlock(self):
raise NotImplementedError()
class _WindowsLock(_InterProcessLock):
def trylock(self):
msvcrt.locking(self.lockfile, msvcrt.LK_NBLCK, 1)
def unlock(self):
msvcrt.locking(self.lockfile, msvcrt.LK_UNLCK, 1)
class _PosixLock(_InterProcessLock):
def trylock(self):
fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
def unlock(self):
fcntl.lockf(self.lockfile, fcntl.LOCK_UN)
if os.name == 'nt':
import msvcrt
InterProcessLock = _WindowsLock
else:
import fcntl
InterProcessLock = _PosixLock
_semaphores = weakref.WeakValueDictionary()
def synchronized(name, lock_file_prefix, external=False, lock_path=None):
"""Synchronization decorator.
Decorating a method like so::
@synchronized('mylock')
def foo(self, *args):
...
ensures that only one thread will execute the bar method at a time.
Different methods can share the same lock::
@synchronized('mylock')
def foo(self, *args):
...
@synchronized('mylock')
def bar(self, *args):
...
This way only one of either foo or bar can be executing at a time.
The lock_file_prefix argument is used to provide lock files on disk with a
meaningful prefix. The prefix should end with a hyphen ('-') if specified.
The external keyword argument denotes whether this lock should work across
multiple processes. This means that if two different workers both run a
a method decorated with @synchronized('mylock', external=True), only one
of them will execute at a time.
The lock_path keyword argument is used to specify a special location for
external lock files to live. If nothing is set, then CONF.lock_path is
used as a default.
"""
def wrap(f):
@functools.wraps(f)
def inner(*args, **kwargs):
# NOTE(soren): If we ever go natively threaded, this will be racy.
# See http://stackoverflow.com/questions/5390569/dyn
# amically-allocating-and-destroying-mutexes
sem = _semaphores.get(name, semaphore.Semaphore())
if name not in _semaphores:
# this check is not racy - we're already holding ref locally
# so GC won't remove the item and there was no IO switch
# (only valid in greenthreads)
_semaphores[name] = sem
with sem:
LOG.debug(_('Got semaphore "%(lock)s" for method '
'"%(method)s"...'), {'lock': name,
'method': f.__name__})
if external and not CONF.disable_process_locking:
LOG.debug(_('Attempting to grab file lock "%(lock)s" for '
'method "%(method)s"...'),
{'lock': name, 'method': f.__name__})
cleanup_dir = False
# We need a copy of lock_path because it is non-local
local_lock_path = lock_path
if not local_lock_path:
local_lock_path = CONF.lock_path
if not local_lock_path:
cleanup_dir = True
local_lock_path = tempfile.mkdtemp()
if not os.path.exists(local_lock_path):
cleanup_dir = True
fileutils.ensure_tree(local_lock_path)
# NOTE(mikal): the lock name cannot contain directory
# separators
safe_name = name.replace(os.sep, '_')
lock_file_name = '%s%s' % (lock_file_prefix, safe_name)
lock_file_path = os.path.join(local_lock_path,
lock_file_name)
try:
lock = InterProcessLock(lock_file_path)
with lock:
LOG.debug(_('Got file lock "%(lock)s" at %(path)s '
'for method "%(method)s"...'),
{'lock': name,
'path': lock_file_path,
'method': f.__name__})
retval = f(*args, **kwargs)
finally:
# NOTE(vish): This removes the tempdir if we needed
# to create one. This is used to cleanup
# the locks left behind by unit tests.
if cleanup_dir:
shutil.rmtree(local_lock_path)
else:
retval = f(*args, **kwargs)
return retval
return inner
return wrap
|
Python
| 0.00055 |
@@ -857,69 +857,69 @@
mmon
-.gettextutils import _%0Afrom openstack.common import fileutils
+ import fileutils%0Afrom openstack.common.gettextutils import _
%0Afro
|
a0a2017e05af986cd0a7207c429e7dc5e8b3fcd2
|
Add missing tests for Variable
|
tests/test_solver_variable.py
|
tests/test_solver_variable.py
|
Python
| 0.000005 |
@@ -0,0 +1,1728 @@
+from gaphas.solver import Variable%0A%0A%0Adef test_equality():%0A v = Variable(3)%0A w = Variable(3)%0A o = Variable(2)%0A%0A assert v == 3%0A assert 3 == v%0A assert v == w%0A assert not v == o%0A%0A assert v != 2%0A assert 2 != v%0A assert not 3 != v%0A assert v != o%0A%0A%0Adef test_add_to_variable():%0A v = Variable(3)%0A%0A assert v + 1 == 4%0A assert v - 1 == 2%0A assert 1 + v == 4%0A assert 4 - v == 1%0A%0A%0Adef test_add_to_variable_with_variable():%0A v = Variable(3)%0A o = Variable(1)%0A%0A assert v + o == 4%0A assert v - o == 2%0A%0A%0Adef test_mutiplication():%0A v = Variable(3)%0A%0A assert v * 2 == 6%0A assert v / 2 == 1.5%0A assert v // 2 == 1%0A%0A assert 2 * v == 6%0A assert 4.5 / v == 1.5%0A assert 4 // v == 1%0A%0A%0Adef test_mutiplication_with_variable():%0A v = Variable(3)%0A o = Variable(2)%0A%0A assert v * o == 6%0A assert v / o == 1.5%0A assert v // o == 1%0A%0A%0Adef test_comparison():%0A v = Variable(3)%0A%0A assert v %3E 2%0A assert v %3C 4%0A assert v %3E= 2%0A assert v %3E= 3%0A assert v %3C= 4%0A assert v %3C= 3%0A%0A assert not v %3E 3%0A assert not v %3C 3%0A assert not v %3C= 2%0A assert not v %3E= 4%0A%0A%0Adef test_inverse_comparison():%0A v = Variable(3)%0A%0A assert 4 %3E v%0A assert 2 %3C v%0A assert 4 %3E= v%0A assert 3 %3E= v%0A assert 2 %3C= v%0A assert 3 %3C= v%0A%0A assert not 3 %3E v%0A assert not 3 %3C v%0A assert not 4 %3C= v%0A assert not 2 %3E= v%0A%0A%0Adef test_power():%0A v = Variable(3)%0A o = Variable(2)%0A%0A assert v ** 2 == 9%0A assert 2 ** v == 8%0A assert v ** o == 9%0A%0A%0Adef test_modulo():%0A v = Variable(3)%0A o = Variable(2)%0A%0A assert v %25 2 == 1%0A assert 4 %25 v == 1%0A assert v %25 o == 1%0A assert divmod(v, 2) == (1, 1)%0A assert divmod(4, v) == (1, 1)%0A assert divmod(v, o) == (1, 1)%0A
|
|
865356c5b7bbec2b9412ffd3d2a39fea19e4b01a
|
Create getcounts.py
|
usbcounter/getcounts.py
|
usbcounter/getcounts.py
|
Python
| 0.000001 |
@@ -0,0 +1,53 @@
+import serial%0Aimport json%0Aimport os, sys%0Aimport time%0A
|
|
5063cabcd1d85b31868308e376c0f62588c5e3cc
|
convert bands to list
|
landsat/landsat.py
|
landsat/landsat.py
|
#!/usr/bin/env python
# Landsat Util
# License: CC0 1.0 Universal
from __future__ import print_function
import argparse
import textwrap
import json
from dateutil.parser import parse
from downloader import Downloader
from search import Search
from utils import reformat_date, convert_to_integer_list, timer, exit
from mixins import VerbosityMixin
from image import Process
DESCRIPTION = """Landsat-util is a command line utility that makes it easy to
search, download, and process Landsat imagery.
Commands:
Search:
landsat.py search [-p --pathrow] [--lat] [--lon] [-l LIMIT] [-s START] [-e END] [-c CLOUD] [-h]
optional arguments:
-p, --pathrow Paths and Rows in order separated by comma. Use quotes ("001").
Example: path,row,path,row 001,001,190,204
--lat Latitude
--lon Longitude
-l LIMIT, --limit LIMIT
Search return results limit default is 100
-s START, --start START
Start Date - Most formats are accepted e.g.
Jun 12 2014 OR 06/12/2014
-e END, --end END End Date - Most formats are accepted e.g.
Jun 12 2014 OR 06/12/2014
-c CLOUD, --cloud CLOUD
Maximum cloud percentage. Default: 20 perct
-h, --help Show this help message and exit
Download:
landsat download sceneID [sceneID ...] [-h] [-b --bands]
positional arguments:
sceneID Provide Full sceneIDs. You can add as many sceneIDs as you wish
Example: landast download LC81660392014196LGN00
optional arguments:
-b --bands If you specify bands, landsat-util will try to download the band from S3.
If the band does not exist, an error is returned
-h, --help Show this help message and exit
Process:
landsat.py process path [-h] [-b --bands] [-p --pansharpen]
positional arguments:
path Path to the landsat image folder or zip file
optional arguments:
-b --bands Specify bands. The bands should be written in sequence with no spaces
Default: Natural colors (432)
Example --bands 432
-p --pansharpen Whether to also pansharpen the process image.
Pansharpening takes a long time
-v, --verbose Show verbose output
-h, --help Show this help message and exit
"""
def args_options():
parser = argparse.ArgumentParser(prog='landsat',
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent(DESCRIPTION))
subparsers = parser.add_subparsers(help='Landsat Utility',
dest='subs')
# Search Logic
parser_search = subparsers.add_parser('search',
help='Search Landsat metdata')
# Global search options
parser_search.add_argument('-l', '--limit', default=100, type=int,
help='Search return results limit\n'
'default is 100')
parser_search.add_argument('-s', '--start',
help='Start Date - Most formats are accepted '
'e.g. Jun 12 2014 OR 06/12/2014')
parser_search.add_argument('-e', '--end',
help='End Date - Most formats are accepted '
'e.g. Jun 12 2014 OR 06/12/2014')
parser_search.add_argument('-c', '--cloud', type=float, default=20.0,
help='Maximum cloud percentage '
'default is 20 perct')
parser_search.add_argument('-p', '--pathrow',
help='Paths and Rows in order separated by comma. Use quotes ("001").'
'Example: path,row,path,row 001,001,190,204')
parser_search.add_argument('--lat', type=float, help='The latitude')
parser_search.add_argument('--lon', type=float, help='The longitude')
parser_download = subparsers.add_parser('download',
help='Download images from Google Storage')
parser_download.add_argument('scenes',
metavar='sceneID',
nargs="+",
help="Provide Full sceneID, e.g. LC81660392014196LGN00")
parser_download.add_argument('-b', '--bands', help='If you specify bands, landsat-util will try to download '
'the band from S3. If the band does not exist, an error is returned')
parser_process = subparsers.add_parser('process',
help='Process Landsat imagery')
parser_process.add_argument('path',
help='Path to the compressed image file')
parser_process.add_argument('--pansharpen', action='store_true',
help='Whether to also pansharpen the process '
'image. Pan sharpening takes a long time')
parser_process.add_argument('--bands', help='specify band combinations. Default is 432'
'Example: --bands 321')
parser_process.add_argument('-v', '--verbose', action='store_true',
help='Turn on verbosity')
return parser
def main(args):
"""
Main function - launches the program
"""
v = VerbosityMixin()
if args:
if args.subs == 'process':
verbose = True if args.verbose else False
try:
bands = convert_to_integer_list(args.bands)
p = Process(args.path, bands=bands, verbose=verbose)
except IOError:
exit("Zip file corrupted", 1)
stored = p.run(args.pansharpen)
exit("The output is stored at %s." % stored)
elif args.subs == 'search':
try:
if args.start:
args.start = reformat_date(parse(args.start))
if args.end:
args.end = reformat_date(parse(args.end))
except TypeError:
exit("You date format is incorrect. Please try again!", 1)
s = Search()
try:
lat = float(args.lat) if args.lat else None
lon = float(args.lon) if args.lon else None
except ValueError:
exit("The latitude and longitude values must be valid numbers", 1)
result = s.search(row_paths=args.pathrow,
lat=lat,
lon=lon,
limit=args.limit,
start_date=args.start,
end_date=args.end,
cloud_max=args.cloud)
if result['status'] == 'SUCCESS':
v.output('%s items were found' % result['total'], normal=True, arrow=True)
if result['total'] > 100:
exit('Over 100 results. Please narrow your search', 1)
else:
v.output(json.dumps(result, sort_keys=True, indent=4), normal=True, color='green')
exit('Search completed!')
elif result['status'] == 'error':
exit(result['message'], 1)
elif args.subs == 'download':
d = Downloader()
bands = None
if args.bands:
bands = args.bands.replace(' ', '').split(',')
d.download(args.scenes, bands)
def __main__():
global parser
parser = args_options()
args = parser.parse_args()
with timer():
main(args)
if __name__ == "__main__":
try:
__main__()
except KeyboardInterrupt:
exit('Received Ctrl + C... Exiting! Bye.', 1)
# except:
# exit('Unexpected Error: %s' % (sys.exc_info()[0]), 1)
|
Python
| 0.999999 |
@@ -7994,17 +7994,16 @@
oader()%0A
-%0A
@@ -8010,154 +8010,68 @@
-bands = None%0A if args.bands:%0A bands = args.bands.replace(' ', '').sp
+d.download(args.scenes, convert_to_integer_
li
+s
t(
-',')%0A%0A d.download(args.scenes,
+args.
bands)
+)
%0A%0A%0Ad
|
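An illustrative sketch of the convert_to_integer_list helper this commit switches both call sites to. The real implementation lives in landsat's utils module and may differ; this version guesses at the two input shapes the help text mentions (comma-separated "4,3,2" and run-together "432"):

def convert_to_integer_list(value):
    """Turn '4,3,2' or '432' into [4, 3, 2]; pass lists and None through.

    Sketch only -- landsat-util's actual helper may behave differently.
    """
    if value is None or isinstance(value, list):
        return value
    value = str(value).replace(' ', '')
    parts = value.split(',') if ',' in value else list(value)
    return [int(p) for p in parts if p]

assert convert_to_integer_list('4, 3,2') == [4, 3, 2]
assert convert_to_integer_list('432') == [4, 3, 2]
assert convert_to_integer_list(None) is None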
993b1af160e6ed7886c2c95770683fae72332aed
|
remove __debug__
|
direct/src/task/Task.py
|
direct/src/task/Task.py
|
""" This module exists temporarily as a gatekeeper between
TaskOrig.py, the original Python implementation of the task system,
and TaskNew.py, the new C++ implementation. """
wantNewTasks = False
if __debug__:
from pandac.PandaModules import ConfigVariableBool
wantNewTasks = ConfigVariableBool('want-new-tasks', False).getValue()
if wantNewTasks:
from TaskNew import *
else:
from TaskOrig import *
|
Python
| 0.000105 |
@@ -173,64 +173,35 @@
%22%22%0A%0A
-wantNewTasks = False%0Aif __debug__:%0A from pandac.Panda
+from pandac.libpandaexpress
Modu
@@ -230,20 +230,16 @@
bleBool%0A
-
wantNewT
|
85cbec4f398c49a4903c7370f74deeae3d5adabf
|
Create ShowData.py
|
ShowData.py
|
ShowData.py
|
Python
| 0 |
@@ -0,0 +1,2710 @@
+%22%22%22%0AThe MIT License (MIT)%0A%0ACopyright (c) %3C2016%3E %3CLarry McCaig (aka: Larz60+ aka: Larz60p)%3E%0A%0APermission is hereby granted, free of charge, to any person obtaining a%0Acopy of this software and associated documentation files (the %22Software%22),%0Ato deal in the Software without restriction, including without limitation%0Athe rights to use, copy, modify, merge, publish, distribute, sublicense,%0Aand/or sell copies of the Software, and to permit persons to whom the%0ASoftware is furnished to do so, subject to the following conditions:%0A%0AThe above copyright notice and this permission notice shall be included in%0Aall copies or substantial portions of the Software.%0A%0ATHE SOFTWARE IS PROVIDED %22AS IS%22, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR%0AIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,%0AFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE%0AAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER%0ALIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,%0AOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN%0ATHE SOFTWARE.%0A%0A%22%22%22%0Aimport ReadRecord as RdRec%0A%0A%0Aclass ShowData:%0A def __init__(self):%0A self.rr = RdRec.ReadRecord('StockData.json')%0A self.stock_market_record = self.rr.read_data_file()%0A%0Adef show_data(self):%0A stkmktrec = self.stock_market_record%0A # get a list of field names:%0A print('Record fields: %7B%7D'.format(stkmktrec._fields))%0A%0A # List entire record%0A print('%5CnEntire record: %7B%7D'.format(stkmktrec))%0A%0A # Get individual field%0A print('%5Cndbtabledesc: %7B%7D'.format(stkmktrec.dbtabledesc))%0A%0A # Show database column entries%0A print('%5Cndatabase column 0: %7B%7D'.format(stkmktrec.columns%5B0%5D))%0A print('database column 1: %7B%7D'.format(stkmktrec.columns%5B1%5D))%0A print('database column 2: %7B%7D'.format(stkmktrec.columns%5B2%5D))%0A%0A # Column data by key:%0A for n in range(len(stkmktrec.columns)):%0A column = stkmktrec.columns%5Bn%5D%0A print('%5CnColumn %7B%7D all: %7B%7D'.format(n, column))%0A print('Column data %7B%7D field_name: %7B%7D'.format(n, column.field_name))%0A print('Column data %7B%7D db_column_name: %7B%7D'.format(n, column.db_column_name))%0A print('Column data %7B%7D db_column_desc: %7B%7D'.format(n, column.db_column_desc))%0A print('Column data %7B%7D db_column_type: %7B%7D'.format(n, column.db_column_type))%0A%0A # Using get_field_item%0A print('%5CnUsing get_field_item - Column 1, db_column_desc: %7B%7D'%0A .format(self.rr.get_field_item(1, itemname='db_column_desc')))%0A # same with bad data%0A print('With bad data you get: %7B%7D'%0A .format(self.rr.get_field_item(1, itemname='donkykong')))%0A%0A%0Aif __name__ == '__main__':%0A sd = ShowData()%0A sd.show_data()%0A
|
|
64130f988f2154870db540244a399a8297a103e9
|
move hardcoded URL from email script to model definition.
|
dj/scripts/email_url.py
|
dj/scripts/email_url.py
|
#!/usr/bin/python
# email_url.py
# emails the video URL to the presenters
import itertools
from pprint import pprint
from email_ab import email_ab
class email_url(email_ab):
ready_state = 7
subject_template = "[{{ep.show.name}}] Video up: {{ep.name}}"
body_body = """
The video is posted:
{% for url in urls %} {{url}}
{% endfor %}
Look at it, make sure the title is spelled right and the audio sounds reasonable.
If you are satisfied, tweet it, blog it, whatever it. No point in making videos if no one watches them.
To approve it click the Approve button at
https://veyepar.nextdayvideo.com/main/approve/{{ep.id}}/{{ep.slug}}/{{ep.edit_key}}/
As soon as you or someone approves your video, it will be tweeted on @NextDayVideo{% if ep.show.client.tweet_prefix %} tagged {{ep.show.client.tweet_prefix}}{% endif %}. It will also be sent to the event organizers in hopes that they add it to the event website.
{% if ep.twitter_url %}
It has been tweeted: {{ ep.twitter_url }}
Re-tweet it, blog it, whatever it. No point in making videos if no one watches them.
{% endif %}
"""
def context(self, ep):
ctx = super(email_url, self).context(ep)
# dig around for URLs that might be relevant
urls = filter( None,
[ep.public_url,
ep.host_url,
ep.archive_ogv_url,
ep.archive_mp4_url] )
ctx['urls'] = urls
ctx['py_name'] = "email_url.py"
return ctx
if __name__ == '__main__':
p=email_url()
p.main()
|
Python
| 0 |
@@ -588,93 +588,26 @@
%0A
- https://veyepar.nextdayvideo.com/main/approve/%7B%7Bep.id%7D%7D/%7B%7Bep.slug%7D%7D/%7B%7Bep.edit_key%7D%7D/
+%7B%7Bep.approve_url%7D%7D
%0A%0AAs
|
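The diff above assumes the episode model now exposes an approve_url attribute. A hypothetical, framework-free sketch of such a property, reusing the exact URL pattern the old template hard-coded (in veyepar itself this would live on the Django Episode model):

class Episode(object):
    """Hypothetical stand-in for the relevant slice of veyepar's Episode model."""

    def __init__(self, id, slug, edit_key):
        self.id, self.slug, self.edit_key = id, slug, edit_key

    @property
    def approve_url(self):
        # The URL the email template used to hard-code, built in one place
        # so every script that renders it stays consistent.
        return ("https://veyepar.nextdayvideo.com/main/approve/%s/%s/%s/"
                % (self.id, self.slug, self.edit_key))

ep = Episode(42, 'my-talk', 'abc123')
assert ep.approve_url.endswith('/42/my-talk/abc123/')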
ce47fec10ccda45550625221c64322d89622c707
|
Add libjpeg.gyp that wraps third_party/externals/libjpeg/libjpeg.gyp Review URL: https://codereview.appspot.com/5848046
|
gyp/libjpeg.gyp
|
gyp/libjpeg.gyp
|
Python
| 0.000001 |
@@ -0,0 +1,513 @@
+# Copyright 2012 The Android Open Source Project%0A#%0A# Use of this source code is governed by a BSD-style license that can be%0A# found in the LICENSE file.%0A%0A# Depend on this wrapper to pick up libjpeg from third_party%0A%0A%7B%0A 'targets': %5B%0A %7B%0A 'target_name': 'libjpeg',%0A 'type': 'none',%0A 'dependencies': %5B%0A '../third_party/externals/libjpeg/libjpeg.gyp:libjpeg',%0A %5D,%0A %7D,%0A %5D,%0A%7D%0A%0A# Local Variables:%0A# tab-width:2%0A# indent-tabs-mode:nil%0A# End:%0A# vim: set expandtab tabstop=2 shiftwidth=2:%0A
|
|
d4ff515df7e12d26c759adfafcacf82e47da71a1
|
Add util
|
snapchat_fs/util.py
|
snapchat_fs/util.py
|
Python
| 0.000051 |
@@ -0,0 +1,580 @@
+#!/usr/bin/env python%0A%0A%22%22%22%0Autil.py provides a set of nice utility functions that support the snapchat_fs pkg%0A%22%22%22%0A%0A__author__ = %22Alex Clemmer, Chad Brubaker%22%0A__copyright__ = %22Copyright 2013, Alex Clemmer and Chad Brubaker%22%0A__credits__ = %5B%22Alex Clemmer%22, %22Chad Brubaker%22%5D%0A%0A__license__ = %22MIT%22%0A__version__ = %220.1%22%0A__maintainer__ = %22Alex Clemmer%22%0A__email__ = %[email protected]%22%0A__status__ = %22Prototype%22%0A%0A%0Adef bold(text):%0A return '%5C033%5B1m%25s%5C033%5B0m' %25 text%0A%0Adef green(text):%0A return '%5C033%5B1;32m%25s%5C033%5B0m' %25 text%0A%0Adef red(text):%0A return '%5C033%5B1;31m%25s%5C033%5B0m' %25 text%0A%0A
|
|
7d9fd2eed72a2a65744259af1bd8580253f282d3
|
Create a.py
|
abc067/a.py
|
abc067/a.py
|
Python
| 0.000489 |
@@ -0,0 +1,136 @@
+a, b = map(int, input().split())%0A %0Aif a %25 3 == 0 or b %25 3 == 0 or (a + b) %25 3 == 0:%0A print('Possible')%0Aelse:%0A print('Impossible')%0A
|
|
c4d5d04a957fed09228995aa7f84ed19c64e3831
|
Add previously forgotten afterflight utilities module
|
af_utils.py
|
af_utils.py
|
Python
| 0 |
@@ -0,0 +1,2822 @@
+ #Copyright 2013 Aaron Curtis%0A%0A #Licensed under the Apache License, Version 2.0 (the %22License%22);%0A #you may not use this file except in compliance with the License.%0A #You may obtain a copy of the License at%0A%0A #http://www.apache.org/licenses/LICENSE-2.0%0A%0A #Unless required by applicable law or agreed to in writing, software%0A #distributed under the License is distributed on an %22AS IS%22 BASIS,%0A #WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0A #See the License for the specific language governing permissions and%0A #limitations under the License.%0A%0Aimport calendar, datetime, re, numpy%0A%0Adef dt2jsts(datetime):%0A %22%22%22%0A Given a python datetime, convert to javascript timestamp format (milliseconds since Jan 1 1970).%0A Do so with microsecond precision, and without adding any timezone offset.%0A %22%22%22%0A return calendar.timegm(datetime.timetuple())*1e3+datetime.microsecond/1e3%0A%0Adef logpath2dt(filepath):%0A %22%22%22%0A given a dataflashlog in the format produced by Mission Planner,%0A return a datetime which says when the file was downloaded from the APM%0A %22%22%22%0A return datetime.datetime.strptime(re.match(r'.*/(.*) .*$',filepath).groups()%5B0%5D,'%25Y-%25m-%25d %25H-%25M')%0A%0Aclass UTC(datetime.tzinfo):%0A %22%22%22%0A No timezones are provided in python stdlib (gaargh) so we have to make one here%0A %22%22%22%0A%0A def utcoffset(self, dt):%0A return datetime.timedelta(0)%0A%0A def tzname(self, dt):%0A return %22UTC%22%0A%0A def dst(self, dt):%0A return datetime.timedelta(0)%0Autc=UTC()%0A%0Adef cross(series, cross=0, direction='cross'):%0A %22%22%22%0A From http://stackoverflow.com/questions/10475488/calculating-crossing-intercept-points-of-a-series-or-dataframe%0A%0A Given a Series returns all the index values where the data values equal %0A the 'cross' value. %0A%0A Direction can be 'rising' (for rising edge), 'falling' (for only falling %0A edge), or 'cross' for both edges%0A %22%22%22%0A # Find if values are above or bellow yvalue crossing:%0A above=series.values %3E cross%0A below=numpy.logical_not(above)%0A left_shifted_above = above%5B1:%5D%0A left_shifted_below = below%5B1:%5D%0A x_crossings = %5B%5D%0A # Find indexes on left side of crossing point%0A if direction == 'rising':%0A idxs = (left_shifted_above & below%5B0:-1%5D).nonzero()%5B0%5D%0A elif direction == 'falling':%0A idxs = (left_shifted_below & above%5B0:-1%5D).nonzero()%5B0%5D%0A else:%0A rising = left_shifted_above & below%5B0:-1%5D%0A falling = left_shifted_below & above%5B0:-1%5D%0A idxs = (rising %7C falling).nonzero()%5B0%5D%0A%0A # Calculate x crossings with interpolation using formula for a line:%0A x1 = series.index.values%5Bidxs%5D%0A x2 = series.index.values%5Bidxs+1%5D%0A y1 = series.values%5Bidxs%5D%0A y2 = series.values%5Bidxs+1%5D%0A x_crossings = (cross-y1)*(x2-x1)/(y2-y1) + x1%0A%0A return x_crossings%0A
|
|
f68b0bb1e1f10b10e58057f60e17377f027690f8
|
add a util function for ungzip.
|
web/my_util/compress.py
|
web/my_util/compress.py
|
Python
| 0 |
@@ -0,0 +1,274 @@
+import gzip%0Afrom StringIO import StringIO%0A%0Adef ungzip(resp):%0A if resp.info().get('Content-Encoding') == 'gzip':%0A buf = StringIO(resp.read())%0A f = gzip.GzipFile(fileobj=buf)%0A data = f.read()%0A%0A return data%0A%0A else:%0A return resp.read()%0A%0A
|
|
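The helper above is Python 2 (StringIO, urllib2-style responses). A rough Python 3 equivalent, assuming the response object exposes .info() and .read() the way urllib.request responses do:

import gzip
from io import BytesIO

def ungzip(resp):
    """Transparently decompress a urllib response if it is gzip-encoded."""
    if resp.info().get('Content-Encoding') == 'gzip':
        return gzip.GzipFile(fileobj=BytesIO(resp.read())).read()
    return resp.read()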
f66b799a22f2c74b88f867266c2e51eda1377b1c
|
Create find_the_mine.py
|
find_the_mine.py
|
find_the_mine.py
|
Python
| 0.00075 |
@@ -0,0 +1,229 @@
+#Kunal Gautam%0A#Codewars : @Kunalpod%0A#Problem name: Find the Mine!%0A#Problem level: 6 kyu%0A%0Adef mineLocation(field):%0A for i in range(len(field)):%0A for j in range(len(field)):%0A if field%5Bi%5D%5Bj%5D==1: return %5Bi,j%5D%0A
|
|
febc735e79f3cc1b5f2e5fe2882bf28c458f638a
|
Initialize init file
|
wikilink/db/__init__.py
|
wikilink/db/__init__.py
|
Python
| 0 |
@@ -0,0 +1,468 @@
+%22%22%22%0A%09wikilink%0A%09~~~~~~~~%0A%0A%09wiki-link is a web-scraping application to find minimum number %0A%09of links between two given wiki pages. %0A%0A :copyright: (c) 2016 - 2018 by Tran Ly VU. All Rights Reserved.%0A :license: Apache License 2.0.%0A%22%22%22%0A__all__ = %5B%22db%22, %22base%22, %22page%22, %22link%22%5D%0A__author__ = %22Tran Ly Vu ([email protected])%22%0A__version__ = %221.2.0%22%0A__copyright__ = %22Copyright (c) 2016 - 2019 Tran Ly Vu. All Rights Reserved.%22%0A__license__ = %22Apache License 2.0%22%0A%0A
|
|
3fdb673977de57e5555eafb18e36544f3ea8c056
|
Solve the absurd problem with an absurd file
|
selection/absurd.py
|
selection/absurd.py
|
Python
| 0.999963 |
@@ -0,0 +1,371 @@
+import kmeans%0Aimport numpy as np%0Akmeans = reload(kmeans)%0A%0An_sample = 100%0Ap_array = %5B%5D%0Afor i in range(n_sample):%0A if i%2510 == 0:%0A print i, %22 / %22, n_sample%0A %0A kmeans = reload(kmeans)%0A p = kmeans.f(10)%0A p_array.append(p)%0A%0A%0A%0Aimport matplotlib.pyplot as plt%0A%0Ap_array = sorted(p_array)%0A%0Ax = np.arange(0, 1, 1./len(p_array));%0Aplt.plot(x, p_array, 'ro')%0A
|
|
c2bce27530f9997bffcb04f80a8d78db65ff98b2
|
Create GPS.py
|
home/kmcgerald/GPS.py
|
home/kmcgerald/GPS.py
|
Python
| 0.000007 |
@@ -0,0 +1,1937 @@
+from time import sleep%0A# The geofence and measure distance methods should be available in MRL %3E 1.0.86%0A%0Agps1 = Runtime.start(%22gps1%22, %22GPS%22)%0A%0Agps1.connect(%22/dev/tty.palmOneGPS-GPSSerialOut%22)%0Asleep(1)%0A%0A# define some points ... %0A# Lets use Nova Labs 1.0%0Alat1 = 38.950829%0Alon1 = -77.339502%0A# and Nova Labs 2.0%0Alat2 = 38.954471 %0Alon2 = -77.338271%0A# and the nearest Metro station%0Alat3 = 38.947254%0Alon3 = -77.337844%0A# and the Sand Trap out back%0Alat4 = 38.954844%0Alon4 = -77.338797%0A%0Adef input():%0A startingAngle = 0%0A Latitude = msg_gps1_publishGGAData.data%5B0%5D%5B2%5D%0A Longitude = msg_gps1_publishGGAData.data%5B0%5D%5B4%5D%0A altitude = msg_gps1_publishGGAData.data%5B0%5D%5B9%5D%0A print %22Lat: %22 + Latitude%0A print %22Long: %22 + Longitude%0A print %22Alt: %22 + altitude + %22%5Cn%22%0A %0A%0A#have python listening to lidar%0Agps1.addListener(%22publishGGAData%22, python.name, %22input%22) %0A%0Aprint %22Ready to receive Data from GPS...%22%0A%0Aprint %22Let's put a 100 meter GeoFence around around Nova Labs 2.0%22%0A# create a point based geofence with a 100m radius%0Ageofence = gps1.setPointGeoFence(lat2, lon2, 100)%0A%0Adistance = gps1.calculateDistance(lon1, lat1, lon2, lat2)%0A%0A# check if a GPS point is inside the fence%0Aif (gps1.checkInside(geofence, lat1, lon1)):%0A print %22Inside the Fence%22%0Aelse:%0A print %22Outside the Fence%22%0Aprint %22Distance (meters): %22,distance,%22 between Nova Labs 1.0 and Nova Labs 2.0%5Cn%22%0A%0Adistance = gps1.calculateDistance(lon2, lat2, lon3, lat3)%0A%0A# check if a GPS point is inside the fence%0Aif (gps1.checkInside(geofence, lat3, lon3)):%0A print %22Inside the Fence%22%0Aelse:%0A print %22Outside the Fence%22%0Aprint %22Distance (meters): %22,distance, %22 between NL 2 and the nearest Metro Station%5Cn%22%0A%0Adistance = gps1.calculateDistance(lon2, lat2, lon4, lat4)%0A%0A# check if a GPS point is inside the fence%0Aif (gps1.checkInside(geofence, lat4, lon4)):%0A print %22Inside the Fence%22%0Aelse:%0A print %22Outside the Fence%22%0Aprint %22Distance (meters): %22,distance, %22between NL 2 and the nearest sand trap%5Cn%22%0A
|
|
ddc80392b17a3fadcbea09f82ea5f6936f0fd459
|
add fbcode_builder_config for mvfst build in oss
|
build/fbcode_builder/specs/mvfst.py
|
build/fbcode_builder/specs/mvfst.py
|
Python
| 0 |
@@ -0,0 +1,538 @@
+#!/usr/bin/env python%0A# Copyright (c) Facebook, Inc. and its affiliates.%0Afrom __future__ import absolute_import%0Afrom __future__ import division%0Afrom __future__ import print_function%0Afrom __future__ import unicode_literals%0A%0Aimport specs.folly as folly%0Aimport specs.fizz as fizz%0A%0A%0Adef fbcode_builder_spec(builder):%0A return %7B%0A 'depends_on': %5Bfolly, fizz%5D,%0A 'steps': %5B%0A builder.fb_github_cmake_install(%0A 'mvfst/build',%0A github_org='facebookincubator',%0A ),%0A %5D,%0A %7D%0A
|
|
2593b23d261eb595b0844868360974bf180b21d8
|
Version bump
|
jrnl/__init__.py
|
jrnl/__init__.py
|
#!/usr/bin/env python
# encoding: utf-8
"""
jrnl is a simple journal application for your command line.
"""
__title__ = 'jrnl'
__version__ = '1.5.1'
__author__ = 'Manuel Ebert'
__license__ = 'MIT License'
__copyright__ = 'Copyright 2013 Manuel Ebert'
from . import Journal
from . import jrnl
from .jrnl import cli
|
Python
| 0.000001 |
@@ -142,17 +142,17 @@
= '1.5.
-1
+2
'%0A__auth
|
d8ff61b72c07a9f0b22e5cbaefe6277bf2697afc
|
Create project.py
|
project_surgery/project.py
|
project_surgery/project.py
|
Python
| 0.000001 |
@@ -0,0 +1,2007 @@
+# -*- coding: utf-8 -*-%0A##############################################################################%0A#%0A# Author: Gideoni Silva (Omnes)%0A# Copyright 2013-2014 Omnes Tecnologia%0A#%0A# This program is free software: you can redistribute it and/or modify%0A# it under the terms of the GNU Affero General Public License as%0A# published by the Free Software Foundation, either version 3 of the%0A# License, or (at your option) any later version.%0A#%0A# This program is distributed in the hope that it will be useful,%0A# but WITHOUT ANY WARRANTY; without even the implied warranty of%0A# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the%0A# GNU Affero General Public License for more details.%0A#%0A# You should have received a copy of the GNU Affero General Public License%0A# along with this program. If not, see %3Chttp://www.gnu.org/licenses/%3E.%0A#%0A##############################################################################%0A%0Afrom openerp.osv import orm, osv, fields%0A%0Aclass project(orm.Model):%0A _inherit = %22project.project%22%0A _columns = %7B%0A 'doctor_id': fields.many2one(%0A 'res.partner', 'Doctor',%0A domain = %22%5B('is_company','=',False)%5D%22,%0A required=True,change_default=True, select=True, track_visibility='always'%0A ),%0A 'patient_id': fields.many2one(%0A 'res.partner', 'Patient',%0A domain = %22%5B('is_company','=',False)%5D%22,%0A required=True,change_default=True, select=True, track_visibility='always'%0A ),%0A 'hospital_id': fields.many2one(%0A 'res.partner', 'Hospital',%0A domain = %22%5B('is_company','=',True)%5D%22,%0A required=True,change_default=True, select=True, track_visibility='always'),%0A 'box_ids': fields.many2many(%0A 'stock.tracking','project_stock_track_rel','project_id','stock_tracking_id',%0A string='Used Surgical Boxes ',%0A help=%22Selecione as Caixas Cir%C3%BArgicas para a Cirurgia%22%0A )%0A %7D%0A# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:%0A
|
|
e87982d03edeb7c16d3c183309adfff4be50d168
|
Add Qt4 file to start on creating a Qt-based GUI
|
gui/qt.py
|
gui/qt.py
|
Python
| 0 |
@@ -0,0 +1,439 @@
+from lib.version import AMON_VERSION%0Afrom lib.keybase import KeybaseUser%0Afrom lib.gmail import GmailUser%0Afrom lib.addresses import AddressBook%0Aimport lib.gpg as gpg%0A%0Aimport sys%0Aimport logging%0Aimport json%0Afrom PyQt4 import QtGui%0A%0A%0Aclass Amon(QtGui.QMainWindow):%0A def __init__(self):%0A super(Amon, self).__init__()%0A self.keybase_user = KeybaseUser()%0A self.gmail = GmailUser()%0A self.address_book = AddressBook()%0A
|
|
3972c4a16894732db418a2d04f36b5104e0fac86
|
add rms code in own namespace
|
tkp/quality/rms.py
|
tkp/quality/rms.py
|
Python
| 0.000001 |
@@ -0,0 +1,822 @@
+from tkp.utility import nice_format%0A%0A%0Adef rms_invalid(rms, noise, low_bound=1, high_bound=50):%0A %22%22%22%0A Is the RMS value of an image too high?%0A%0A :param rms: RMS value of an image, can be computed with%0A tkp.quality.statistics.rms%0A :param noise: Theoretical noise level of instrument, can be calculated with%0A tkp.lofar.noise.noise_level%0A :param low_bound: multiplied with noise to define lower threshold%0A :param high_bound: multiplied with noise to define upper threshold%0A :returns: True/False%0A %22%22%22%0A if (rms %3C noise * low_bound) or (rms %3E noise * high_bound):%0A ratio = rms / noise%0A return %22rms value (%25s) is %25s times theoretical noise (%25s)%22 %25 %5C%0A (nice_format(rms), nice_format(ratio), nice_format(noise))%0A else:%0A return False
|
|
1613bde53cfda3d38d7e62c6c91f3d6c5407fb9c
|
Add script inspect_checkpoint.py to check if a model checkpoint is corrupted with NaN/inf values
|
inspect_checkpoint.py
|
inspect_checkpoint.py
|
Python
| 0.000001 |
@@ -0,0 +1,1312 @@
+%22%22%22%0ASimple script that checks if a checkpoint is corrupted with any inf/NaN values. Run like this:%0A python inspect_checkpoint.py model.12345%0A%22%22%22%0A%0Aimport tensorflow as tf%0Aimport sys%0Aimport numpy as np%0A%0A%0Aif __name__ == '__main__':%0A if len(sys.argv) != 2:%0A raise Exception(%22Usage: python inspect_checkpoint.py %3Cfile_name%3E%5CnNote: Do not include the .data .index or .meta part of the model checkpoint in file_name.%22)%0A file_name = sys.argv%5B1%5D%0A reader = tf.train.NewCheckpointReader(file_name)%0A var_to_shape_map = reader.get_variable_to_shape_map()%0A%0A finite = %5B%5D%0A all_infnan = %5B%5D%0A some_infnan = %5B%5D%0A%0A for key in sorted(var_to_shape_map.keys()):%0A tensor = reader.get_tensor(key)%0A if np.all(np.isfinite(tensor)):%0A finite.append(key)%0A else:%0A if not np.any(np.isfinite(tensor)):%0A all_infnan.append(key)%0A else:%0A some_infnan.append(key)%0A%0A print %22%5CnFINITE VARIABLES:%22%0A for key in finite: print key%0A%0A print %22%5CnVARIABLES THAT ARE ALL INF/NAN:%22%0A for key in all_infnan: print key%0A%0A print %22%5CnVARIABLES THAT CONTAIN SOME FINITE, SOME INF/NAN VALUES:%22%0A for key in some_infnan: print key%0A%0A print %22%22%0A if not all_infnan and not some_infnan:%0A print %22CHECK PASSED: checkpoint contains no inf/NaN values%22%0A else:%0A print %22CHECK FAILED: checkpoint contains some inf/NaN values%22%0A
|
|
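The checkpoint inspector is written against the TF1/Python 2 APIs (print statements, tf.train.NewCheckpointReader). The core finite/inf/NaN triage is plain NumPy, though; a standalone sketch that works on any {name: array} mapping, with no TensorFlow dependency:

import numpy as np

def classify_tensors(tensors):
    """Split a {name: ndarray} dict by whether its values are finite."""
    finite, all_bad, some_bad = [], [], []
    for name, t in sorted(tensors.items()):
        mask = np.isfinite(t)
        if mask.all():
            finite.append(name)
        elif not mask.any():
            all_bad.append(name)
        else:
            some_bad.append(name)
    return finite, all_bad, some_bad

# One healthy tensor, one poisoned with a NaN:
f, a, s = classify_tensors({'w': np.ones(3), 'b': np.array([1.0, np.nan])})
assert f == ['w'] and s == ['b'] and a == []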
d5e16fdf73eb281da3541fa7a0e3f8792b83faeb
|
bump to 0.3.0
|
tproxy/__init__.py
|
tproxy/__init__.py
|
# -*- coding: utf-8 -
#
# This file is part of tproxy released under the MIT license.
# See the NOTICE for more information.
version_info = (0, 2, 4)
__version__ = ".".join(map(str, version_info))
|
Python
| 0.000019 |
@@ -143,12 +143,12 @@
(0,
-2, 4
+3, 0
)%0A__
|
bb5a94208bb3a96995182b773998dbec4ebf7667
|
Test wrapper py script
|
py_scripts/EoSeval_test.py
|
py_scripts/EoSeval_test.py
|
Python
| 0.000001 |
@@ -0,0 +1,490 @@
+# -*- coding: utf-8 -*-%0A%22%22%22%0ACode description goes in here%0A%22%22%22%0Aimport numpy%0Aimport EoSeq%0Afrom scipy.optimize import curve_fit%0A%0A# Prompt user for filename string%0A# filename = raw_input(%22Please enter a file path for P and V data%22)%0A%0A# Load in data file%0A# data = numpy.loadtxt(filename, delimiter = ',')%0A%0Adata = numpy.loadtxt(%22/Users/Grace/Documents/EoSeval/data/ferropericlase_Mao_2011_2000K.csv%22, delimiter = ',')%0A%0Ainit_params = %5B0,0,0,0%5D%0A%0Atestfunc = BM3EOS(init_params)%0A%0ABM3 = EOS(testfunc)%0A%0A
|
|
9a082b04973a9927014df496aa31f5c05e8be6ca
|
add 143
|
python/143_reorder_list.py
|
python/143_reorder_list.py
|
Python
| 0.999998 |
@@ -0,0 +1,1657 @@
+%22%22%22%0AGiven a singly linked list L: L0%E2%86%92L1%E2%86%92%E2%80%A6%E2%86%92Ln-1%E2%86%92Ln,%0Areorder it to: L0%E2%86%92Ln%E2%86%92L1%E2%86%92Ln-1%E2%86%92L2%E2%86%92Ln-2%E2%86%92%E2%80%A6%0A%0AYou must do this in-place without altering the nodes' values.%0A%0AFor example,%0AGiven %7B1,2,3,4%7D, reorder it to %7B1,4,2,3%7D.%0A%22%22%22%0A# Definition for singly-linked list.%0A# class ListNode(object):%0A# def __init__(self, x):%0A# self.val = x%0A# self.next = None%0A%0Aclass Solution(object):%0A def reorderList(self, head):%0A %22%22%22%0A :type head: ListNode%0A :rtype: void Do not return anything, modify head in-place instead.%0A %22%22%22%0A if not head or not head.next:%0A return%0A slow, fast = head, head.next%0A while fast and fast.next:%0A slow = slow.next%0A fast = fast.next.next%0A middlehead = slow.next%0A slow.next = None%0A%0A if middlehead and middlehead.next:%0A pre = middlehead%0A cur = middlehead.next%0A nxt = middlehead.next.next%0A pre.next = None%0A while nxt:%0A cur.next = pre%0A pre = cur%0A cur = nxt%0A nxt = nxt.next%0A cur.next = pre%0A head2 = cur%0A elif middlehead:%0A head2 = middlehead%0A%0A p, q = head, head2%0A tmp1 = head.next%0A tmp2 = head2.next%0A while tmp1 and tmp2:%0A p.next = q%0A q.next = tmp1%0A p, q = tmp1, tmp2%0A tmp1, tmp2 = tmp1.next, tmp2.next%0A p.next = q%0A if tmp1:%0A q.next = tmp1%0A%0Afrom singlyLinkedList import singlyLinkedList%0Aa = singlyLinkedList(%5B1,2,3,4,5,6%5D)%0Aa.printNodes()%0Asoln = Solution()%0Asoln.reorderList(a.head)%0Aa.printNodes()%0A
|
|
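The solution above reverses the second half in place for O(1) extra space. A shorter variant that trades O(n) memory for simpler pointer logic -- collect the nodes in a list and re-link from both ends (a sketch, assuming the same ListNode shape):

def reorder_list_with_buffer(head):
    """Reorder L0->L1->...->Ln into L0->Ln->L1->Ln-1->... via a node buffer."""
    if not head:
        return
    nodes = []
    cur = head
    while cur:
        nodes.append(cur)
        cur = cur.next
    i, j = 0, len(nodes) - 1
    while i < j:
        nodes[i].next = nodes[j]   # hook front node to back node
        i += 1
        if i == j:
            break
        nodes[j].next = nodes[i]   # hook back node to next front node
        j -= 1
    nodes[i].next = None           # terminate the reordered list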
38756d3fd7ac1d858d45f256e8d4ad118ecbf531
|
add basic admin file
|
emencia/django/socialaggregator/admin.py
|
emencia/django/socialaggregator/admin.py
|
Python
| 0 |
@@ -0,0 +1,583 @@
+%22%22%22Admin for parrot.gallery%22%22%22%0Afrom django.contrib import admin%0Afrom django.utils.translation import ugettext_lazy as _%0A%0A%0Afrom emencia.django.socialaggregator.models import Feed%0Afrom emencia.django.socialaggregator.models import Aggregator%0Afrom emencia.django.socialaggregator.models import Ressource%0A%0Aclass FeedAdmin(admin.ModelAdmin):%0A pass%0A%0Aadmin.site.register(Feed, FeedAdmin)%0A%0Aclass AggregatorAdmin(admin.ModelAdmin):%0A pass%0A%0Aadmin.site.register(Aggregator, AggregatorAdmin)%0A%0Aclass RessourceAdmin(admin.ModelAdmin):%0A pass%0A%0Aadmin.site.register(Ressource, RessourceAdmin)%0A
|
|
8b7db3fc9b90897c0e8da6d6b63d12e79754c625
|
Solve Knowit2019/19
|
knowit2019/19.py
|
knowit2019/19.py
|
Python
| 0.999966 |
@@ -0,0 +1,436 @@
+def hidden_palindrome(n):%0A n_s = str(n)%0A%0A if n_s == n_s%5B::-1%5D:%0A return False%0A%0A s = str(n + int(n_s%5B::-1%5D))%0A%0A return s == s%5B::-1%5D%0A%0A%0Adef test_hidden_palindrome():%0A assert hidden_palindrome(38)%0A assert not hidden_palindrome(49)%0A%0A%0Aif __name__ == '__main__':%0A s = 0%0A%0A for x in range(1, 123454321+1):%0A if x %25 1000000 == 0:%0A print(x)%0A%0A s += x if hidden_palindrome(x) else 0%0A%0A print(s)
|
|
605d34f5bb5736851d71e5a5280ad6b889d6189f
|
Check that verbose output is sound.
|
sklearn/neural_network/tests/test_rbm.py
|
sklearn/neural_network/tests/test_rbm.py
|
import sys
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_equal
from sklearn.datasets import load_digits
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.neural_network import BernoulliRBM
from sklearn.utils.validation import assert_all_finite
np.seterr(all='warn')
Xdigits = load_digits().data
Xdigits -= Xdigits.min()
Xdigits /= Xdigits.max()
def test_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, n_iter=7, random_state=9)
rbm.fit(X)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
# in-place tricks shouldn't have modified X
assert_array_equal(X, Xdigits)
def test_transform():
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=16, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
Xt1 = rbm1.transform(X)
Xt2 = rbm1._mean_hiddens(X)
assert_array_equal(Xt1, Xt2)
def test_sample_hiddens():
rng = np.random.RandomState(0)
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=2, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
h = rbm1._mean_hiddens(X[0])
hs = np.mean([rbm1._sample_hiddens(X[0], rng) for i in range(100)], 0)
assert_almost_equal(h, hs, decimal=1)
def test_fit_gibbs():
"""
Gibbs on the RBM hidden layer should be able to recreate [[0], [1]]
from the same input
"""
rng = np.random.RandomState(42)
X = np.array([[0.], [1.]])
rbm1 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
# you need that much iters
rbm1.fit(X)
assert_almost_equal(rbm1.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm1.gibbs(X), X)
return rbm1
def test_fit_gibbs_sparse():
"""
Gibbs on the RBM hidden layer should be able to recreate [[0], [1]] from
the same input even when the input is sparse, and test against non-sparse
"""
rbm1 = test_fit_gibbs()
rng = np.random.RandomState(42)
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm2 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
rbm2.fit(X)
assert_almost_equal(rbm2.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm2.gibbs(X), X.toarray())
assert_almost_equal(rbm1.components_, rbm2.components_)
def test_gibbs_smoke():
""" just seek if we don't get NaNs sampling the full digits dataset """
rng = np.random.RandomState(42)
X = Xdigits
rbm1 = BernoulliRBM(n_components=42, batch_size=10,
n_iter=20, random_state=rng)
rbm1.fit(X)
X_sampled = rbm1.gibbs(X)
assert_all_finite(X_sampled)
def test_score_samples():
"""Check that the pseudo likelihood is computed without clipping.
http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
"""
rng = np.random.RandomState(42)
X = np.vstack([np.zeros(1000), np.ones(1000)])
rbm1 = BernoulliRBM(n_components=10, batch_size=2,
n_iter=10, random_state=rng)
rbm1.fit(X)
assert((rbm1.score_samples(X) < -300).all())
def test_rbm_verbose():
rbm = BernoulliRBM(n_iter=2, verbose=10)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
rbm.fit(Xdigits)
finally:
sys.stdout = old_stdout
def test_sparse_and_verbose():
"""
Make sure RBM works with sparse input when verbose=True
"""
old_stdout = sys.stdout
sys.stdout = StringIO()
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm = BernoulliRBM(n_components=2, batch_size=2, n_iter=1,
random_state=42, verbose=True)
try:
rbm.fit(X)
finally:
sys.stdout = old_stdout
|
Python
| 0.999999 |
@@ -3,16 +3,26 @@
port sys
+%0Aimport re
%0A%0Aimport
@@ -3909,10 +3909,24 @@
r=1,
-
%0A
+
@@ -3988,32 +3988,232 @@
.fit(X)%0A
-finally:
+ s = sys.stdout.getvalue()%0A # make sure output is sound%0A assert(re.match(r%22Iteration 0, pseudo-likelihood = -?(%5Cd)+(%5C.%5Cd+)?%22,%0A s))%0A finally:%0A sio = sys.stdout
%0A sys
|
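The assertion added by this diff captures stdout and regex-matches the first progress line. The capture-and-match idiom in isolation, on Python 3 (the test file itself pulls cStringIO through six for Python 2 compatibility):

import io
import re
import sys

def capture_stdout(fn, *args, **kwargs):
    """Run fn with stdout redirected and return everything it printed."""
    old_stdout = sys.stdout
    sys.stdout = io.StringIO()
    try:
        fn(*args, **kwargs)
        return sys.stdout.getvalue()
    finally:
        sys.stdout = old_stdout

out = capture_stdout(print, "Iteration 0, pseudo-likelihood = -123.45")
assert re.match(r"Iteration 0, pseudo-likelihood = -?(\d)+(\.\d+)?", out)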
58d19ea654e0c8d250f46b0d72191e48b4bc8588
|
add tests for encryption/decryption in awx.main.utils.common
|
awx/main/tests/unit/common/test_common.py
|
awx/main/tests/unit/common/test_common.py
|
Python
| 0.000001 |
@@ -0,0 +1,1157 @@
+from awx.conf.models import Setting%0Afrom awx.main.utils import common%0A%0A%0Adef test_encrypt_field():%0A field = Setting(pk=123, value='ANSIBLE')%0A encrypted = common.encrypt_field(field, 'value')%0A assert encrypted == '$encrypted$AES$Ey83gcmMuBBT1OEq2lepnw=='%0A assert common.decrypt_field(field, 'value') == 'ANSIBLE'%0A%0A%0Adef test_encrypt_field_without_pk():%0A field = Setting(value='ANSIBLE')%0A encrypted = common.encrypt_field(field, 'value')%0A assert encrypted == '$encrypted$AES$8uIzEoGyY6QJwoTWbMFGhw=='%0A assert common.decrypt_field(field, 'value') == 'ANSIBLE'%0A%0A%0Adef test_encrypt_subfield():%0A field = Setting(value=%7B'name': 'ANSIBLE'%7D)%0A encrypted = common.encrypt_field(field, 'value', subfield='name')%0A assert encrypted == '$encrypted$AES$8uIzEoGyY6QJwoTWbMFGhw=='%0A assert common.decrypt_field(field, 'value', subfield='name') == 'ANSIBLE'%0A%0A%0Adef test_encrypt_field_with_ask():%0A encrypted = common.encrypt_field(Setting(value='ASK'), 'value', ask=True)%0A assert encrypted == 'ASK'%0A%0A%0Adef test_encrypt_field_with_empty_value():%0A encrypted = common.encrypt_field(Setting(value=None), 'value')%0A assert encrypted is None%0A
|
|
8ae3e44b0a43f382c98194b9caa097b62de899ef
|
Add script to save ner data to a csv file
|
nlpppln/save_ner_data.py
|
nlpppln/save_ner_data.py
|
Python
| 0 |
@@ -0,0 +1,1040 @@
+#!/usr/bin/env python%0Aimport click%0Aimport os%0Aimport codecs%0Aimport json%0Aimport pandas as pd%0A%0A%[email protected]()%[email protected]('input_dir', type=click.Path(exists=True))%[email protected]('output_file', type=click.Path())%0Adef nerstats(input_dir, output_file):%0A output_dir = os.path.dirname(output_file)%0A if not os.path.exists(output_dir):%0A os.makedirs(output_dir)%0A%0A frames = %5B%5D%0A%0A files = os.listdir(input_dir)%0A for fi in files:%0A with codecs.open(os.path.join(input_dir, fi), encoding='utf-8') as f:%0A saf = json.load(f)%0A data = %7B%7D%0A data%5B'word'%5D = %5Bt%5B'word'%5D for t in saf%5B'tokens'%5D if 'ne' in t.keys()%5D%0A data%5B'ner'%5D = %5Bt%5B'ne'%5D for t in saf%5B'tokens'%5D if 'ne' in t.keys()%5D%0A data%5B'w_id'%5D = %5Bt%5B'id'%5D for t in saf%5B'tokens'%5D if 'ne' in t.keys()%5D%0A data%5B'text'%5D = %5Bfi for t in saf%5B'tokens'%5D if 'ne' in t.keys()%5D%0A%0A frames.append(pd.DataFrame(data=data))%0A%0A df = pd.concat(frames, ignore_index=True)%0A df.to_csv(output_file)%0A%0A%0Aif __name__ == '__main__':%0A nerstats()%0A
|
|
46a40e7e8fc424cc7e7a601fc99ab2d852cd0980
|
Add example GCP CLI tool. (#69)
|
examples/gcp_cli.py
|
examples/gcp_cli.py
|
Python
| 0 |
@@ -0,0 +1,2618 @@
+# -*- coding: utf-8 -*-%0A# Copyright 2020 Google Inc.%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22);%0A# you may not use this file except in compliance with the License.%0A# You may obtain a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS,%0A# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0A# See the License for the specific language governing permissions and%0A# limitations under the License.%0A%22%22%22Demo CLI tool for GCP.%22%22%22%0A%0Aimport argparse%0Afrom libcloudforensics import gcp%0A%0A%0Adef ListInstances(args):%0A %22%22%22List GCE instances in GCP project.%0A%0A Args:%0A args (dict): Arguments from ArgumentParser.%0A %22%22%22%0A%0A project = gcp.GoogleCloudProject(args.project)%0A instances = project.ListInstances()%0A%0A print('Instances found:')%0A for instance in instances:%0A bootdisk_name = instances%5Binstance%5D.GetBootDisk().name%0A print('Name: %7B0:s%7D, Bootdisk: %7B1:s%7D'.format(instance, bootdisk_name))%0A%0A%0Adef ListDisks(args):%0A %22%22%22List GCE disks in GCP project.%0A%0A Args:%0A args (dict): Arguments from ArgumentParser.%0A %22%22%22%0A%0A project = gcp.GoogleCloudProject(args.project)%0A disks = project.ListDisks()%0A print('Disks found:')%0A for disk in disks:%0A print('Name: %7B0:s%7D, Zone: %7B1:s%7D'.format(disk, disks%5Bdisk%5D.zone))%0A%0A%0Adef CreateDiskCopy(args):%0A %22%22%22Copy GCE disks to other GCP project.%0A%0A Args:%0A args (dict): Arguments from ArgumentParser.%0A %22%22%22%0A%0A disk = gcp.CreateDiskCopy(%0A args.project, args.dstproject, args.instancename, args.zone)%0A%0A print('Disk copy completed.')%0A print('Name: %7B0:s%7D'.format(disk.name))%0A%0A%0Aif __name__ == '__main__':%0A parser = argparse.ArgumentParser(description='Demo CLI tool for GCP')%0A parser.add_argument('--project', help='The GCP project name')%0A%0A subparsers = parser.add_subparsers()%0A%0A parser_listdisks = subparsers.add_parser('listdisks')%0A parser_listdisks.set_defaults(func=ListDisks)%0A%0A parser_listdisks = subparsers.add_parser('listinstances')%0A parser_listdisks.set_defaults(func=ListInstances)%0A%0A parser_creatediskcopy = subparsers.add_parser('creatediskcopy')%0A parser_creatediskcopy.add_argument(%0A '--dstproject', help='Destination GCP project')%0A parser_creatediskcopy.add_argument('--zone', help='Zone to create disk in')%0A parser_creatediskcopy.add_argument(%0A '--instancename', help='Instance to copy disk from')%0A parser_creatediskcopy.set_defaults(func=CreateDiskCopy)%0A%0A parsed_args = parser.parse_args()%0A if parsed_args.func:%0A parsed_args.func(parsed_args)%0A
|
|
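The CLI above uses the standard argparse subcommand pattern: each subparser registers its handler with set_defaults(func=...) and main dispatches through args.func. The skeleton with the GCP specifics stripped out (a generic sketch, not libcloudforensics code; the names here are made up):

import argparse

def cmd_list(args):
    print("listing for project", args.project)

def cmd_copy(args):
    print("copying", args.instancename, "into", args.dstproject)

parser = argparse.ArgumentParser(description='Subcommand skeleton')
parser.add_argument('--project', help='Project name')
subparsers = parser.add_subparsers()

p_list = subparsers.add_parser('list')
p_list.set_defaults(func=cmd_list)

p_copy = subparsers.add_parser('copy')
p_copy.add_argument('--dstproject')
p_copy.add_argument('--instancename')
p_copy.set_defaults(func=cmd_copy)

args = parser.parse_args(['--project', 'demo', 'list'])
args.func(args)  # dispatches to cmd_list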
b773c0d7f5ae3b223b8a02ff29d8fb6313b8445f
|
Version bump
|
jrnl/__init__.py
|
jrnl/__init__.py
|
#!/usr/bin/env python
# encoding: utf-8
"""
jrnl is a simple journal application for your command line.
"""
__title__ = 'jrnl'
__version__ = '1.2.1'
__author__ = 'Manuel Ebert'
__license__ = 'MIT License'
__copyright__ = 'Copyright 2013 Manuel Ebert'
from . import Journal
from . import jrnl
from .jrnl import cli
|
Python
| 0.000001 |
@@ -144,11 +144,11 @@
'1.
-2.1
+3.0
'%0A__
|
9cc4067d581f6a97136e0f186dc8aa1dbc734e47
|
verify that the dynamic oracle for ArcEager can reach all projective parses
|
hals/transition_system/arc_eager_test.py
|
hals/transition_system/arc_eager_test.py
|
Python
| 0 |
@@ -0,0 +1,1768 @@
+from copy import copy, deepcopy%0Aimport numpy as np%0Afrom unittest import TestCase%0A%0Afrom transition_system.arc_eager import ArcEager, ArcEagerDynamicOracle%0A%0A%0Adef generate_all_projective_parses(size):%0A arc_eager = ArcEager(1)%0A initial = arc_eager.state(size)%0A stack = %5B%5D%0A stack.append(initial)%0A%0A parses = set()%0A while len(stack):%0A state = stack.pop()%0A if arc_eager.is_final(state):%0A heads, labels = arc_eager.extract_parse(state)%0A parses.add(tuple(heads))%0A else:%0A for action in arc_eager.allowed(state):%0A state_copy = deepcopy(state)%0A arc_eager.perform(state_copy, action)%0A stack.append(state_copy)%0A%0A return parses%0A%0Aclass MockSentence:%0A def __init__(self, num_tokens):%0A self.adjacency = np.zeros((num_tokens, num_tokens), dtype=bool)%0A%0A%0Aclass TestArcEager(TestCase):%0A%0A def test_dynamic_oracle_is_complete(self):%0A SIZE = 4%0A arc_eager = ArcEager(1)%0A dyn_oracle = ArcEagerDynamicOracle()%0A%0A valid_parses = generate_all_projective_parses(SIZE)%0A for valid_parse in valid_parses:%0A sent = MockSentence(len(valid_parse) + 1)%0A for v, u in enumerate(valid_parse):%0A sent.adjacency%5Bu, v%5D = True%0A%0A state = arc_eager.state(SIZE)%0A%0A while not arc_eager.is_final(state):%0A allowed_actions = arc_eager.allowed(state)%0A costs = dyn_oracle(state, sent, allowed_actions)%0A self.assertEqual(costs.min(), 0)%0A index = costs.argmin()%0A arc_eager.perform(state, allowed_actions%5Bindex%5D)%0A%0A heads, labels = arc_eager.extract_parse(state)%0A self.assertEqual(tuple(heads), valid_parse)
|
|
db380d8e6a8dfa5444f82a0978fad3494d923278
|
Add tests of generate_matrix
|
tests/chainer_tests/testing_tests/test_matrix.py
|
tests/chainer_tests/testing_tests/test_matrix.py
|
Python
| 0.000003 |
@@ -0,0 +1,1794 @@
+import unittest%0A%0Aimport numpy%0A%0Afrom chainer import testing%0Afrom chainer.testing import condition%0A%0A%[email protected](*testing.product(%7B%0A 'dtype': %5B%0A numpy.float16, numpy.float32, numpy.float64,%0A numpy.complex64, numpy.complex128,%0A %5D,%0A 'x_s_shapes': %5B%0A ((2, 2), (2,)),%0A ((2, 3), (2,)),%0A ((3, 2), (2,)),%0A ((2, 3, 4), (2, 3)),%0A ((2, 4, 3), (2, 3)),%0A ((0, 2, 3), (0, 2)),%0A %5D,%0A%7D))%0Aclass TestGenerateMatrix(unittest.TestCase):%0A%0A def test_generate_matrix(self):%0A dtype = self.dtype%0A x_shape, s_shape = self.x_s_shapes%0A sv = 0.5 + numpy.random.random(s_shape).astype(dtype().real.dtype)%0A x = testing.generate_matrix(x_shape, dtype=dtype, singular_values=sv)%0A assert x.shape == x_shape%0A%0A s = numpy.linalg.svd(%0A x.astype(numpy.complex128), full_matrices=False, compute_uv=False,%0A )%0A sv_sorted = numpy.sort(sv, axis=-1)%5B..., ::-1%5D%0A%0A rtol = 1e-3 if dtype == numpy.float16 else 1e-7%0A numpy.testing.assert_allclose(s, sv_sorted, rtol=rtol)%0A%0A%0Aclass TestGenerateMatrixInvalid(unittest.TestCase):%0A%0A def test_no_singular_values(self):%0A with self.assertRaises(TypeError):%0A testing.generate_matrix((2, 2))%0A%0A def test_invalid_shape(self):%0A with self.assertRaises(ValueError):%0A testing.generate_matrix((2,), singular_values=1)%0A%0A def test_invalid_dtype(self):%0A with self.assertRaises(ValueError):%0A testing.generate_matrix(%0A (2, 2), dtype=numpy.int32, singular_values=1)%0A%0A def test_shape_mismatch(self):%0A with self.assertRaises(ValueError):%0A testing.generate_matrix(%0A (2, 2), singular_values=numpy.ones(3))%0A%0A%0Atesting.run_module(__name__, __file__)%0A
|
|
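The generate_matrix helper under test builds matrices with prescribed singular values. One standard construction -- and presumably roughly what the helper does -- is U @ diag(s) @ V.T with random orthogonal factors taken from a QR decomposition. A sketch for the square real case:

import numpy as np

def matrix_with_singular_values(sv, rng=None):
    """Build a square real matrix whose singular values are exactly sv."""
    if rng is None:
        rng = np.random.RandomState(0)
    n = len(sv)
    u, _ = np.linalg.qr(rng.randn(n, n))  # random orthogonal U
    v, _ = np.linalg.qr(rng.randn(n, n))  # random orthogonal V
    return u @ np.diag(sv) @ v.T

sv = np.array([3.0, 2.0, 0.5])            # already sorted descending
x = matrix_with_singular_values(sv)
np.testing.assert_allclose(np.linalg.svd(x, compute_uv=False), sv)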
3cad51e08ef4c1dcfb11cbb8c32272328b31015a
|
Prepare v1.2.306.dev
|
flexget/_version.py
|
flexget/_version.py
|
"""
Current FlexGet version.
This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
release scripts in continuous integration. Should (almost) never be set manually.
The version should always be set to the <next release version>.dev
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
__version__ = '1.2.305'
|
Python
| 0.000002 |
@@ -443,7 +443,11 @@
2.30
-5
+6.dev
'%0A
|
0ef5aa5abaf220579915e4068fd61513114b0be6
|
Fix evolver get_mif()
|
joommf/drivers/evolver.py
|
joommf/drivers/evolver.py
|
import textwrap
class Minimiser(object):
def __init__(self, m_init, Ms, name, d_mxHxm=0.1):
self.m_init = m_init
self.Ms = Ms
self.name = name
self.d_mxHxm = d_mxHxm
def get_mif(self):
mif = textwrap.dedent("""\
Specify Oxs_CGEvolve:evolver {}
Specify Oxs_MinDriver {{
evolver :evolve
mesh :mesh
Ms {}
m0 {{ Oxs_UniformVectorField {{
vector {{{:.5f}, {:.5f}, {:.5f}}}
}} }}
stopping_mxHxm {}
basename {}
vector_field_output_format {{text \%#.8g}}
}}
""")
return mif.format( # self.solver,
# self.alpha,
# self.gamma,
self.dm,
self.t,
self.Ms,
self.m_init[0],
self.m_init[1],
self.m_init[2],
self.stopping_mxHxm,
self.name
)
class LLG(object):
def __init__(self, t, m_init, Ms, alpha, gamma,
name, solver='rkf54', dm=0.01):
"""
Note:
solver options passed as a string - options
rk2, rk4, rkf54, rkf54m, rkf54s
"""
self.t = t
self.m_init = m_init
self.Ms = Ms
self.alpha = alpha
self.gamma = gamma
self.name = name
self.solver = solver
self.dm = dm
def get_mif(self):
llg_mif = textwrap.dedent("""\
Specify Oxs_RungeKuttaEvolve:evolve {{
method ${} alpha {:.5f}
gamma_G {:.5f}
start_dm {:.5f}
}}
Specify Oxs_TimeDriver [subst {{
evolver :evolve
stopping_time {:.2e}
stage_count 1
mesh :mesh
Ms {:.5e}
m0 {{ Oxs_UniformVectorField {{
vector {{{:.5f}, {:.5f}, {:.5f}}}
}} }}
basename {}
vector_field_output_format {{text \%#.8g}}
}}]
""")
return llg_mif.format(self.solver,
self.alpha,
self.gamma,
self.dm,
self.t,
self.Ms,
self.m_init[0],
self.m_init[1],
self.m_init[2],
self.name
)
if __name__ == '__main__':
llg = LLG(1e-9, (0, 0, 1), 1e6, 0.1, 2.21e5, 'test')
f = open('test_llg.mif', 'w')
f.write(llg.get_mif())
f.close()
|
Python
| 0.000001 |
@@ -295,16 +295,18 @@
volver %7B
+%7B%7D
%7D%0A%0A
@@ -638,117 +638,8 @@
mat(
- # self.solver,%0A # self.alpha,%0A # self.gamma,%0A self.dm,%0A self.t,
%0A
@@ -757,24 +757,17 @@
self.
-stopping
+d
_mxHxm,%0A
|
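The bug fixed here is a positional str.format template drifting out of sync with its argument list (the minimiser was still being fed LLG-style arguments). Keyword placeholders make that whole class of bug impossible; a hypothetical rewrite of the minimiser template, not joommf's actual code:

import textwrap

MINIMISER_MIF = textwrap.dedent("""\
    Specify Oxs_CGEvolve:evolver {{}}

    Specify Oxs_MinDriver {{
      evolver :evolve
      mesh :mesh
      Ms {Ms}
      m0 {{ Oxs_UniformVectorField {{
        vector {{{mx:.5f}, {my:.5f}, {mz:.5f}}}
      }} }}
      stopping_mxHxm {d_mxHxm}
      basename {name}
    }}
    """)

# A misordered or missing argument now fails loudly with a KeyError
# instead of silently formatting the wrong numbers into the file.
mif = MINIMISER_MIF.format(Ms=1e6, mx=0.0, my=0.0, mz=1.0,
                           d_mxHxm=0.1, name='test')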
05e7db377b7f0224ec97d5f96c387d711e1e0f23
|
Add problem
|
src/SRM-144/time.py
|
src/SRM-144/time.py
|
Python
| 0.03246 |
@@ -0,0 +1,318 @@
+%0Aclass Time:%0A%0A def whatTime(self, seconds):%0A hours = seconds / 3600%0A a = 3600%0A leftover = seconds - hours * 3600%0A minutes = leftover / 60%0A final_sec = seconds - hours * 3600 - minutes * 60%0A final = str(hours) + %22:%22 + str(minutes)+ %22:%22 + str(final_sec)%0A return final%0A
|
|
a1de448ff755dc938d70fc41e20b521dc6d4bfb6
|
Fix logging
|
check_mesos.py
|
check_mesos.py
|
#!/usr/bin/env python
import nagiosplugin
import argparse
import logging
import re
import requests
INFINITY = float('inf')
HEALTHY = 1
UNHEALTHY = -1
class MesosMaster(nagiosplugin.Resource):
def __init__(self, baseuri, frameworks):
self.baseuri = baseuri
self.frameworks = frameworks
def probe(self):
master_uri=self.baseuri
logging.info('Looking at %s for redirect', master_uri)
try:
response = requests.head(master_uri + '/master/redirect')
if response.status_code != 307:
yield nagiosplugin.Metric('master redirect', UNHEALTHY)
master_uri = response.headers['Location']
# yield the master redirect later, the summary takes the first check which we want to be 'master health'
except requests.exceptions.RequestException, e:
logging.error('master redirect %s', e)
yield nagiosplugin.Metric('master redirect', UNHEALTHY)
return
logging.info('Base URI is redirected to %s', master_uri)
response = requests.get(master_uri + '/health')
logging.debug('Response from %s is %s', response.request.url, response)
if response.status_code in [200, 204]:
yield nagiosplugin.Metric('master health', HEALTHY)
else:
yield nagiosplugin.Metric('master health', UNHEALTHY)
response = requests.get(master_uri + '/master/state.json')
logging.debug('Response from %s is %s', response.request.url, response)
state = response.json()
has_leader = len(state.get('leader', '')) > 0
yield nagiosplugin.Metric('active slaves', state['activated_slaves'])
yield nagiosplugin.Metric('active leader', 1 if has_leader else 0)
# now we can yield the redirect status, from above
yield nagiosplugin.Metric('master redirect', HEALTHY)
for framework_regex in self.frameworks:
framework = None
for candidate in state['frameworks']:
if re.search(framework_regex, candidate['name']) is not None:
framework = candidate
unregistered_time = INFINITY
if framework is not None:
unregistered_time = framework['unregistered_time']
if not framework['active'] and unregistered_time == 0:
unregistered_time = INFINITY
yield nagiosplugin.Metric('framework ' + framework_regex, unregistered_time, context='framework')
@nagiosplugin.guarded
def main():
argp = argparse.ArgumentParser()
argp.add_argument('-H', '--host', required=True,
help='The hostname of a Mesos master to check')
argp.add_argument('-P', '--port', default=5050,
help='The Mesos master HTTP port - defaults to 5050')
argp.add_argument('-n', '--slaves', default=1,
help='The minimum number of slaves the cluster must be running')
argp.add_argument('-F', '--framework', default=[], action='append',
help='Check that a framework is registered matching the given regex, may be specified multiple times')
argp.add_argument('-v', '--verbose', action='count', default=0,
help='increase output verbosity (use up to 3 times)')
args = argp.parse_args()
unhealthy_range = nagiosplugin.Range('%d:%d' % (HEALTHY - 1, HEALTHY + 1))
slave_range = nagiosplugin.Range('%s:' % (args.slaves,))
check = nagiosplugin.Check(
MesosMaster('http://%s:%d' % (args.host, args.port), args.framework),
nagiosplugin.ScalarContext('master redirect', unhealthy_range, unhealthy_range),
nagiosplugin.ScalarContext('master health', unhealthy_range, unhealthy_range),
nagiosplugin.ScalarContext('active slaves', slave_range, slave_range),
nagiosplugin.ScalarContext('active leader', '1:1', '1:1'),
nagiosplugin.ScalarContext('framework', '0:0', '0:0'))
check.main(verbose=args.verbose)
if __name__ == '__main__':
main()
|
Python
| 0.000007 |
@@ -145,16 +145,57 @@
Y = -1%0A%0A
+log = logging.getLogger(%22nagiosplugin%22)%0A%0A
class Me
@@ -382,28 +382,24 @@
euri%0A log
-ging
.info('Looki
@@ -830,20 +830,16 @@
log
-ging
.error('
@@ -945,20 +945,16 @@
%0A log
-ging
.info('B
@@ -1047,36 +1047,32 @@
health')%0A log
-ging
.debug('Response
@@ -1362,20 +1362,16 @@
%0A log
-ging
.debug('
|
61f542c215c0b45bf8b4121bc4705c760c334aa9
|
Add a SetObjectExtruderOperation class
|
cura/Settings/SetObjectExtruderOperation.py
|
cura/Settings/SetObjectExtruderOperation.py
|
Python
| 0 |
@@ -0,0 +1,1136 @@
+# Copyright (c) 2017 Ultimaker B.V.%0A# Cura is released under the terms of the AGPLv3 or higher.%0A%0Afrom UM.Scene.SceneNode import SceneNode%0Afrom UM.Operations.Operation import Operation%0A%0Afrom cura.Settings.SettingOverrideDecorator import SettingOverrideDecorator%0A%0A## Simple operation to set the extruder a certain object should be printed with.%0Aclass SetObjectExtruderOperation(Operation):%0A def __init__(self, node: SceneNode, extruder_id: str) -%3E None:%0A self._node = node%0A self._extruder_id = extruder_id%0A self._previous_extruder_id = None%0A self._decorator_added = False%0A%0A def undo(self):%0A if self._previous_extruder_id:%0A self._node.callDecoration(%22setActiveExtruder%22, self._previous_extruder_id)%0A%0A def redo(self):%0A stack = self._node.callDecoration(%22getStack%22) #Don't try to get the active extruder since it may be None anyway.%0A if not stack:%0A self._node.addDecorator(SettingOverrideDecorator())%0A%0A self._previous_extruder_id = self._node.callDecoration(%22getActiveExtruder%22)%0A self._node.callDecoration(%22setActiveExtruder%22, self._extruder_id)%0A
|
|
57c29ec11b91505cade24670cc45726a8689bb9a
|
add needed util module
|
hera_mc/cm_utils.py
|
hera_mc/cm_utils.py
|
Python
| 0.000001 |
@@ -0,0 +1,1134 @@
+# -*- mode: python; coding: utf-8 -*-%0A# Copyright 2016 the HERA Collaboration%0A# Licensed under the 2-clause BSD license.%0A%0A%22%22%22Some dumb low-level configuration management utility functions.%0A%0A%22%22%22%0A%0Afrom __future__ import print_function%0A%0Aimport datetime%0A%0Adef _get_datetime(_date,_time):%0A if _date.lower() == 'now':%0A dt_d = datetime.datetime.now()%0A else:%0A data = _date.split('/')%0A dt_d = datetime.datetime(int(data%5B2%5D)+2000,int(data%5B0%5D),int(data%5B1%5D))%0A if _time.lower() == 'now':%0A dt_t = datetime.datetime.now()%0A else:%0A data = _time.split(':')%0A dt_t = datetime.datetime(dt_d.year,dt_d.month,dt_d.day,int(data%5B0%5D),int(data%5B1%5D),0)%0A dt = datetime.datetime(dt_d.year,dt_d.month,dt_d.day,dt_t.hour,dt_t.minute)%0A return dt%0A%0A %0Adef _get_stopdate(_stop_date):%0A if _stop_date:%0A return _stop_date%0A else:%0A return datetime.datetime(2020,12,31)%0A%0Adef _is_active(current, _start_date, _stop_date):%0A _stop_date = _get_stopdate(_stop_date)%0A if current %3E _start_date and current %3C _stop_date:%0A is_active=True%0A else:%0A is_active=False%0A return is_active
|
|
860b7b30f393622dac9badd15d65bf59679580e2
|
Create utils.py
|
image_gnip/utils.py
|
image_gnip/utils.py
|
Python
| 0.000001 |
@@ -0,0 +1,3221 @@
+import os%0Aimport sys%0Aimport time%0Aimport logging.config%0Aimport json%0A%0Aclass Utils:%0A%0A @staticmethod%0A def insert_record(client, dataset_id, table_id, record):%0A %0A result = client.push_rows(dataset_id, table_id, %5Brecord%5D, None) %0A %0A if result.get('insertErrors', None):%0A %0A print %22Record: %25s%22 %25 (json.dumps(record))%0A print %22Error result: %25s%22 %25 result%0A %0A return False%0A %0A return True%0A%0A @staticmethod %0A def import_from_file(client, dataset_id, table_id, filename, single_tweet=False):%0A %0A if single_tweet:%0A %0A record = json.loads(Utils.read_file(SAMPLE_TWEET_FILE))%0A success = Utils.insert_record(client, dataset_id, table_id, record)%0A return success%0A%0A row = 0%0A with open(filename, %22r%22) as f:%0A %0A for tweet in f:%0A %0A record = json.loads(tweet)%0A %0A # ignore delete records for now %0A if record.get(%22delete%22, None):%0A continue%0A %0A record_scrubbed = Utils.scrub(record)%0A success = Utils.insert_record(client, dataset_id, table_id, record_scrubbed)%0A if not success:%0A print %22Failed row: %25s %25s%22 %25 (row, json.dumps(record))%0A return%0A else:%0A print %22Processed row: %25s%22 %25 row%0A %0A row = row + 1%0A%0A @staticmethod%0A def scrub(d):%0A %0A # d.iteritems isn't used as you can't del or the iterator breaks.%0A for key, value in d.items():%0A %0A if value is None:%0A del d%5Bkey%5D%0A elif key == 'coordinates':%0A del d%5Bkey%5D%0A elif key == 'attributes': # in 'place' object %0A del d%5Bkey%5D%0A elif key == 'bounding_box': # in 'place' object%0A del d%5Bkey%5D%0A elif key == 'retweeted_status':%0A del d%5Bkey%5D%0A elif key == 'created_at':%0A d%5Bkey%5D = Utils.convert_timestamp(value)%0A elif isinstance(value, dict):%0A Utils.scrub(value)%0A return d # For convenience%0A %0A @staticmethod%0A def convert_timestamp(str):%0A %0A ts = time.strptime(str,'%25a %25b %25d %25H:%25M:%25S +0000 %25Y')%0A ts = time.strftime('%25Y-%25m-%25d %25H:%25M:%25S', ts)%0A %0A return ts%0A %0A @staticmethod%0A def read_file(fn):%0A %0A data = %22%22%0A with open(fn, %22r%22) as f:%0A for line in f:%0A data = data + line%0A %0A return data %0A %0A @staticmethod %0A def generate_schema_from_tweet():%0A %0A record_str = Utils.read_file(SAMPLE_TWEET_FILE)%0A record = json.loads(record_str)%0A schema_str = schema_from_record(record)%0A %0A return schema_str%0A %0A @staticmethod%0A def enable_logging():%0A LOGGING_CONFIG = os.path.join(os.path.dirname(__file__), %22logging.conf%22)%0A print %22LOGGING_CONFIG%22 + str(LOGGING_CONFIG)%0A logging.config.fileConfig(LOGGING_CONFIG)%0A root = logging.getLogger(%22root%22)%0A return root%0A
|
|
0080b6744b0ed9603ecf28b826e03aef01a58d2c
|
add editmate extension
|
editmate.py
|
editmate.py
|
Python
| 0 |
@@ -0,0 +1,738 @@
+%22%22%22%0AUse TextMate as the editor%0A%0AUsage: %25load_ext editmate%0A%0ANow when you %25edit something, it opens in textmate.%0AThis is only necessary because the textmate command-line entrypoint%0Adoesn't support the +L format for linenumbers, it uses %60-l L%60.%0A%0A%22%22%22%0A%0Afrom subprocess import Popen, list2cmdline%0Afrom IPython.core.error import TryNext%0A%0Adef edit_in_textmate(self, filename, linenum=None, wait=True):%0A cmd = %5B'mate'%5D%0A if wait:%0A cmd.append('-w')%0A if linenum is not None:%0A cmd.extend(%5B'-l', str(linenum)%5D)%0A cmd.append(filename)%0A %0A proc = Popen(list2cmdline(cmd), shell=True)%0A if wait and proc.wait() != 0:%0A raise TryNext()%0A %0Adef load_ipython_extension(ip):%0A ip.set_hook('editor', edit_in_textmate)%0A
|