commit (string, 40 chars) | subject (string, 1-3.25k chars) | old_file (string, 4-311 chars) | new_file (string, 4-311 chars) | old_contents (string, 0-26.3k chars) | lang (string, 3 classes) | proba (float64, 0-1) | diff (string, 0-7.82k chars)
---|---|---|---|---|---|---|---|
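For readers consuming the rows programmatically, here is a minimal sketch of a record type mirroring the schema above (field names are taken from the column headers; the concrete Python types are an assumption based on the declared string/float64 classes):

```python
from dataclasses import dataclass

@dataclass
class CommitRow:
    commit: str        # 40-character commit hash
    subject: str       # commit message subject line
    old_file: str      # file path before the change
    new_file: str      # file path after the change
    old_contents: str  # file contents before the change (empty for new files)
    lang: str          # one of 3 language classes (e.g. "Python")
    proba: float       # score in [0, 1]
    diff: str          # the change itself, as a character-level diff
```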
278cd37ada508701896c2669a215365785f5a261
|
Add eval dispatch (copied from compyle)
|
evalExp.py
|
evalExp.py
|
Python
| 0 |
@@ -0,0 +1,760 @@
from keywords import *
from reg import *
from parse import parse

def evalExp():
    expr = parse(fetch(EXPR)) # make dedicated fetch_expr()?
    # expr = transformMacros(expr)
    evalFunc = getEvalFunc(expr)
    # evalFunc()
    # reassign next step

def getEvalFunc(expr):
    if isVar(expr):
        return compVar

    if isNum(expr):
        return compNum

    # else
    tag, *_ = expr

    keyword_groups = {
        define_keys : evalDef,
        ass_keys : evalAss,
        lambda_keys : evalLambda,
        if_keys : evalIf,
        begin_keys : evalBegin,
        quote_keys : evalQuote
    }

    for group in keyword_groups:
        if tag in group:
            return keyword_groups[group]

    # default
    return evalApp

def isNum(exp):
    try:
        return type(int(exp)) == int
    except:
        return False

def isVar(exp):
    return type(exp) == str
|
|
0ffd8c1b52b95ef61bcb2ecf7183d1abab55a3ce
|
Rename Documents to Attachments
|
smile_attachment/models/models.py
|
smile_attachment/models/models.py
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010 Smile (<http://www.smile.fr>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from lxml import etree
from openerp import api, fields, models
native__init__ = models.Model.__init__
native_fields_view_get = models.Model.fields_view_get
@api.one
@api.depends()
def _get_attachments(self):
    self.attachment_ids = False


def _search_attachments(self, operator, value):
    recs = self.env['ir.attachment'].search([('res_model', '=', self._name),
                                             '|', '|',
                                             ('description', operator, value),
                                             ('index_content', operator, value),
                                             ('datas_fname', operator, value)])
    return [('id', 'in', [rec.res_id for rec in recs])]


def new__init__(self, pool, cr):
    native__init__(self, pool, cr)
    name = 'attachment_ids'
    if name not in self._columns and name not in self._fields:
        field = fields.One2many('ir.attachment', 'res_id', 'Documents', automatic=True,
                                compute='_get_attachments', search='_search_attachments')
        self._add_field(name, field)


def new_fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
    res = native_fields_view_get(self, cr, uid, view_id, view_type, context, toolbar, submenu)
    if view_type == 'search':
        View = self.pool['ir.ui.view']
        arch_etree = etree.fromstring(res['arch'])
        element = etree.Element('field', name='attachment_ids')
        arch_etree.insert(-1, element)
        res['arch'], res['fields'] = View.postprocess_and_fields(cr, uid, self._name, arch_etree, view_id, context=context)
    return res


models.Model.__init__ = new__init__
models.Model._get_attachments = _get_attachments
models.Model._search_attachments = _search_attachments
models.Model.fields_view_get = new_fields_view_get
|
Python
| 0 |
@@ -1916,12 +1916,14 @@
', '
-Docu
+Attach
ment
|
5de57ff00037d6f9a04307e60685f47f368cb29f
|
add example script to test calling the ffi
|
example.py
|
example.py
|
Python
| 0 |
@@ -0,0 +1,138 @@
import scipcffi.ffi as s

scip_ptr = s.ffi.new('SCIP**')
rc = s.lib.SCIPcreate(scip_ptr)
assert rc == s.lib.SCIP_OKAY
scip = scip_ptr[0]
|
|
e0871cd8c106a5f66bffd7a93759747b2f282c46
|
make CommCareBuild.create_from_zip tolerate having directory entries like 'dist/' (by ignoring them)
|
corehq/apps/builds/models.py
|
corehq/apps/builds/models.py
|
from datetime import datetime
from zipfile import ZipFile
from couchdbkit.exceptions import ResourceNotFound
from couchdbkit.ext.django.schema import *
from corehq.apps.builds.jadjar import JadJar
class CommCareBuild(Document):
    """
    #python manage.py shell
    #>>> from corehq.apps.builds.models import CommCareBuild
    #>>> build = CommCareBuild.create_from_zip('/Users/droberts/Desktop/zip/7106.zip', '1.2.dev', 7106)
    """
    build_number = IntegerProperty()
    version = StringProperty()
    time = DateTimeProperty()

    def put_file(self, payload, path, filename=None):
        """
        Add an attachment to the build (useful for constructing the build)
        payload should be a file-like object
        filename should be something like "Nokia/S40-generic/CommCare.jar"
        """
        if filename:
            path = '/'.join([path, filename])
        content_type = {
            'jad': 'text/vnd.sun.j2me.app-descriptor',
            'jar': 'application/java-archive',
        }.get(path.split('.')[-1])
        self.put_attachment(payload, path, content_type)

    def fetch_file(self, path, filename=None):
        if filename:
            path = '/'.join([path, filename])
        return self.fetch_attachment(path)

    def get_jadjar(self, path):
        """
        build.get_jadjar("Nokia/S40-generic")
        """
        try:
            jad = self.fetch_file(path, "CommCare.jad")
        except ResourceNotFound:
            jad = None
        return JadJar(
            jad=jad,
            jar=self.fetch_file(path, "CommCare.jar"),
            version=self.version,
            build_number=self.build_number
        )

    @classmethod
    def create_from_zip(cls, f, version, build_number):
        """f should be a file-like object or a path to a zipfile"""
        self = cls(build_number=build_number, version=version, time=datetime.utcnow())
        self.save()
        try:
            z = ZipFile(f)
            for name in z.namelist():
                path = name.split('/')
                if path[0] == "dist":
                    path = '/'.join(path[1:])
                    self.put_file(z.read(name), path)
        except:
            z.close()
            self.delete()
            raise
        z.close()
        return self

    @classmethod
    def get_build(cls, version, build_number):
        build_number = int(build_number)
        self = cls.view('builds/all',
            startkey=[version, build_number],
            endkey=[version, build_number, {}],
            limit=1,
            include_docs=True,
        ).one()
        if not self:
            raise KeyError()
        return self
|
Python
| 0 |
@@ -2083,16 +2083,35 @@
 = "dist"
+ and path[-1] != ""
 :
|
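The one-line hunk above widens the `if path[0] == "dist"` test in `create_from_zip` so that directory entries such as `dist/`, whose names split to an empty final component, are skipped. A self-contained sketch of the resulting loop, reconstructed from the fragment (the helper name is hypothetical, not the project's code):

```python
from zipfile import ZipFile

def iter_dist_files(z: ZipFile):
    """Yield (path, payload) for real files under dist/, ignoring
    directory entries like 'dist/' whose last component is empty."""
    for name in z.namelist():
        path = name.split('/')
        if path[0] == "dist" and path[-1] != "":
            yield '/'.join(path[1:]), z.read(name)
```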
3a2a311c3c3f8a6bc2f027bfa247d912122e512e
|
Add test for gaussian
|
tests/functions_tests/test_gaussian.py
|
tests/functions_tests/test_gaussian.py
|
Python
| 0.000004 |
@@ -0,0 +1,1410 @@
import unittest

import numpy

import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import condition


if cuda.available:
    cuda.init()


class TestGaussian(unittest.TestCase):

    def setUp(self):
        self.m = numpy.random.uniform(-1, 1, (3, 2)).astype(numpy.float32)
        self.v = numpy.random.uniform(-1, 1, (3, 2)).astype(numpy.float32)
        self.gy = numpy.random.uniform(-1, 1, (3, 2)).astype(numpy.float32)

    def check_backward(self, m_data, v_data, y_grad):
        m = chainer.Variable(m_data)
        v = chainer.Variable(v_data)
        y = functions.gaussian(m, v)
        self.assertEqual(y.data.dtype, numpy.float32)
        y.grad = y_grad
        y.backward()

        func = y.creator
        f = lambda: func.forward((m.data, v.data))
        gm, gv = gradient_check.numerical_grad(f, (m.data, v.data), (y.grad,))

        gradient_check.assert_allclose(gm, m.grad)
        gradient_check.assert_allclose(gv, v.grad)

    @condition.retry(3)
    def test_backward_cpu(self):
        self.check_backward(self.m, self.v, self.gy)

    @condition.retry(3)
    def test_backward_gpu(self):
        self.check_backward(cuda.to_gpu(self.m),
                            cuda.to_gpu(self.v),
                            cuda.to_gpu(self.gy))


testing.run_module(__name__, __file__)
|
|
945c2c620634c2c816aa446d91773adb75cb87e3
|
Add airmass tool
|
airmass.py
|
airmass.py
|
Python
| 0 |
@@ -0,0 +1,1458 @@
#!/usr/env/python

import argparse
import numpy as np
from astropy import units as u

##-------------------------------------------------------------------------
## Parse Command Line Arguments
##-------------------------------------------------------------------------
## create a parser object for understanding command-line arguments
p = argparse.ArgumentParser(description=
'''Convert an elevation above the horizon to an airmass using the Pickering
(2002) formula:
1 / sin(h + 244/(165 + 47*h^1.1))
and estimate the extinction.
''')
## add arguments
p.add_argument('elevation', type=float,
               help="Elevation (in degrees) above the horizon")
## add options
p.add_argument("--extinction", dest="extinction", type=float,
               default=0.13, help="Extinction in magnitudes per airmass.")
args = p.parse_args()


##-------------------------------------------------------------------------
## Main Program
##-------------------------------------------------------------------------
def main():
    h = args.elevation * u.degree # elevation of target above horizon
    magnitudes_per_airmass = args.extinction * u.mag

    # Pickering 2002 Airmass
    value = h.value + 244/(165.0 + 47.0*h.value**1.1)
    airmass = 1.0 / np.sin(value*u.degree)
    print(f'for EL = {h:.1f}')
    print(f'airmass = {airmass:.2f}')

    extinction = airmass * magnitudes_per_airmass
    print(f'extinction = {extinction:.2f}')


if __name__ == '__main__':
    main()
|
|
12f2198a53d474bb69a6b9118fca0638dcce8aac
|
add data migration
|
accelerator/migrations/0088_remove_community_participation_read_more_prompts.py
|
accelerator/migrations/0088_remove_community_participation_read_more_prompts.py
|
Python
| 0 |
@@ -0,0 +1,1460 @@
# Generated by Django 2.2.24 on 2022-03-07 12:10
import re

from django.db import migrations


def remove_community_participation_read_more_prompts(apps, schema_editor):
    """
    Target read more prompts:
    For more information, read about Judging at Mass Challenge.
    Read more about Mentoring at Mass Challenge.
    Read more about being an Entrepreneur at Mass Challenge.
    Read more about Office Hours at Mass Challenge.
    Read more about Speaking at Mass Challenge.
    """
    CommunityParticipation = apps.get_model(
        'accelerator', 'CommunityParticipation')
    for participation in CommunityParticipation.objects.all():
        # remove prompts starting with "Read more about"
        participation.description = re.sub(
            r' Read more about[a-zA-Z ]*.$', '', participation.description)
        # remove prompts starting with "For more information"
        participation.description = re.sub(
            r' For more information[a-zA-Z, ]*.$', '', participation.description)
        # replace non-ascii char "’" with "'"
        participation.description = participation.description.replace('\u2019', "'")
        participation.save()


class Migration(migrations.Migration):
    dependencies = [
        ('accelerator', '0087_update_startup_profile'),
    ]

    operations = [
        migrations.RunPython(remove_community_participation_read_more_prompts,
                             migrations.RunPython.noop)
    ]
|
|
b166cd8cc95ceb56f8d03cacb8903b0936e69210
|
Create solution.py
|
data_structures/linked_list/problems/find_pattern_in_linked_list/py/solution.py
|
data_structures/linked_list/problems/find_pattern_in_linked_list/py/solution.py
|
Python
| 0.000018 |
@@ -0,0 +1,874 @@
import LinkedList

# Linked List Node inside the LinkedList module is declared as:
#
# class Node:
#     def __init__(self, val, nxt=None):
#         self.val = val
#         self.nxt = nxt
#

def FindPatternInLinkedList(head: LinkedList.Node, pattern: LinkedList.Node) -> int:
    if head == None or pattern == None:
        return -1

    index = 0
    tslow = head
    pnode = pattern

    while tslow != None:
        if tslow.val == pattern.val:
            tfast = tslow
            pnode = pattern

            while tfast != None and pnode != None:
                if tfast.val == pnode.val:
                    tfast = tfast.nxt
                    pnode = pnode.nxt
                else:
                    break

        if pnode == None:
            return index

        tslow = tslow.nxt
        index += 1

    return -1
|
|
9bffe981c018213b87d015a20603c092567bbdf4
|
Initialize multiple class setup; add remaining APIs
|
cobaltuoft/cobalt.py
|
cobaltuoft/cobalt.py
|
Python
| 0 |
@@ -0,0 +1,1691 @@
from .endpoints import Endpoints
from .helpers import get, scrape_filters


class Cobalt:
    def __init__(self, api_key=None):
        self.host = 'http://cobalt.qas.im/api/1.0'

        self.headers = {
            'Referer': 'Cobalt-UofT-Python'
        }

        if not api_key or not self._is_valid_key(api_key):
            raise ValueError('Expected valid API key.')

        self.headers['Authorization'] = api_key

        self.filter_map = scrape_filters()

    def _get(self, url, params=None):
        return get(url=url, params=params, headers=self.headers)

    def _is_valid_key(self, key):
        payload = {'key': key}
        r = self._get(self.host, params=payload)
        return r.reason == 'Not Found' and r.status_code == 404

    def _run(self, api, endpoint=None, params=None):
        res = Endpoints.run(api=api,
                            endpoint=endpoint,
                            params=params,
                            map=self.filter_map[api],
                            get=self._get)
        return res.json()

    def athletics(self, endpoint=None, params=None):
        return self._run(api='athletics', endpoint=endpoint, params=params)

    def buildings(self, endpoint=None, params=None):
        return self._run(api='buildings', endpoint=endpoint, params=params)

    def courses(self, endpoint=None, params=None):
        return self._run(api='courses', endpoint=endpoint, params=params)

    def food(self, endpoint=None, params=None):
        return self._run(api='food', endpoint=endpoint, params=params)

    def textbooks(self, endpoint=None, params=None):
        return self._run(api='textbooks', endpoint=endpoint, params=params)
|
|
54864841267c4d2cb53ce581c05d8ba9c15eef0c
|
Add lexer
|
balloon.py
|
balloon.py
|
Python
| 0.000001 |
@@ -0,0 +1,959 @@
from pygments.lexer import *
from pygments.token import *

class CustomLexer(RegexLexer):
    name = 'Balloon'
    aliases = ['balloon']
    filenames = '*.bl'

    tokens = {
        'root': [

            include('keywords'),

            (r'[]{}(),:;[]', Punctuation),
            (r'#.*?$', Comment),

            (r'[+-]?[0-9]+\.[0-9]+', Number.Float),
            (r'[+-]?[0-9]+', Number.Integer),

            (r'<=|>=|==|[+*<>=%\-\/]', Operator),
            (r'(and|or|not)\b', Operator.Word),

            (r'".*"', String),

            (r'(var|fn)\b', Keyword.Declaration),

            (r'[a-zA-Z_][a-zA-Z0-9_]*[!?]?', Name),
            (r'\s+', Text)
        ],

        'keywords': [
            (words((
                'if', 'else', 'loop', 'break', 'continue', 'return',
                'Number', 'Bool', 'String', 'Function', 'Tuple',
                'any', 'void', 'true', 'false'), suffix=r'\b'),
            Keyword),
        ],

    }
|
|
d0306518dcc395a051460115d7ef9488f26426cc
|
Add paper shortening tool: input text, output shorter text
|
shorten-pdf/shorten.py
|
shorten-pdf/shorten.py
|
Python
| 0.999999 |
@@ -0,0 +1,1349 @@
#!/usr/bin/python

import sys

LONG_PARAGRAPH_THRESH = 400
LONG_START_LEN = 197
LONG_END_LEN = 197

if len(sys.argv) < 2:
    print 'Give me a text file as an argument.'
    sys.exit(0)

f = open(sys.argv[1]) # open file
t = f.read() # read text
ps = t.split('\n\n') # get paragraphs

ps_ = [] # shortened paragraphs go here

for p in ps:
    if len(p) < LONG_PARAGRAPH_THRESH:
        ps_.append(p)
        continue
    ss = p.split('. ') # get sentences
    ss_ = [] # short paragraph sentences go here
    totlen = 0 # total length of accepted sentences
    for s in ss:
        if totlen + len(s) > LONG_START_LEN:
            ss_.append(s[:LONG_START_LEN - totlen] + "..")
            break;
        ss_.append(s)
        totlen += len(s)
    index = len(ss_) # index to insert end sentences

    totlen = 0
    ss.reverse()
    for s in ss:
        if totlen + len(s) > LONG_END_LEN:
            ss_.insert(index, "..." + s[len(s) - (LONG_END_LEN - totlen):])
            break;
        ss_.insert(index, s)
        totlen += len(s)
    p_ = '. '.join(ss_)
    ps_.append(p_)

t_ = '\n\n'.join(ps_)
print t_
|
|
316a82c5465a13770404b6a302348f192618cd27
|
Add an interface for eagerly evaluating command graph elements
|
libqtile/command_interface.py
|
libqtile/command_interface.py
|
Python
| 0 |
@@ -0,0 +1,2223 @@
# Copyright (c) 2019, Sean Vig. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

from abc import abstractmethod, ABCMeta
from typing import Any, Dict, Tuple

from libqtile.command_graph import CommandGraphCall


class EagerCommandInterface(metaclass=ABCMeta):
    """
    Defines an interface which can be used to eagerly evaluate a given call on
    a command graph. The implementations of this may use an IPC call to access
    the running qtile instance remotely, or directly access the qtile instance
    from within the same process.
    """

    @abstractmethod
    def execute(self, call: CommandGraphCall, args: Tuple, kwargs: Dict) -> Any:
        """Execute the given call, returning the result of the execution

        Perform the given command graph call, calling the function with the
        given arguments and keyword arguments.

        Parameters
        ----------
        call: CommandGraphCall
            The call on the command graph that is to be performed.
        args:
            The arguments to pass into the command graph call.
        kwargs:
            The keyword arguments to pass into the command graph call.
        """
        pass  # pragma: no cover
|
|
dff1f9176d7ce77a242263bfc9a0760cd31f0585
|
Add a prototype for cached regex.compile()
|
regex_proxy.py
|
regex_proxy.py
|
Python
| 0.000001 |
@@ -0,0 +1,314 @@
from regex import *
from regex import compile as raw_compile


_cache = {}


# Wrap regex.compile up so we have a global cache
def compile(s, *args, **kwargs):
    global _cache
    try:
        return _cache[s]
    except KeyError:
        r = raw_compile(s, *args, **kwargs)
        _cache[s] = r
        return r
|
|
231e19ed29314bc0d9aad3cd1d69b757364fce7d
|
Create pms.py
|
pms.py
|
pms.py
|
Python
| 0 |
@@ -0,0 +1,550 @@
import serial

# we stop terminal with raspi-config,
# we stop bluethooth from /boot/config.txt first,
# and currently UART device is /dev/ttyAMAO,
# but we still cannot read data from device

# failure devices
#dev = "ttyS0"

# work devices
#dev = "ttyAMA0"
#dev = "serial0"
dev = "ttyUSB0"

ser = serial.Serial(port="/dev/"+dev,
                    baudrate=9600,
                    parity=serial.PARITY_NONE,
                    stopbits=serial.STOPBITS_ONE,
                    bytesize=serial.EIGHTBITS, timeout=2)
while True:
    data = ser.read()
    print str(data), len(data)
ser.close()
|
|
7b1b343c552ee6f124ccceee05f1a6732657c9e1
|
Add initial startup program (pox.py)
|
pox.py
|
pox.py
|
Python
| 0.000002 |
@@ -0,0 +1,807 @@
#!/usr/bin/python

from pox.core import core
import pox.openflow.openflow
import pox.topology.topology
import pox.openflow.of_01
import pox.dumb_l3_switch.dumb_l3_switch

# Set default log level
import logging
logging.basicConfig(level=logging.DEBUG)

# Turn on extra info for event exceptions
import pox.lib.revent.revent as revent
revent.showEventExceptions = True


def startup ():
  core.register("topology", pox.topology.topology.Topology())
  core.register("openflow", pox.openflow.openflow.OpenFlowHub())
  core.register("switch", pox.dumb_l3_switch.dumb_l3_switch.dumb_l3_switch())

  pox.openflow.of_01.start()


if __name__ == '__main__':
  try:
    startup()
    core.goUp()
  except:
    import traceback
    traceback.print_exc()

  import code
  code.interact('Ready.')
  pox.core.core.quit()
|
|
3834af9b3a6381ac7a2334c7bd2ae6d562e0f20b
|
Create HR_pythonIsLeap.py
|
HR_pythonIsLeap.py
|
HR_pythonIsLeap.py
|
Python
| 0.000764 |
@@ -0,0 +1,307 @@
def is_leap(year):
    leap = False

    # Write your logic here
    # thought process
    #if year%4==0:
    #    return True
    #elif year%100==0:
    #    return False
    #elif year%400==0:
    #    return True

    # Optimized, Python 3
    return ((year%4==0)and(year%100!=0)or(year%400==0))
|
|
d160d73740c73e2cab8325179e7f0a9ee4ae8c50
|
add disk_usage.py example script
|
examples/disk_usage.py
|
examples/disk_usage.py
|
Python
| 0.000001 |
@@ -0,0 +1,1172 @@
#!/usr/bin/env python

"""
List all mounted disk partitions a-la "df" command.
"""

import sys
import psutil

def convert_bytes(n):
    if n == 0:
        return "0B"
    symbols = ('k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
    prefix = {}
    for i, s in enumerate(symbols):
        prefix[s] = 1 << (i+1)*10
    for s in reversed(symbols):
        if n >= prefix[s]:
            value = float(n) / prefix[s]
            return '%.1f%s' % (value, s)

def main():
    print "Device    Total    Used    Free  Use %   Type  Mount"
    for part in psutil.disk_partitions(0):
        usage = psutil.disk_usage(part.mountpoint)
        print "%-9s %8s %8s %8s %5s%% %8s %s" % (part.device,
                                                 convert_bytes(usage.total),
                                                 convert_bytes(usage.used),
                                                 convert_bytes(usage.free),
                                                 int(usage.percent),
                                                 part.fstype,
                                                 part.mountpoint)

if __name__ == '__main__':
    sys.exit(main())
|
|
7475b73072f0037fc53bcae59e331c4d5a997e86
|
Add auto-fill test cases
|
depot/tests/test_checkout.py
|
depot/tests/test_checkout.py
|
Python
| 0.000001 |
@@ -0,0 +1,1109 @@
from django.contrib.auth.models import User
from depot.models import Depot, Organization
from verleihtool.test import ClientTestCase


class AutoFillTestCase(ClientTestCase):
    """
    Test cases asserting the auto-fill functionality for checkout-form

    :author: Stefan Su
    """
    def setUp(self):
        super(AutoFillTestCase, self).setUp()

        organization = Organization.objects.create()

        self.depot = Depot.objects.create(
            name='My Depot',
            organization=organization
        )

    def test_logged_in_autofill_username(self):
        response = self.as_user.get('/depots/%d/' % self.depot.id)

        self.assertInHTML(
            '<input type="text" class="form-control" id="id_username" name="name" value="user">',
            response.content.decode()
        )

    def test_not_logged_in_no_autofill(self):
        response = self.as_guest.get('/depots/%d/' % self.depot.id)

        self.assertInHTML(
            str('<input type="text" class ="form-control" id="id_username" name="name" value="">'),
            response.content.decode()
        )
|
|
0caa9035e06e6596a295ed2ed0a6238a2b09f353
|
add PCA and TSNE representation
|
utils/postprocessing/representation.py
|
utils/postprocessing/representation.py
|
Python
| 0 |
@@ -0,0 +1,596 @@
import numpy as np
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt

def PCA_representation(data, n_components):
    pca = PCA(n_components=n_components)
    return pca.fit_transform(data)

def TSNE_representation(data, n_components):
    model = TSNE(n_components=n_components, random_state=0)
    return model.fit_transform(data)

def plot_PCA(data, n_components, name='PCA Representation'):
    pca = PCA_representation(data, n_components)

def plot_TSNE(data, n_components, name='TSNE Representation'):
    tsne = TSNE_representation(data, n_components)
|
|
3ee47b0adbc379d77f01df51927399ecf3fb24e6
|
Add docstring and comment.
|
examples/mnist-autoencoder.py
|
examples/mnist-autoencoder.py
|
#!/usr/bin/env python
import climate
import matplotlib.pyplot as plt
import theanets
from utils import load_mnist, plot_layers, plot_images
g = climate.add_group('MNIST Example')
g.add_argument('--features', type=int, default=8, metavar='N',
               help='train a model using N^2 hidden-layer features')


def main(args):
    train, valid, _ = load_mnist()

    e = theanets.Experiment(
        theanets.Autoencoder,
        layers=(784, args.features ** 2, 784))
    e.train(train, valid,
            input_noise=0.1,
            weight_l2=0.0001,
            algorithm='rmsprop',
            momentum=0.9,
            min_improvement=0.1)

    plot_layers([e.network.find('hid1', 'w'), e.network.find('out', 'w')])
    plt.tight_layout()
    plt.show()

    v = valid[:100]
    plot_images(v, 121, 'Sample data')
    plot_images(e.network.predict(v), 122, 'Reconstructed data')
    plt.tight_layout()
    plt.show()


if __name__ == '__main__':
    climate.call(main)
|
Python
| 0 |
@@ -16,16 +16,255 @@
 python

+'''Single-layer autoencoder example using MNIST digit data.
+
+This example shows one way to train a single-layer autoencoder model using the
+handwritten MNIST digits.
+
+This example also shows the use of climate command-line arguments.
+'''
+
 import c
@@ -564,16 +564,55 @@
 (args):
+    # load up the MNIST digit dataset.
 trai
|
09112412a4814e3727def2547765546bf44c1e7d
|
Test joint refinement of 300 cspad images using Brewster 2018 methods.
|
test/algorithms/refinement/test_cspad_refinement.py
|
test/algorithms/refinement/test_cspad_refinement.py
|
Python
| 0 |
@@ -0,0 +1,1930 @@
# Test multiple stills refinement.

from __future__ import absolute_import, division, print_function

import os

from dxtbx.model.experiment_list import ExperimentListFactory
import procrunner

def test1(dials_regression, run_in_tmpdir):
    """
    Refinement test of 300 CSPAD images, testing auto_reduction, parameter
    fixing, constraints, SparseLevMar, and sauter_poon outlier rejection. See
    README in the regression folder for more details.
    """
    from scitbx import matrix

    data_dir = os.path.join(dials_regression, "refinement_test_data", "cspad_refinement")

    result = procrunner.run_process([
        "dials.refine",
        os.path.join(data_dir, "cspad_refined_experiments_step6_level2_300.json"),
        os.path.join(data_dir, "cspad_reflections_step7_300.pickle"),
        os.path.join(data_dir, "refine.phil"),
    ])
    assert result['exitcode'] == 0
    assert result['stderr'] == ''

    # load results
    reg_exp = ExperimentListFactory.from_json_file(
        os.path.join(data_dir, "regression_experiments.json"),
        check_format=False)
    ref_exp = ExperimentListFactory.from_json_file("refined_experiments.json",
        check_format=False)

    # compare results
    tol = 1e-5
    for b1, b2 in zip(reg_exp.beams(), ref_exp.beams()):
        assert b1.is_similar_to(b2, wavelength_tolerance=tol,
                                direction_tolerance=tol,
                                polarization_normal_tolerance=tol,
                                polarization_fraction_tolerance=tol)
        s0_1 = matrix.col(b1.get_unit_s0())
        s0_2 = matrix.col(b2.get_unit_s0())
        assert s0_1.accute_angle(s0_2, deg=True) < 0.0057 # ~0.1 mrad
    for c1, c2 in zip(reg_exp.crystals(), ref_exp.crystals()):
        assert c1.is_similar_to(c2)

    for d1, d2 in zip(reg_exp.detectors(), ref_exp.detectors()):
        assert d1.is_similar_to(d2,
            fast_axis_tolerance=1e-4, slow_axis_tolerance=1e-4, origin_tolerance=1e-2)
|
|
fa521b4358a06d1667864a09cd7195d3a6db764d
|
Add lc206_reverse_linked_list.py
|
lc206_reverse_linked_list.py
|
lc206_reverse_linked_list.py
|
Python
| 0.000001 |
@@ -0,0 +1,625 @@
"""206. Reverse Linked List
Easy

Reverse a singly linked list.

Example:
Input: 1->2->3->4->5->NULL
Output: 5->4->3->2->1->NULL

Follow up:
A linked list can be reversed either iteratively or recursively. Could you implement both?
"""

# Definition for singly-linked list.
class ListNode(object):
    def __init__(self, x):
        self.val = x
        self.next = None


class Solution(object):
    def reverseList(self, head):
        """
        :type head: ListNode
        :rtype: ListNode
        """
        pass


def main():
    # print Solution().reverseList(head)
    pass


if __name__ == '__main__':
    main()
|
|
2dd6049c1fa9340d14f4b73f843f7ed4408e84f5
|
Prepare release script init
|
utils/create_release.py
|
utils/create_release.py
|
Python
| 0 |
@@ -0,0 +1,2157 @@
#!/usr/bin/env python3
import os
import datetime
import subprocess
from distutils.version import StrictVersion

PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))


def main():
    # git_clean = subprocess.check_output(
    #     "git status --porcelain", shell=True, universal_newlines=True,
    # ).strip()
    # if git_clean:
    #     raise RuntimeError("Error, git workspace is not clean: \n{0}".format(git_clean))

    with open(os.path.join(PROJECT_ROOT, "VERSION")) as file_h:
        current_version = file_h.read().strip()

    print("Current version is: {0}".format(current_version))
    print("Please insert new version:")
    new_version = str(input())

    if StrictVersion(new_version) <= StrictVersion(current_version):
        raise RuntimeError(
            "Error new version is below current version: {0} < {1}".format(
                new_version, current_version
            )
        )

    try:
        with open(os.path.join(PROJECT_ROOT, "CHANGELOG.md")) as file_h:
            changelog = file_h.read()

        today = datetime.datetime.today()
        changelog = changelog.replace(
            "## master - CURRENT\n",
            """\
## master - CURRENT

## {0} - {1}
""".format(
                new_version, today.strftime("%d/%m/%Y")
            ),
        )

        with open(os.path.join(PROJECT_ROOT, "CHANGELOG.md"), "w") as file_h:
            file_h.write(changelog)

        with open(os.path.join(PROJECT_ROOT, "VERSION"), "w") as file_h:
            file_h.write(new_version)

        subprocess.check_call(
            'git commit -a -m "Version {0}"'.format(new_version), shell=True
        )
        # subprocess.check_call("git tag v{0}".format(new_version), shell=True)
        # subprocess.check_call("git push --tags", shell=True)
        # subprocess.check_call("git push", shell=True)

    except subprocess.CalledProcessError as e:
        print("Error detected, cleaning state.")
        # subprocess.call("git tag -d v{0}".format(new_version), shell=True)
        # subprocess.check_call("git reset --hard", shell=True)
        raise e


if __name__ == "__main__":
    main()
|
|
2850713d0add5cb1ae084898bdd6929c0f5bfb3e
|
add simulated annealing stat script
|
master/scripts/planner/solvers/hyperparameter_optimization/test_stat_sa.py
|
master/scripts/planner/solvers/hyperparameter_optimization/test_stat_sa.py
|
Python
| 0 |
@@ -0,0 +1,1331 @@
import GPy
import GPyOpt
import numpy as np
from sys import path
import pickle
import time
from tqdm import tqdm
path.append("..")
path.append("../..")
path.append("../../..")

from solver import SimulatedAnnealingSolver, RandomSolver
import map_converter as m

fs = open("../../../webserver/data/serialization/mapper.pickle", "rb")
mapper = pickle.load(fs)
fs.close()
nb_drone = 1
state = [(1059, 842), (505, 1214), (400, 1122), (502, 339), (866, 512), (1073, 82), (669, 1202), (32, 1122), (45, 52), (209, 993), (118, 653), (487, 896), (748, 638), (271, 1067), (1576, 567), (683, 316), (1483, 1156), (1448, 634), (303, 1220), (759, 823), (1614, 991), (1387, 174), (1618, 227), (367, 39), (35, 902), (967, 690), (944, 327), (912, 1029), (184, 1205), (779, 1026), (694, 123), (1502, 395)]
rplan = RandomSolver(state, mapper, nb_drone)
saplan = SimulatedAnnealingSolver(rplan.state, mapper, nb_drone)
hist = []
for i in tqdm(range(100)):
    rplan.solve()
    saplan.state = list(rplan.state)
    saplan.copy_strategy = "slice"
    saplan.steps = 10000000
    tmax = 987.57443341
    tmin = 1
    saplan.Tmax = tmax
    saplan.Tmin = tmin
    saplan.updates = 0
    itinerary, energy = saplan.solve()
    hist.append(energy)
hist = np.array(hist)
print("Mean:", np.mean(hist), "Var:", np.var(hist), "Std:", np.std(hist))
print(hist)
|
|
edf2fd4c3c73a82f590ec3065cfdf6de4eb58e01
|
Fix include_clients in PostfixCollector
|
src/collectors/postfix/postfix.py
|
src/collectors/postfix/postfix.py
|
# coding=utf-8
"""
Collect stats from postfix-stats. postfix-stats is a simple threaded stats
aggregator for Postfix. When running as a syslog destination, it can be used to
get realtime cumulative stats.
#### Dependencies
* socket
* json (or simeplejson)
* [postfix-stats](https://github.com/disqus/postfix-stats)
"""
import socket
import sys
try:
    import json
    json  # workaround for pyflakes issue #13
except ImportError:
    import simplejson as json

import diamond.collector

if sys.version_info < (2, 6):
    from string import maketrans
    DOTS_TO_UNDERS = maketrans('.', '_')
else:
    DOTS_TO_UNDERS = {ord(u'.'): u'_'}


class PostfixCollector(diamond.collector.Collector):

    def get_default_config_help(self):
        config_help = super(PostfixCollector,
                            self).get_default_config_help()
        config_help.update({
            'host': 'Hostname to coonect to',
            'port': 'Port to connect to',
            'include_clients': 'Include client connection stats',
        })
        return config_help

    def get_default_config(self):
        """
        Returns the default collector settings
        """
        config = super(PostfixCollector, self).get_default_config()
        config.update({
            'path': 'postfix',
            'host': 'localhost',
            'port': 7777,
            'include_clients': True,
            'method': 'Threaded',
        })
        return config

    def get_json(self):
        json_string = ''

        address = (self.config['host'], int(self.config['port']))

        try:
            try:
                s = socket.create_connection(address, timeout=1)
                s.sendall('stats\n')
                while 1:
                    data = s.recv(4096)
                    if not data:
                        break
                    json_string += data
            except socket.error:
                self.log.exception("Error talking to postfix-stats")
                return ''
        finally:
            if s:
                s.close()

        return json_string

    def get_data(self):
        json_string = self.get_json()

        try:
            data = json.loads(json_string)
        except (ValueError, TypeError):
            self.log.exception("Error parsing json from postfix-stats")
            return None

        return data

    def collect(self):
        data = self.get_data()

        if not data:
            return

        if self.config['include_clients'] and u'clients' in data:
            for client, value in data['clients'].iteritems():
                # translate dots to underscores in client names
                metric = u'.'.join(['clients',
                                    client.translate(DOTS_TO_UNDERS)])
                dvalue = self.derivative(metric, value)
                self.publish(metric, dvalue)

        for action in (u'in', u'recv', u'send'):
            if action not in data:
                continue

            for sect, stats in data[action].iteritems():
                for status, value in stats.iteritems():
                    metric = '.'.join([action,
                                       sect,
                                       status.translate(DOTS_TO_UNDERS)])
                    dvalue = self.derivative(metric, value)
                    self.publish(metric, dvalue)

        if u'local' in data:
            for key, value in data[u'local'].iteritems():
                metric = '.'.join(['local', key])
                dvalue = self.derivative(metric, value)
                self.publish(metric, dvalue)
|
Python
| 0 |
@@ -1446,12 +1446,14 @@
 ':
-T
+'t
 rue
+'
 ,
@@ -2579,16 +2579,26 @@
 lients']
+ == 'true'
 and u'c
|
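The two hunks above change the default from the boolean `True` to the string `'true'` and make `collect()` compare against that string, presumably because the collector's settings arrive as strings from the config file. A minimal sketch of the patched check (illustrative values, not the collector itself):

```python
# A plain truthiness test would also accept the string "False";
# comparing against 'true' pins the intended behavior.
include_clients = 'true'
data = {u'clients': {u'relay_example_com': 42}}

if include_clients == 'true' and u'clients' in data:
    for client, value in data[u'clients'].items():
        print(client, value)
```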
1d4693b6f5b6f8b3912aae1216665272a36b1411
|
Add missing group.py
|
group.py
|
group.py
|
Python
| 0.000387 |
@@ -0,0 +1,218 @@
from pygame.sprite import Group as pygame_Group

class Group(pygame_Group):
    def draw(self, onto, *args, **kw):
        for sprite in self:
            sprite.draw(*args, **kw)
        super(Group, self).draw(onto)
|
|
b680141b9ec5468a5a0890edf25045a6af8b46c2
|
Add run.py
|
run.py
|
run.py
|
Python
| 0.000009 |
@@ -0,0 +1,158 @@
#!/usr/bin/python
# -*- coding:utf8 -*-
# Powered By KK Studio

from app.DNStack import DNStack

if __name__ == "__main__":
    app = DNStack()
    app.run()
|
|
d3e786b554bfafeb4f0c16635b80f9911acc4bba
|
add stacked auto encoder file.
|
sae.py
|
sae.py
|
Python
| 0 |
@@ -0,0 +1,2201 @@
#coding: utf-8
import requests
import random, numpy
from aa import AutoEncoder


class StackedAutoEncoder:
    def __init__(self, visible, hiddens):
        # TODO: fine-tuning layer
        num_of_nodes= [visible] + hiddens
        self.auto_encoders = []
        for i in xrange(len(num_of_nodes)-1):
            self.auto_encoders.append(AutoEncoder(num_of_nodes[i], num_of_nodes[i+1]))
        self.training_layer = 0

    def train(self, samples, alpha=0.05):
        for i in xrange(self.training_layer):
            samples = map(self.auto_encoders[i].encode, samples)
        self.auto_encoders[self.training_layer].train(samples,alpha)

    def error(self, samples, alpha=0.05):
        for i in xrange(self.training_layer):
            samples = map(self.auto_encoders[i].encode, samples)
        return self.auto_encoders[self.training_layer].error(samples)

    def output(self, sample):
        for i in xrange(self.training_layer):
            sample = self.auto_encoders[i].encode(sample)
        top = self.auto_encoders[self.training_layer]
        return top.decode(top.encode(sample))

    def fix_traning_layer(self):
        self.training_layer += 1


if __name__=='__main__':
    resp = requests.get('https://archive.ics.uci.edu/ml/machine-learning-databases/spect/SPECT.train')
    samples = map(lambda row: row.split(','), resp.text.split('\n'))
    titles = samples[0]
    samples = samples[1:]
    samples = filter(lambda arr: len(arr) > 1, samples)
    samples = map(lambda arr: numpy.matrix([map(float, arr)]), samples)
    samples = map(lambda mat: mat.transpose(), samples)

    V = samples[0].shape[0]
    H = 2*V

    sae = StackedAutoEncoder(V, [V+2,V])

    for i in xrange(1000):
        j = int(random.random()*len(samples))
        #print samples[j:j+10]
        sae.train(samples[j:j+10])
        if i<100 or i%1000 == 0:
            print sae.error(samples)

    sae.fix_traning_layer()

    for i in xrange(1000):
        j = int(random.random()*len(samples))
        #print samples[j:j+10]
        sae.train(samples[j:j+10])
        if i<100 or i%1000 == 0:
            print sae.error(samples)

    for sample in samples:
        print sae.output(sample)
|
|
13486556a15cdb2dbfe3f390f973942d93338995
|
Create TryRecord.py
|
TryRecord.py
|
TryRecord.py
|
Python
| 0.000001 |
@@ -0,0 +1,1916 @@
"""
Example usage of Record class

The MIT License (MIT)

Copyright (c) <2016> <Larry McCaig (aka: Larz60+ aka: Larz60p)>

Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

"""
import Record

class TryRecord:
    def __init__(self, filename=None):
        if filename:
            self.rec = Record.Record(filename)

    def try_record(self):
        stkrec = self.rec.record

        print('\nrecords:')
        for record in stkrec:
            print(record)

        keys = stkrec._asdict().keys()
        print('\nKeys:')
        for key in keys:
            print('\nkey: {}'.format(key))
            thisrec = getattr(stkrec, key)
            print('filename: {}'.format(thisrec.filename))
            print('number of columns: {}'.format(len(thisrec.columns)))
            print('column 0 column name: {}'.format(thisrec.columns[0].db_column_name))


if __name__ == '__main__':
    tr = TryRecord('StockData.json')
    tr.try_record()
|
|
8d94bbc272b0b39ea3a561671faf696a4851c1a1
|
Create app.py
|
reddit2telegram/channels/MoreTankieChapo/app.py
|
reddit2telegram/channels/MoreTankieChapo/app.py
|
Python
| 0.000003 |
@@ -0,0 +1,151 @@
#encoding:utf-8

subreddit = 'MoreTankieChapo'
t_channel = '@MoreTankieChapo'


def send_post(submission, r2t):
    return r2t.send_simple(submission)
|
|
e5ae14b4438fc7ae15156615206453097b8f759b
|
add wave test
|
Python/WaveTest.py
|
Python/WaveTest.py
|
Python
| 0.000007 |
@@ -0,0 +1,1111 @@
import requests

def text2code(text):
    '''
    convert a string to wave code
    '''
    ret = None
    get_wave_params = {'type' : 'text', 'content' : text}
    response = requests.post('http://rest.sinaapp.com/api/post', data=get_wave_params)
    if response.status_code == 200:
        try:
            data = response.json()
            ret = data['code']
        except: # json() may cause ValueError
            pass
    return ret

def code2text(code):
    '''
    convert a wave code to string
    '''
    ret = None
    get_text_params = {'code' : code}
    response = requests.get('http://rest.sinaapp.com/api/get', params=get_text_params)
    if (response.status_code == 200):
        try:
            data = response.json()
            ret = data['content']
        except:
            pass
    return ret

def main():
    text = 'Flame-Team'
    code = text2code(text)
    if code is not None:
        print text + ' to code is ' + code
        text_restore = code2text(code)
        if text_restore is not None:
            print code + ' to text is ' + text_restore

if __name__ == '__main__':
    main()
|
|
f6a725b5915575f61fcb7c34ac7b464cd304e7b5
|
test mode shows user
|
dj/scripts/tweet.py
|
dj/scripts/tweet.py
|
#!/usr/bin/python
# tweets #client.slug, #video, title and blipurl
# shortens the URL and title if needed
# if over 140 char, url is shortened using bity,
# if still over, title is truncated.
import twitter
import urllib2
import urllib
import time
import pw # see pw_samp.py for sample.
from process import process
# from main.models import Episode, Raw_File, Cut_List
class tweet(process):
    ready_state = 5

    def shorten(self, url):
        return url  # hack because auth broke:
        ## Out[15]: '{\n  "errorCode": 203, \n  "errorMessage": "You must be authenticated to access shorten", \n  "statusCode": "ERROR"\n}'
        d=dict(version='2.0.1',login=pw.bitly['user'], apikey=pw.bitly['password'], longurl=url)
        q = urllib.urlencode(d)
        print q
        url = 'http://api.bit.ly/shorten?' + q
        data = eval(urllib2.urlopen(url).read())
        print data
        return data['results'].values()[0]['shorturl']

    def mk_tweet(self, prefix, video_name, authors, video_url):
        message = ' '.join([prefix, video_name, '-', authors, video_url])
        if len(message) > 140:
            message = ' '.join([prefix, video_name, video_url])
            if len(message) > 140:
                short_url = self.shorten(video_url)
                message = ' '.join([prefix, video_name, short_url])
                if len(message) > 140:
                    video_name = video_name[:140 - len(message) - 3] + '...'
                    message = ' '.join([prefix, video_name, short_url])
        return message

    def process_ep(self, ep):
        if self.options.verbose: print ep.id, ep.name

        show = ep.show
        client = show.client
        # use the username for the client, else use the first user in pw.py
        user = client.blip_user if client.blip_user else 'nextdayvideo'
        blip_url="http://%s.blip.tv/file/%s" % (user,ep.target)

        prefix = "#%s #VIDEO" % show.client.slug
        tweet = self.mk_tweet(prefix, ep.name, ep.authors, blip_url)

        ret=False
        if self.options.test:
            print 'test mode:', tweet
        else:
            print 'tweeting:', tweet
            # print user,password
            t = pw.twit[user]
            api = twitter.Api(consumer_key=t['consumer_key'],
                              consumer_secret=t['consumer_secret'],
                              access_token_key=t['access_key'],
                              access_token_secret=t['access_secret'] )
            if self.options.verbose: print api.VerifyCredentials()
            status = api.PostUpdate(tweet)
            d=status.AsDict()
            self.last_tweet = d
            self.last_tweet_url = "http://twitter.com/#!/squid/status/%s" % (d["id"], )
            print self.last_tweet_url
            ret=True
        return ret

if __name__ == '__main__':
    p=tweet()
    p.main()
|
Python
| 0.000001 |
@@ -2065,17 +2065,67 @@
 t mode:'
-,
+ 
+        print 'user:', user
+        print
 tweet
|
561f595337106c60c55212dd87d90ed3002de07f
|
disable pretty json (reduces size by 30%)
|
runserver.py
|
runserver.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import logging
from threading import Thread
from pogom import config
from pogom.app import Pogom
from pogom.search import search_loop, set_cover, set_location
from pogom.utils import get_args, insert_mock_data
from pogom.models import create_tables, SearchConfig
from pogom.pgoapi.utilities import get_pos_by_name
log = logging.getLogger(__name__)
def start_locator_thread(args):
    search_thread = Thread(target=search_loop, args=(args,))
    search_thread.daemon = True
    search_thread.name = 'search_thread'
    search_thread.start()

if __name__ == '__main__':
    args = get_args()

    logging.basicConfig(level=logging.INFO, format='%(asctime)s [%(module)11s] [%(levelname)7s] %(message)s')

    if not args.debug:
        logging.getLogger("peewee").setLevel(logging.INFO)
        logging.getLogger("requests").setLevel(logging.WARNING)
        logging.getLogger("pogom.pgoapi.pgoapi").setLevel(logging.WARNING)
        logging.getLogger("pogom.pgoapi.rpc_api").setLevel(logging.CRITICAL)
        logging.getLogger("pogom.models").setLevel(logging.WARNING)
        logging.getLogger("werkzeug").setLevel(logging.WARNING)
    elif args.debug == "info":
        logging.getLogger("pogom.pgoapi.pgoapi").setLevel(logging.INFO)
        logging.getLogger("pogom.models").setLevel(logging.INFO)
        logging.getLogger("werkzeug").setLevel(logging.INFO)
    elif args.debug == "debug":
        logging.getLogger("pogom.pgoapi.pgoapi").setLevel(logging.DEBUG)
        logging.getLogger("pogom.pgoapi.pgoapi").setLevel(logging.DEBUG)
        logging.getLogger("pogom.models").setLevel(logging.DEBUG)
        logging.getLogger("werkzeug").setLevel(logging.INFO)

    create_tables()
    set_location(args.location, args.radius)
    set_cover()

    if not args.mock:
        start_locator_thread(args)
    else:
        insert_mock_data(config, 6)

    app = Pogom(__name__)
    config['ROOT_PATH'] = app.root_path
    app.run(threaded=True, debug=args.debug, host=args.host, port=args.port)
|
Python
| 0 |
@@ -1961,16 +1961,70 @@
 ot_path
+    app.config['JSONIFY_PRETTYPRINT_REGULAR'] = False
 app.
|
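The hunk above adds a single config line before `app.run(...)`. `JSONIFY_PRETTYPRINT_REGULAR` is the Flask setting that controls whether `jsonify()` indents its output; disabling it emits compact single-line JSON, which is where the quoted ~30% size reduction comes from. A minimal standalone sketch (the route is hypothetical):

```python
from flask import Flask, jsonify

app = Flask(__name__)
app.config['JSONIFY_PRETTYPRINT_REGULAR'] = False  # compact JSON responses

@app.route('/status')
def status():
    # With pretty-printing off, this serializes without indentation or newlines.
    return jsonify({'scanned': True, 'pokemon': []})
```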
7b71bbd87234c8cbe8c7fa189c0617b4ca191989
|
Add tweak_billing_log command
|
silver/management/commands/tweak_billing_log.py
|
silver/management/commands/tweak_billing_log.py
|
Python
| 0.000004 |
@@ -0,0 +1,893 @@
import datetime as dt
from datetime import datetime
from optparse import make_option

from django.core.management.base import BaseCommand
from django.utils import timezone

from silver.models import Subscription, BillingLog


class Command(BaseCommand):
    option_list = BaseCommand.option_list + (
        make_option('--date',
                    action='store',
                    dest='date'),
    )

    def handle(self, *args, **options):
        if options['date']:
            date = datetime.strptime(options['date'], '%Y-%m-%d')
        else:
            now = timezone.now().date()
            date = dt.date(now.year, now.month - 1, 1)

        for subscription in Subscription.objects.all():
            self.stdout.write('Tweaking for subscription %d' % subscription.id)
            BillingLog.objects.create(subscription=subscription,
                                      billing_date=date)
|
|
1b023e8471dad22bfb6b8de0d30c0796c30e2a40
|
Copy hello.py from add_snippet branch
|
hello.py
|
hello.py
|
Python
| 0 |
@@ -0,0 +1,2272 @@
import cygroonga as grn
import datetime

with grn.Groonga():
    with grn.Context() as ctx:
        db = ctx.open_or_create_database("test.db")
        table1 = ctx.open_or_create_table("table1",
                grn.OBJ_TABLE_HASH_KEY | grn.OBJ_PERSISTENT,
                ctx.at(grn.DB_SHORT_TEXT))
        print("table1 path: %s" % table1.path())
        print("table1 name: %s" % table1.name())
        table1.open_or_create_column("column1",
                grn.OBJ_PERSISTENT | grn.OBJ_COLUMN_SCALAR,
                ctx.at(grn.DB_TEXT))
        table1.open_or_create_column("created_at",
                grn.OBJ_PERSISTENT | grn.OBJ_COLUMN_SCALAR,
                ctx.at(grn.DB_TIME))
        id, added = table1.add_record("foo")
        print("id=%d, added=%s" % (id, added))
        table1.column("column1").set_string(id, "foo1")
        table1.column("created_at").set_time(id, datetime.datetime.now())

        print("record count=%d" % table1.record_count())

        id = table1.get_record("foo")
        print("id=%d" % id)
        print("column1 value=%s" % table1.column("column1").get_string(id))
        print("created_at value=%s" % table1.column("created_at").get_time(id))

        index_table1 = ctx.open_or_create_table("table1_index",
                grn.OBJ_TABLE_PAT_KEY | grn.OBJ_KEY_NORMALIZE |
                grn.OBJ_PERSISTENT,
                ctx.at(grn.DB_SHORT_TEXT))
        index_table1.set_default_tokenizer("TokenBigram")
        index_table1.open_or_create_index_column("table1_index",
                grn.OBJ_PERSISTENT | grn.OBJ_COLUMN_INDEX |
                grn.OBJ_WITH_POSITION | grn.OBJ_WITH_SECTION,
                "table1", ["_key"])

        q = table1.create_query()
        print("after create_query")
        q.parse("_key:@foo", None, grn.OP_MATCH, grn.OP_AND,
                grn.EXPR_SYNTAX_QUERY | grn.EXPR_ALLOW_PRAGMA | grn.EXPR_ALLOW_COLUMN)
        print("after parse")
        records = table1.select(q)
        print("matched record count=%d" % records.record_count())
        with records.open_table_cursor() as c:
            while True:
                record_id = c.next()
                if not record_id:
                    break
                print("record_id=%d" % record_id)

        #db.remove()
|
|
53b6b1f4b7f58b1a7d748f67e220bd4da147df0e
|
Create hello.py
|
hello.py
|
hello.py
|
Python
| 0.999503 |
@@ -0,0 +1,30 @@
def main():
    print("Hello!")
|
|
724e86e31b6584012af5afe458e0823b9a2ca7ab
|
Create a class named "CreateSpark", which is to solve the problem of "Cannot run multiple SparkContexts at once; existing SparkContext(app=spam-msg-classifier, master=local[8]) created by __init__"
|
myclass/class_create_spark.py
|
myclass/class_create_spark.py
|
Python
| 0 |
@@ -0,0 +1,2559 @@
# -*- coding: utf-8 -*-
# !/usr/bin/python
################################### PART0 DESCRIPTION #################################
# Filename: class_save_word_to_database.py
# Description:
#


# Author: Shuai Yuan
# E-mail: [email protected]
# Create: 2015-11-17 20:43:09
# Last:
__author__ = 'yuens'

################################### PART1 IMPORT ######################################
import logging
import time
from pyspark import SparkContext, SparkConf
################################### PART2 CLASS && FUNCTION ###########################
class CreateSpark(object):
    def __init__(self, pyspark_app_name):
        self.start = time.clock()

        logging.basicConfig(level = logging.INFO,
                            format = '%(asctime)s %(levelname)5s %(filename)19s[line:%(lineno)3d] %(funcName)s %(message)s',
                            datefmt = '%y-%m-%d %H:%M:%S',
                            filename = './main.log',
                            filemode = 'a')
        console = logging.StreamHandler()
        console.setLevel(logging.INFO)

        formatter = logging.Formatter('%(asctime)s %(levelname)5s %(filename)19s[line:%(lineno)3d] %(funcName)s %(message)s')
        console.setFormatter(formatter)

        logging.getLogger('').addHandler(console)
        logging.info("START CLASS {class_name}.".format(class_name = CreateSpark.__name__))

        # Configure Spark
        try:
            conf = SparkConf().setAppName(pyspark_app_name).setMaster("local[8]")
            self.sc = SparkContext(conf = conf)
            logging.info("Start pyspark successfully.")
        except Exception as e:
            logging.error("Fail in starting pyspark.")
            logging.error(e)

    def return_spark_context(self):
        return self.sc

    def __del__(self):
        # Close SparkContext
        try:
            self.sc.stop()
            logging.info("close SparkContext successfully.")
        except Exception as e:
            logging.error(e)

        logging.info("END CLASS {class_name}.".format(class_name = CreateSpark.__name__))
        self.end = time.clock()
        logging.info("The class {class_name} run time is : {delta_time} seconds".format(class_name = CreateSpark.__name__, delta_time = self.end))
################################### PART3 CLASS TEST ##################################
"""
# initialization parameter
pyspark_app_name = "spam-msg-classifier"

SparkCreator = CreateSpark(pyspark_app_name = pyspark_app_name)
pyspark_sc = SparkCreator.return_spark_context()
logging.info("sc.version:{0}".format(pyspark_sc.version))
"""
|
|
2bd453c4a7402f24cd43b49e73d0b95e371e6654
|
add package Feature/sentieon (#9557)
|
var/spack/repos/builtin/packages/sentieon-genomics/package.py
|
var/spack/repos/builtin/packages/sentieon-genomics/package.py
|
Python
| 0 |
@@ -0,0 +1,1491 @@
# Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

import os.path
from spack import *


class SentieonGenomics(Package):
    """Sentieon provides complete solutions for secondary DNA analysis.
    Our software improves upon BWA, GATK, Mutect, and Mutect2 based pipelines.
    The Sentieon tools are deployable on any CPU-based computing system.
    Please set the path to the sentieon license server with:

    export SENTIEON_LICENSE=[FQDN]:[PORT]

    Note: A manual download is required.
    Spack will search your current directory for the download file.
    Alternatively, add this file to a mirror so that Spack can find it.
    For instructions on how to set up a mirror, see
    http://spack.readthedocs.io/en/latest/mirrors.html"""

    homepage = "https://www.sentieon.com/"
    url = "file://{0}/sentieon-genomics-201808.01.tar.gz".format(os.getcwd())

    version('201808.01', sha256='6d77bcd5a35539549b28eccae07b19a3b353d027720536e68f46dcf4b980d5f7')

    # Licensing.
    license_require = True
    license_vars = ['SENTIEON_LICENSE']

    def install(self, spec, prefix):
        install_tree('bin', prefix.bin)
        install_tree('doc', prefix.doc)
        install_tree('etc', prefix.etc)
        install_tree('lib', prefix.lib)
        install_tree('libexec', prefix.libexec)
        install_tree('share', prefix.share)
|
|
8c1cd72d11836ad913af5c3614137358ddf3efee
|
add mgmt cmd to set related user
|
sources/management/commands/set_related_user.py
|
sources/management/commands/set_related_user.py
|
Python
| 0 |
@@ -0,0 +1,1861 @@
from django.core.management.base import BaseCommand, CommandError
from django.core.mail import send_mail
from django.contrib.auth.models import User
# from sources.models import Person
import random


def set_related_user(email_address, person_id):
    obj = Person.objects.get(id=person_id)
    try:
        user_existing = User.objects.get(email=obj.email_address)
    except:
        user_existing = False
    if user_existing:
        obj.related_user = user_existing
    else:
        username = '{}{}'.format(obj.first_name, obj.last_name).lower().replace('-','')
        choices = 'abcdefghijklmnopqrstuvwxyz0123456789'
        middle_choices = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
        password = \
            ''.join([random.SystemRandom().choice(choices) for i in range(1)]) + \
            ''.join([random.SystemRandom().choice(middle_choices) for i in range(23)]) + \
            ''.join([random.SystemRandom().choice(choices) for i in range(1)])
        user_new = User.objects.create_user(username, password=password)
        user_new.email = obj.email_address
        user_new.first_name = obj.first_name
        user_new.last_name = obj.last_name
        user_new.save()


class Command(BaseCommand):
    help = 'Set the related user for a Person.'

    def add_arguments(self, parser):
        ## required
        parser.add_argument('email',
            help='Specify the user emamil.'
        )

        ## optional
        # parser.add_argument('-t' '--test',
        #     action='store_true',
        #     # type=str,
        #     dest='test',
        #     default=False,
        #     help="Specific whether it's a test or not"
        # )

    def handle(self, *args, **options):
        ## unpack args
        email_address = options['email']

        ## call the function
        email_add_user(email_address)
|
|
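A minimal invocation sketch for the management command above, assuming a configured Django project with the sources app installed and migrated; the email address and Person id are hypothetical placeholders.

from django.core.management import call_command

# Link the Person with id 42 to a matching (or newly created) auth User.
call_command('set_related_user', 'jane@example.com', '42')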
8fd8b6edb0b6e7dee542410a4649a4d69756e3e7
|
Handle strings as strings, and unicode as unicode
|
sentry/helpers.py
|
sentry/helpers.py
|
import logging
import urllib
import urllib2
import uuid
import django
from django.conf import settings
from django.utils.encoding import smart_unicode
from django.utils.hashcompat import md5_constructor
from sentry import conf
_FILTER_CACHE = None
def get_filters():
global _FILTER_CACHE
if _FILTER_CACHE is None:
filters = []
for filter_ in conf.FILTERS:
module_name, class_name = filter_.rsplit('.', 1)
try:
module = __import__(module_name, {}, {}, class_name)
filter_ = getattr(module, class_name)
except Exception:
logging.exception('Unable to import %s' % (filter_,))
continue
filters.append(filter_)
_FILTER_CACHE = filters
for f in _FILTER_CACHE:
yield f
def get_db_engine(alias='default'):
has_multidb = django.VERSION >= (1, 2)
if has_multidb:
value = settings.DATABASES[alias]['ENGINE']
else:
assert alias == 'default', 'You cannot fetch a database engine other than the default on Django < 1.2'
value = settings.DATABASE_ENGINE
return value.rsplit('.', 1)[-1]
def construct_checksum(level=logging.ERROR, class_name='', traceback='', message='', **kwargs):
checksum = md5_constructor(str(level))
checksum.update(class_name or '')
if traceback:
traceback = '\n'.join(traceback.split('\n')[:-3])
message = traceback or message
if isinstance(message, unicode):
message = message.encode('utf-8', 'replace')
checksum.update(message)
return checksum.hexdigest()
def varmap(func, var):
if isinstance(var, dict):
return dict((k, varmap(func, v)) for k, v in var.iteritems())
elif isinstance(var, (list, tuple)):
return [varmap(func, f) for f in var]
else:
return func(var)
def transform(value):
# TODO: make this extendable
# TODO: include some sane defaults, like UUID
# TODO: dont coerce strings to unicode, leave them as strings
if isinstance(value, (tuple, list, set, frozenset)):
return type(value)(transform(o) for o in value)
elif isinstance(value, uuid.UUID):
return repr(value)
elif isinstance(value, dict):
return dict((k, transform(v)) for k, v in value.iteritems())
elif isinstance(value, basestring):
try:
value = unicode(value)
except:
value = force_unicode(value)
return value
elif not isinstance(value, (int, bool)) and value is not None:
# XXX: we could do transform(repr(value)) here
return force_unicode(value)
return value
def force_unicode(value):
try:
value = smart_unicode(value)
except (UnicodeEncodeError, UnicodeDecodeError):
value = '(Error decoding value)'
except Exception: # in some cases we get a different exception
value = smart_unicode(type(value))
return value
def get_installed_apps():
"""
Generate a list of modules in settings.INSTALLED_APPS.
"""
out = set()
for app in settings.INSTALLED_APPS:
out.add(app)
return out
class _Missing(object):
def __repr__(self):
return 'no value'
def __reduce__(self):
return '_missing'
_missing = _Missing()
class cached_property(object):
# This is borrowed from werkzeug : http://bytebucket.org/mitsuhiko/werkzeug-main
"""A decorator that converts a function into a lazy property. The
function wrapped is called the first time to retrieve the result
and then that calculated result is used the next time you access
the value::
class Foo(object):
@cached_property
def foo(self):
# calculate something important here
return 42
The class has to have a `__dict__` in order for this property to
work.
.. versionchanged:: 0.6
the `writeable` attribute and parameter was deprecated. If a
cached property is writeable or not has to be documented now.
For performance reasons the implementation does not honor the
writeable setting and will always make the property writeable.
"""
# implementation detail: this property is implemented as non-data
# descriptor. non-data descriptors are only invoked if there is
# no entry with the same name in the instance's __dict__.
# this allows us to completely get rid of the access function call
# overhead. If one choses to invoke __get__ by hand the property
# will still work as expected because the lookup logic is replicated
# in __get__ for manual invocation.
def __init__(self, func, name=None, doc=None, writeable=False):
if writeable:
from warnings import warn
warn(DeprecationWarning('the writeable argument to the '
'cached property is a noop since 0.6 '
'because the property is writeable '
'by default for performance reasons'))
self.__name__ = name or func.__name__
self.__module__ = func.__module__
self.__doc__ = doc or func.__doc__
self.func = func
def __get__(self, obj, type=None):
if obj is None:
return self
value = obj.__dict__.get(self.__name__, _missing)
if value is _missing:
value = self.func(obj)
obj.__dict__[self.__name__] = value
return value
def urlread(url, get={}, post={}, headers={}, timeout=None):
req = urllib2.Request(url, urllib.urlencode(get), headers=headers)
try:
response = urllib2.urlopen(req, urllib.urlencode(post), timeout).read()
except:
response = urllib2.urlopen(req, urllib.urlencode(post)).read()
return response
|
Python
| 0.998472 |
@@ -2349,18 +2349,84 @@
ue,
-basestring
+unicode):%0A return force_unicode(value)%0A elif isinstance(value, str
):%0A
@@ -2453,23 +2453,18 @@
-value = unicode
+return str
(val
@@ -2495,23 +2495,22 @@
-value =
+return
force_u
@@ -2523,37 +2523,16 @@
(value)%0A
- return value%0A
elif
|
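For reference, a sketch of the behavior this diff establishes in transform(), under Python 2 where str and unicode are distinct types; the import path assumes the module shown above.

import uuid
from sentry.helpers import transform

assert transform('plain bytes') == 'plain bytes'       # str stays str, no coercion
assert isinstance(transform(u'unic\xf6de'), unicode)   # unicode is force-decoded, stays unicode
assert isinstance(transform(uuid.uuid4()), str)        # UUIDs are still repr()'d to a str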
208077afd9b1ba741df6bccafdd5f008e7b75e38
|
Add nftables test
|
meta-iotqa/lib/oeqa/runtime/sanity/nftables.py
|
meta-iotqa/lib/oeqa/runtime/sanity/nftables.py
|
Python
| 0 |
@@ -0,0 +1,2930 @@
+import os%0Aimport subprocess%0Afrom time import sleep%0Afrom oeqa.oetest import oeRuntimeTest%0A%0Aclass NftablesTest(oeRuntimeTest):%0A%0A def check_ssh_connection(self):%0A '''Check SSH connection to DUT port 2222'''%0A process = subprocess.Popen((%22ssh -o UserKnownHostsFile=/dev/null %22 %5C%0A %22-o ConnectTimeout=3 %22 %5C%0A %22-o StrictHostKeyChecking=no root@%22 + %5C%0A self.target.ip +%22 -p 2222 ls%22).split(),%0A stdout=subprocess.PIPE,%0A stderr=subprocess.STDOUT)%0A output, err = process.communicate()%0A output = output.decode(%22utf-8%22)%0A returncode = process.returncode%0A return returncode, output%0A%0A def add_test_table(self):%0A self.target.run(%22nft add table ip test%22)%0A self.target.run(%22nft add chain ip test input %7Btype filter hook input priority 0%5C;%7D%22)%0A self.target.run(%22nft add chain ip test donothing%22)%0A self.target.run(%22nft add chain ip test prerouting %7Btype nat hook prerouting priority 0 %5C;%7D%22)%0A self.target.run(%22nft add chain ip test postrouting %7Btype nat hook postrouting priority 100 %5C;%7D%22)%0A%0A def delete_test_table(self):%0A self.target.run(%22nft delete table ip test%22)%0A%0A def test_reject(self):%0A '''Test rejecting SSH with nftables'''%0A self.add_test_table()%0A self.target.run(%22nft add rule ip test input tcp dport 2222 reject%22)%0A self.target.run(%22nft add rule ip test input goto donothing%22)%0A returncode, output = self.check_ssh_connection()%0A self.delete_test_table()%0A self.assertIn(%22Connection refused%22, output, msg=%22Error message: %25s%22 %25 output)%0A%0A def test_drop(self):%0A '''Test dropping SSH with nftables'''%0A self.add_test_table()%0A self.target.run(%22nft add rule ip test input tcp dport 2222 drop%22)%0A self.target.run(%22nft add rule ip test input goto donothing%22)%0A returncode, output = self.check_ssh_connection()%0A self.delete_test_table()%0A self.assertIn(%22Connection timed out%22, output, msg=%22Error message: %25s%22 %25 output)%0A%0A def test_redirect(self):%0A '''Test redirecting port'''%0A # Check that SSH can't connect to port 2222%0A returncode, output = self.check_ssh_connection()%0A self.assertNotEqual(returncode, 0, msg=%22Error message: %25s%22 %25 output)%0A%0A self.add_test_table()%0A self.target.run(%22nft add rule ip test prerouting tcp dport 2222 redirect to 22%22)%0A # Check that SSH can connect to port 2222%0A returncode, output = self.check_ssh_connection()%0A self.assertEqual(returncode, 0, msg=%22Error message: %25s%22 %25 output)%0A%0A self.delete_test_table()%0A # Check that SSH can't connect to port 2222%0A returncode, output = self.check_ssh_connection()%0A self.assertNotEqual(returncode, 0, msg=%22Error message: %25s%22 %25 output)%0A
|
|
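A standalone sketch of the connection probe these tests rely on, without the oeRuntimeTest harness; the host address below is hypothetical.

import subprocess

def ssh_refused(host, port=2222):
    # Mirrors check_ssh_connection(): a short-timeout SSH attempt whose combined
    # output distinguishes reject ('Connection refused') from drop (timed out).
    proc = subprocess.Popen(
        ['ssh', '-o', 'UserKnownHostsFile=/dev/null', '-o', 'ConnectTimeout=3',
         '-o', 'StrictHostKeyChecking=no', '-p', str(port), 'root@' + host, 'ls'],
        stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    output, _ = proc.communicate()
    return 'Connection refused' in output.decode('utf-8')

print(ssh_refused('192.0.2.1'))  # hypothetical DUT address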
3be145af359df5bcf928da1b984af8635ea33c27
|
add model for parcels, temp until I figure out psql migrations in flask
|
farmsList/farmsList/public/models.py
|
farmsList/farmsList/public/models.py
|
Python
| 0 |
@@ -0,0 +1,445 @@
+# -*- coding: utf-8 -*-%0Afrom farmsList.database import (%0A    Column,%0A    db,%0A    Model,%0A    ReferenceCol,%0A    relationship,%0A    SurrogatePK,%0A)%0A%0A%0Aclass Parcel(SurrogatePK, Model):%0A    __tablename__ = 'parcels'%0A    name = Column(db.String(80), unique=True, nullable=False)%0A%0A    def __init__(self, name, **kwargs):%0A        db.Model.__init__(self, name=name, **kwargs)%0A%0A    def __repr__(self):%0A        return '%3CParcel(%7Bname%7D)%3E'.format(name=self.name)%0A
|
|
8a3425209090cb9acc6353ab6fccc0ec31cae804
|
permutations II
|
backtracking/47.py
|
backtracking/47.py
|
Python
| 0.999946 |
@@ -0,0 +1,781 @@
+class Solution:%0A    def permuteUnique(self, nums):%0A        %22%22%22%0A        :type nums: List%5Bint%5D%0A        :rtype: List%5BList%5Bint%5D%5D%0A        %22%22%22%0A        ret = %5B%5D%0A        nums.sort()%0A        used = %5B0%5D*len(nums)%0A        self.dfs(ret, %5B%5D, nums, used)%0A        return ret%0A    %0A    def dfs(self, ret, temp, nums, used):%0A        if len(temp) == len(nums):%0A            ret.append(temp)%0A            return%0A        %0A        for i in range(0, len(nums)):%0A            if used%5Bi%5D:%0A                continue%0A            # nums may contain duplicates, so duplicate values must be treated as one group:%0A            # when nums%5Bi%5D == nums%5Bi-1%5D, nums%5Bi%5D may only be used once nums%5Bi-1%5D has been used%0A            if i %3E 0 and nums%5Bi%5D == nums%5Bi-1%5D and used%5Bi-1%5D:%0A                continue%0A            used%5Bi%5D = 1%0A            self.dfs(ret, temp + %5Bnums%5Bi%5D%5D, nums, used)%0A            used%5Bi%5D = 0%0A%0A
|
|
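A quick usage check of the dedup rule implemented above: with the input sorted, each distinct ordering is produced exactly once.

print(Solution().permuteUnique([1, 1, 2]))
# -> [[1, 1, 2], [1, 2, 1], [2, 1, 1]]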
a5d63ec0f8f192aaeae8b9a7f1cf423d18de25dc
|
Add test runner to handle issue with import path
|
server/test.py
|
server/test.py
|
Python
| 0 |
@@ -0,0 +1,40 @@
+import pytest%0A%0Apytest.main('-x tests/')%0A
|
|
8632b60718fa353797ffc53281e57a37caf9452f
|
Add config command for setting the address of rf sensors.
|
set_address.py
|
set_address.py
|
Python
| 0 |
@@ -0,0 +1,425 @@
+import zmq%0Aimport time%0Aimport sys%0Aprint sys.argv%5B1:%5D%0A%0A# ZeroMQ Context%0Acontext = zmq.Context()%0A%0Asock_live = context.socket(zmq.PUB)%0Asock_live.connect(%22tcp://%22+sys.argv%5B1%5D)%0A%0Atime.sleep(1)%0A# Send multipart only allows send byte arrays, so we convert everything to strings before sending%0A# %5BTODO%5D add .encode('UTF-8') when we switch to python3.%0Asock_live.send_multipart(%5B%22set-address%22,'pair',sys.argv%5B2%5D,%220%22%5D)%0Asock_live.close()%0A
|
|
f5f6bc0999d5b6f065adb81982ce3a322e1ab987
|
add regression test for fit_spectrum() Python 3.x issue
|
nmrglue/analysis/tests/test_analysis_linesh.py
|
nmrglue/analysis/tests/test_analysis_linesh.py
|
Python
| 0 |
@@ -0,0 +1,1339 @@
+import numpy as np%0A%0A%0Aimport nmrglue as ng%0Afrom nmrglue.analysis.linesh import fit_spectrum%0A%0A%0Adef test_fit_spectrum():%0A _bb = np.random.uniform(0, 77, size=65536)%0A lineshapes = %5B'g'%5D%0A params = %5B%5B(13797.0, 2.2495075273313034)%5D,%0A %5B(38979.0, 5.8705185693227664)%5D,%0A %5B(39066.0, 5.7125954296137103)%5D,%0A %5B(39153.0, 5.7791485451283791)%5D,%0A %5B(41649.0, 4.260242375400459)%5D,%0A %5B(49007.0, 4.2683625950679964)%5D,%0A %5B(54774.0, 3.2907139764685569)%5D%5D%0A amps = %5B35083.008667, 32493.824402, 32716.156556, 33310.711914, 82682.928405,%0A 82876.544313, 85355.658142%5D%0A bounds = %5B%5B%5B(None, None), (0, None)%5D%5D, %5B%5B(None, None), (0, None)%5D%5D,%0A %5B%5B(None, None), (0, None)%5D%5D, %5B%5B(None, None), (0, None)%5D%5D,%0A %5B%5B(None, None), (0, None)%5D%5D, %5B%5B(None, None), (0, None)%5D%5D,%0A %5B%5B(None, None), (0, None)%5D%5D%5D%0A ampbounds = %5BNone, None, None, None, None, None, None%5D%0A centers = %5B(13797.0,), (38979.0,), (39066.0,), (39153.0,), (41649.0,),%0A (49007.0,), (54774.0,)%5D%0A rIDs = %5B1, 2, 3, 4, 5, 6, 7%5D%0A box_width = (5,)%0A error_flag = False%0A verb = False%0A%0A params_best, amp_best, iers = ng.linesh.fit_spectrum(%0A _bb, lineshapes, params, amps, bounds, ampbounds, centers,%0A rIDs, box_width, error_flag, verb=False)%0A
|
|
c5a2167a63516c23390263408fcd2c9a4f654fc8
|
Add tests for the parse method of the spider
|
webcomix/tests/test_comic_spider.py
|
webcomix/tests/test_comic_spider.py
|
Python
| 0 |
@@ -0,0 +1,1332 @@
+from webcomix.comic_spider import ComicSpider%0A%0A%0Adef test_parse_yields_good_page(mocker):%0A mock_response = mocker.patch('scrapy.http.Response')%0A mock_response.urljoin.return_value = %22http://xkcd.com/3/%22%0A mock_response.url = %22http://xkcd.com/2/%22%0A mock_selector = mocker.patch('scrapy.selector.SelectorList')%0A mock_response.xpath.return_value = mock_selector%0A mock_selector.extract_first.side_effect = %5B%0A '//imgs.xkcd.com/comics/tree_cropped_(1).jpg', 'xkcd.com/3/'%0A %5D%0A%0A spider = ComicSpider()%0A result = spider.parse(mock_response)%0A results = list(result)%0A assert len(results) == 2%0A assert results%5B0%5D.get(%0A 'url') == %22http://imgs.xkcd.com/comics/tree_cropped_(1).jpg%22%0A assert results%5B1%5D.url == %22http://xkcd.com/3/%22%0A%0A%0Adef test_parse_yields_bad_page(mocker):%0A mock_response = mocker.patch('scrapy.http.Response')%0A mock_response.urljoin.return_value = %22http://xkcd.com/3/%22%0A mock_response.url = %22http://xkcd.com/2/%22%0A mock_selector = mocker.patch('scrapy.selector.SelectorList')%0A mock_response.xpath.return_value = mock_selector%0A mock_selector.extract_first.side_effect = %5BNone, 'xkcd.com/3/'%5D%0A%0A spider = ComicSpider()%0A result = spider.parse(mock_response)%0A results = list(result)%0A assert len(results) == 1%0A assert results%5B0%5D.url == %22http://xkcd.com/3/%22%0A
|
|
fd5da951feee92c055853c63b698b44397ead6be
|
Add save function for use across the application
|
app/db_instance.py
|
app/db_instance.py
|
Python
| 0 |
@@ -0,0 +1,165 @@
+from app import db%0A%0Adef save(data):%0A try:%0A print(data)%0A db.session.add(data)%0A db.session.commit()%0A except Exception as e:%0A raise e%0A
|
|
585fec12673ab0207f5b641a9ba0df4a510667ac
|
Add harvester for mblwhoilibrary
|
scrapi/harvesters/mblwhoilibrary.py
|
scrapi/harvesters/mblwhoilibrary.py
|
Python
| 0.000001 |
@@ -0,0 +1,871 @@
+'''%0AHarvester for the WHOAS at MBLWHOI Library for the SHARE project%0A%0AExample API call: http://darchive.mblwhoilibrary.org/oai/request?verb=ListRecords&metadataPrefix=oai_dc%0A'''%0Afrom __future__ import unicode_literals%0A%0Afrom scrapi.base import helpers%0Afrom scrapi.base import OAIHarvester%0A%0A%0Aclass MblwhoilibraryHarvester(OAIHarvester):%0A short_name = 'mblwhoilibrary'%0A long_name = 'WHOAS at MBLWHOI Library'%0A url = 'http://darchive.mblwhoilibrary.org/oai/request'%0A%0A @property%0A def schema(self):%0A return helpers.updated_schema(self._schema, %7B%0A %22uris%22: %7B%0A %22objectUris%22: ('//dc:relation/node()', helpers.oai_extract_dois)%0A %7D%0A %7D)%0A%0A base_url = 'http://darchive.mblwhoilibrary.org/oai/request'%0A property_list = %5B'date', 'relation', 'identifier', 'type', 'format', 'setSpec'%5D%0A timezone_granularity = True%0A
|
|
f5cc9c86b1cfbb2cda1b4c1c4c8656a6ca7a2a7f
|
Create graphingWIP.py
|
src/graphingWIP.py
|
src/graphingWIP.py
|
Python
| 0 |
@@ -0,0 +1,664 @@
+# -*- coding: utf-8 -*-%0Aimport matplotlib.pyplot as plt%0Aimport matplotlib.dates as md%0Aimport dateutil%0A%0A# create empty dynamic arrays %0Atemp_x = %5B%5D%0Ax = %5B%5D%0Ay = %5B%5D%0A%0Af = open(%22temp.log%22, %22r%22) # open log folder%0A%0Afor line in f: # load x and y values%0A%09temp_line = line.split('=')%0A%09temp_x.append(temp_line%5B0%5D%5B:-1%5D) # trim spaces%0A%09y.append(float(temp_line%5B1%5D%5B1:-2%5D)) # trim C%0A%09%0Af.close()%0Ax = %5Bdateutil.parser.parse(s) for s in temp_x%5D%0A%0Aax = plt.gca()%0Axfmt = md.DateFormatter('%25d/%25m/%25Y %25H:%25M:%25S')%0Aax.xaxis.set_major_formatter(xfmt)%0A%0A%0Aplt.plot(x, y)%0A%0Aplt.title('Temprature against time')%0Aplt.xlabel('Date and Time (DD/MM/YYYY HH:MM:SS)')%0Aplt.ylabel('Temprature C')%0Aplt.show()%0A
|
|
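The split('=') and slicing logic above implies temp.log lines of roughly this shape; the sample line is hypothetical, and any dateutil-parseable timestamp would work.

line = '21/03/2017 14:05:00 = 23.5C\n'   # assumed log-line format
stamp, value = line.split('=')
print(stamp[:-1], float(value[1:-2]))    # -> 21/03/2017 14:05:00 23.5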
534fcff9f812df4cef273ca7853df12647b25d06
|
Add preliminary metrics file and import some from sklearn
|
metrics.py
|
metrics.py
|
Python
| 0 |
@@ -0,0 +1,127 @@
+from sklearn.metrics import roc_curve as roc, roc_auc_score as auc%0A%0Adef enrichment_factor():%0A    pass%0A%0Adef log_auc():%0A    pass%0A%0A
|
|
40e9825ee0a2ccf7c3e92d4fd6599c1976a240a3
|
Add deprecated public `graphql` module
|
fbchat/graphql.py
|
fbchat/graphql.py
|
Python
| 0.000001 |
@@ -0,0 +1,730 @@
+# -*- coding: UTF-8 -*-%0A%22%22%22This file is here to maintain backwards compatability.%22%22%22%0Afrom __future__ import unicode_literals%0A%0Afrom .models import *%0Afrom .utils import *%0Afrom ._graphql import (%0A FLAGS,%0A WHITESPACE,%0A ConcatJSONDecoder,%0A graphql_color_to_enum,%0A get_customization_info,%0A graphql_to_sticker,%0A graphql_to_attachment,%0A graphql_to_extensible_attachment,%0A graphql_to_subattachment,%0A graphql_to_live_location,%0A graphql_to_poll,%0A graphql_to_poll_option,%0A graphql_to_plan,%0A graphql_to_quick_reply,%0A graphql_to_message,%0A graphql_to_user,%0A graphql_to_thread,%0A graphql_to_group,%0A graphql_to_page,%0A graphql_queries_to_json,%0A graphql_response_to_json,%0A GraphQL,%0A)%0A
|
|
f98aa5f336cd81ad55bc46122821df3ad314a4cb
|
Add py-dockerpy-creds (#19198)
|
var/spack/repos/builtin/packages/py-dockerpy-creds/package.py
|
var/spack/repos/builtin/packages/py-dockerpy-creds/package.py
|
Python
| 0 |
@@ -0,0 +1,1294 @@
+# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other%0A# Spack Project Developers. See the top-level COPYRIGHT file for details.%0A#%0A# SPDX-License-Identifier: (Apache-2.0 OR MIT)%0A%0Afrom spack import *%0A%0A%0Aclass PyDockerpyCreds(PythonPackage):%0A    %22%22%22Python bindings for the docker credentials store API %22%22%22%0A%0A    homepage = %22https://github.com/shin-/dockerpy-creds%22%0A    url      = %22https://github.com/shin-/dockerpy-creds/archive/0.4.0.tar.gz%22%0A%0A    version('0.4.0', sha256='c76c2863c6e9a31b8f70ee5b8b0e5ac6860bfd422d930c04a387599e4272b4b9')%0A    version('0.3.0', sha256='3660a5e9fc7c2816ab967e4bdb4802f211e35011357ae612a601d6944721e153')%0A    version('0.2.3', sha256='7278a7e3c904ccea4bcc777b991a39cac9d4702bfd7d76b95ff6179500d886c4')%0A    version('0.2.2', sha256='bb26b8a8882b9d115a43169663cd9557d132a68147d9a1c77cb4a3ffc9897398')%0A    version('0.2.1', sha256='7882efd95f44b5df166b4e34c054b486dc7287932a49cd491edf406763695351')%0A    version('0.2.0', sha256='f2838348e1175079e3062bf0769b9fa5070c29f4d94435674b9f8a76144f4e5b')%0A    version('0.1.0', sha256='f7ab290cb536e7ef1c774d4eb5df86237e579a9c7a87805da39ff07bd14e0aff')%0A%0A    depends_on('python@2.7:2.8,3.4:', type=('build', 'run'))%0A    depends_on('py-setuptools', type='build')%0A    depends_on('py-six', type=('build', 'run'))%0A
|
|
07a1612250a9c3b2de1ffe53fb916a8cff153c3f
|
add count of collisions
|
findCollisions.py
|
findCollisions.py
|
Python
| 0.000017 |
@@ -0,0 +1,567 @@
+from collections import Counter%0A%0Adef countCollisions(entries):%0A collisions = %5Bk for k,v in Counter(entries).items() if v%3E1%5D%0A num_collisions = len(collisions)%0A print(num_collisions,'word collisions:%5Cn',collisions)%0A return num_collisions%0A%0Adef countCollisionsInFile(filename):%0A entries = %5B%5D%0A with open(filename,'r') as f:%0A for line in f:%0A # get just the words%0A entries.append(line.split(',')%5B1%5D.replace(' %5C'',''))%0A return countCollisions(entries)%0A%0Adef countCollisionsInList(entries):%0A return countCollisions(entries)%0A
|
|
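Usage sketch for the helpers above: each duplicated word is reported once and the count of distinct collisions is returned.

words = ['cat', 'dog', 'cat', 'bird', 'dog']
n = countCollisionsInList(words)   # prints: 2 word collisions: ['cat', 'dog']
assert n == 2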
4a0fa1028f22944f30e39c65806f0d123e18420f
|
Create input.py
|
input.py
|
input.py
|
Python
| 0 |
@@ -0,0 +1,102 @@
+ckey=%22%22%0Acsecret=%22%22%0Aatoken=%22%22%0Aasecret=%22%22%0A%0Aquery='' #Add keyword for which you want to start the miner%0A
|
|
98499f07c6dcccba3605e9ab9c8eaef9463b0634
|
Add some validators
|
indra/tools/stmt_validator.py
|
indra/tools/stmt_validator.py
|
Python
| 0.000002 |
@@ -0,0 +1,524 @@
+class StatementValidator:%0A    def __init__(self):%0A        pass%0A%0A%0Aclass DbRefsEntryValidator:%0A    @staticmethod%0A    def validate(entry):%0A        raise NotImplementedError()%0A%0A%0Aclass ChebiPrefix(DbRefsEntryValidator):%0A    @staticmethod%0A    def validate(entry):%0A        return not entry or entry.startswith('CHEBI')%0A%0A%0Aclass UniProtIDNotList(DbRefsEntryValidator):%0A    @staticmethod%0A    def validate(entry):%0A        if not isinstance(entry, str):%0A            return False%0A        if ',' in entry:%0A            return False%0A        return True
|
|
ba1f04337d0653d4808427b5d07ed8673526b315
|
add mygpo.wsgi
|
mygpo.wsgi
|
mygpo.wsgi
|
Python
| 0.000135 |
@@ -0,0 +1,1229 @@
+#!/usr/bin/python%0A# -*- coding: utf-8 -*-%0A# my.gpodder.org FastCGI handler for lighttpd (default setup)%0A#%0A# This file is part of my.gpodder.org.%0A#%0A# my.gpodder.org is free software: you can redistribute it and/or modify it%0A# under the terms of the GNU Affero General Public License as published by%0A# the Free Software Foundation, either version 3 of the License, or (at your%0A# option) any later version.%0A#%0A# my.gpodder.org is distributed in the hope that it will be useful, but%0A# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY%0A# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public%0A# License for more details.%0A#%0A# You should have received a copy of the GNU Affero General Public License%0A# along with my.gpodder.org. If not, see %3Chttp://www.gnu.org/licenses/%3E.%0A%0A%0Aimport sys%0Aimport os%0A%0Aimport _strptime%0A%0A# Add this directory as custom Python path%0Amygpo_root = os.path.dirname(os.path.abspath(__file__))%0Asys.path.insert(0, mygpo_root)%0Asys.path.insert(0, os.path.join(mygpo_root, 'lib'))%0A%0A# Set the DJANGO_SETTINGS_MODULE environment variable%0Aos.environ%5B'DJANGO_SETTINGS_MODULE'%5D = 'mygpo.settings'%0A%0Aimport django.core.handlers.wsgi%0Aapplication = django.core.handlers.wsgi.WSGIHandler()%0A
|
|
44da905cb7e0d81267d75ccfcb7ffa2669f02aeb
|
Change format index to support Python 2.6
|
flask/wrappers.py
|
flask/wrappers.py
|
# -*- coding: utf-8 -*-
"""
flask.wrappers
~~~~~~~~~~~~~~
Implements the WSGI wrappers (request and response).
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from werkzeug.wrappers import Request as RequestBase, Response as ResponseBase
from werkzeug.exceptions import BadRequest
from . import json
from .globals import _request_ctx_stack, current_app
_missing = object()
def _get_data(req, cache):
getter = getattr(req, 'get_data', None)
if getter is not None:
return getter(cache=cache)
return req.data
class Request(RequestBase):
"""The request object used by default in Flask. Remembers the
matched endpoint and view arguments.
It is what ends up as :class:`~flask.request`. If you want to replace
the request object used you can subclass this and set
:attr:`~flask.Flask.request_class` to your subclass.
The request object is a :class:`~werkzeug.wrappers.Request` subclass and
provides all of the attributes Werkzeug defines plus a few Flask
specific ones.
"""
#: The internal URL rule that matched the request. This can be
#: useful to inspect which methods are allowed for the URL from
#: a before/after handler (``request.url_rule.methods``) etc.
#:
#: .. versionadded:: 0.6
url_rule = None
#: A dict of view arguments that matched the request. If an exception
#: happened when matching, this will be ``None``.
view_args = None
#: If matching the URL failed, this is the exception that will be
#: raised / was raised as part of the request handling. This is
#: usually a :exc:`~werkzeug.exceptions.NotFound` exception or
#: something similar.
routing_exception = None
# Switched by the request context until 1.0 to opt in deprecated
# module functionality.
_is_old_module = False
@property
def max_content_length(self):
"""Read-only view of the ``MAX_CONTENT_LENGTH`` config key."""
ctx = _request_ctx_stack.top
if ctx is not None:
return ctx.app.config['MAX_CONTENT_LENGTH']
@property
def endpoint(self):
"""The endpoint that matched the request. This in combination with
:attr:`view_args` can be used to reconstruct the same or a
modified URL. If an exception happened when matching, this will
be ``None``.
"""
if self.url_rule is not None:
return self.url_rule.endpoint
@property
def module(self):
"""The name of the current module if the request was dispatched
to an actual module. This is deprecated functionality, use blueprints
instead.
"""
from warnings import warn
warn(DeprecationWarning('modules were deprecated in favor of '
'blueprints. Use request.blueprint '
'instead.'), stacklevel=2)
if self._is_old_module:
return self.blueprint
@property
def blueprint(self):
"""The name of the current blueprint"""
if self.url_rule and '.' in self.url_rule.endpoint:
return self.url_rule.endpoint.rsplit('.', 1)[0]
@property
def json(self):
"""If the mimetype is :mimetype:`application/json` this will contain the
parsed JSON data. Otherwise this will be ``None``.
The :meth:`get_json` method should be used instead.
"""
from warnings import warn
warn(DeprecationWarning('json is deprecated. '
'Use get_json() instead.'), stacklevel=2)
return self.get_json()
@property
def is_json(self):
"""Indicates if this request is JSON or not. By default a request
is considered to include JSON data if the mimetype is
:mimetype:`application/json` or :mimetype:`application/*+json`.
.. versionadded:: 0.11
"""
mt = self.mimetype
if mt == 'application/json':
return True
if mt.startswith('application/') and mt.endswith('+json'):
return True
return False
def get_json(self, force=False, silent=False, cache=True):
"""Parses the incoming JSON request data and returns it. If
parsing fails the :meth:`on_json_loading_failed` method on the
request object will be invoked. By default this function will
only load the json data if the mimetype is :mimetype:`application/json`
but this can be overridden by the `force` parameter.
:param force: if set to ``True`` the mimetype is ignored.
:param silent: if set to ``True`` this method will fail silently
and return ``None``.
:param cache: if set to ``True`` the parsed JSON data is remembered
on the request.
"""
rv = getattr(self, '_cached_json', _missing)
if rv is not _missing:
return rv
if not (force or self.is_json):
return None
# We accept a request charset against the specification as
# certain clients have been using this in the past. This
# fits our general approach of being nice in what we accept
# and strict in what we send out.
request_charset = self.mimetype_params.get('charset')
try:
data = _get_data(self, cache)
if request_charset is not None:
rv = json.loads(data, encoding=request_charset)
else:
rv = json.loads(data)
except ValueError as e:
if silent:
rv = None
else:
rv = self.on_json_loading_failed(e)
if cache:
self._cached_json = rv
return rv
def on_json_loading_failed(self, e):
"""Called if decoding of the JSON data failed. The return value of
this method is used by :meth:`get_json` when an error occurred. The
default implementation just raises a :class:`BadRequest` exception.
.. versionchanged:: 0.10
Removed buggy previous behavior of generating a random JSON
response. If you want that behavior back you can trivially
add it by subclassing.
.. versionadded:: 0.8
"""
ctx = _request_ctx_stack.top
if ctx is not None:
if ctx.app.config.get('DEBUG', False):
raise BadRequest('Failed to decode JSON object: {}'.format(e))
raise BadRequest()
def _load_form_data(self):
RequestBase._load_form_data(self)
# In debug mode we're replacing the files multidict with an ad-hoc
# subclass that raises a different error for key errors.
ctx = _request_ctx_stack.top
if ctx is not None and ctx.app.debug and \
self.mimetype != 'multipart/form-data' and not self.files:
from .debughelpers import attach_enctype_error_multidict
attach_enctype_error_multidict(self)
class Response(ResponseBase):
"""The response object that is used by default in Flask. Works like the
response object from Werkzeug but is set to have an HTML mimetype by
default. Quite often you don't have to create this object yourself because
:meth:`~flask.Flask.make_response` will take care of that for you.
If you want to replace the response object used you can subclass this and
set :attr:`~flask.Flask.response_class` to your subclass.
"""
default_mimetype = 'text/html'
|
Python
| 0.000002 |
@@ -6505,16 +6505,17 @@
bject: %7B
+0
%7D'.forma
|
1086259090a396b2a2ed40788d1cb8c8ff7c95f3
|
fix the fixme
|
src/robotide/plugins/connector.py
|
src/robotide/plugins/connector.py
|
# Copyright 2008-2009 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robotide.context import LOG, SETTINGS
from robotide import utils
def PluginFactory(application, plugin_class):
try:
plugin = plugin_class(application)
except Exception, err:
return BrokenPlugin(str(err), plugin_class)
else:
return PluginConnector(application, plugin)
class _PluginConnector(object):
def __init__(self, name, doc='', error=None):
self.name = name
self.doc = doc
self.error = error
self.active = False
self.metadata = {}
self.config_panel = lambda self: None
class PluginConnector(_PluginConnector):
def __init__(self, application, plugin):
_PluginConnector.__init__(self, plugin.name, plugin.doc)
self._plugin = plugin
# FIXME: breaks in case the section does not exist
self._settings = SETTINGS['Plugins'][plugin.name]
self.config_panel = plugin.config_panel
self.metadata = plugin.metadata
if self._settings.get('_active', plugin.initially_active):
self.activate()
def activate(self):
self._plugin.activate()
self.active = True
self._settings.set('_active', True)
def deactivate(self):
self._plugin.deactivate()
self.active = False
self._settings.set('_active', False)
class BrokenPlugin(_PluginConnector):
def __init__(self, error, plugin_class):
name = utils.name_from_class(plugin_class, 'Plugin')
doc = 'This plugin is disabled because it failed to load properly.\n' \
+ 'Error: ' + error
_PluginConnector.__init__(self, name, doc=doc, error=error)
LOG.error("Taking %s plugin into use failed:\n%s" % (name, error))
|
Python
| 0.000023 |
@@ -1365,67 +1365,8 @@
gin%0A
- # FIXME: breaks in case the section does not exist%0A
@@ -1405,17 +1405,29 @@
lugins'%5D
-%5B
+.add_section(
plugin.n
@@ -1429,17 +1429,17 @@
gin.name
-%5D
+)
%0A
|
0d22f1ab7f4c83af280edb799f863fa0f46ea326
|
Create generic views for index/login
|
app/views.py
|
app/views.py
|
Python
| 0 |
@@ -0,0 +1,752 @@
+from flask import render_template, flash, redirect%0Afrom app import app%0Afrom .forms.login import LoginForm%0A%0A%0A@app.route('/')%0A@app.route('/index')%0Adef index():%0A    user = %7B'nickname': 'Mark'%7D # fake user%0A    return render_template(%22index.html%22,%0A                           title='Home',%0A                           user=user)%0A%0A%0A@app.route('/login', methods=%5B'GET', 'POST'%5D)%0Adef login():%0A    form = LoginForm()%0A    if form.validate_on_submit():%0A        # Debug Print%0A        flash('Login requested for Username=%22%25s%22, remember_me=%25s' %25%0A              (form.username.data, str(form.remember_me.data)))%0A        return redirect('/index')%0A    return render_template('login.html',%0A                           title='Sign In',%0A                           form=form)%0A
|
|
45939892a21bbf11ddcd1400d26cf2e94fa8ebac
|
add nox tests.
|
noxfile.py
|
noxfile.py
|
Python
| 0 |
@@ -0,0 +1,1104 @@
+import nox%0A%0APYTHON_VERSIONS = %5B%223.6%22, %223.7%22, %223.8%22%5D%0APACKAGE = %22abilian%22%0A%0A%0A@nox.session(python=%22python3.6%22)%0Adef lint(session):%0A    # session.env%5B%22LC_ALL%22%5D = %22en_US.UTF-8%22%0A    session.install(%22poetry%22, %22psycopg2-binary%22)%0A    session.run(%22poetry%22, %22install%22, %22-q%22)%0A    session.run(%22yarn%22, external=%22True%22)%0A    session.run(%22make%22, %22lint-ci%22)%0A%0A%0A@nox.session(python=PYTHON_VERSIONS)%0Adef pytest(session):%0A    # session.env%5B%22LC_ALL%22%5D = %22en_US.UTF-8%22%0A    session.install(%22psycopg2-binary%22)%0A%0A    cmd = %22echo ; echo SQLALCHEMY_DATABASE_URI = $SQLALCHEMY_DATABASE_URI ; echo%22%0A    session.run(%22sh%22, %22-c%22, cmd, external=True)%0A%0A    session.run(%22poetry%22, %22install%22, %22-q%22, external=%22True%22)%0A    session.run(%22yarn%22, external=%22True%22)%0A%0A    session.run(%22pip%22, %22check%22)%0A    session.run(%22pytest%22, %22-q%22)%0A%0A%0A# TODO later%0A# @nox.session(python=%223.8%22)%0A# def typeguard(session):%0A#     # session.env%5B%22LC_ALL%22%5D = %22en_US.UTF-8%22%0A#     session.install(%22psycopg2-binary%22)%0A#     session.run(%22poetry%22, %22install%22, %22-q%22, external=%22True%22)%0A#     session.run(%22yarn%22, external=%22True%22)%0A#     session.run(%22pytest%22, f%22--typeguard-packages=%7BPACKAGE%7D%22)%0A
|
|
5addf2c2992cfdedf06da58861dae93347e02fb9
|
Support for nox test runner (alternative to tox), provides a workaround for #80.
|
noxfile.py
|
noxfile.py
|
Python
| 0 |
@@ -0,0 +1,1585 @@
+# -*- coding: utf-8 -*-%0A#%0A# Copyright (c) 2016 - 2020 -- Lars Heuer%0A# All rights reserved.%0A#%0A# License: BSD License%0A#%0A%22%22%22%5C%0ANox test runner configuration.%0A%22%22%22%0Aimport os%0Afrom functools import partial%0Aimport shutil%0Aimport nox%0A%0A%0A@nox.session(python=%223%22)%0Adef docs(session):%0A    %22%22%22%5C%0A    Build the documentation.%0A    %22%22%22%0A    session.install('-Ur', 'requirements.rtd')%0A    output_dir = os.path.abspath(os.path.join(session.create_tmp(), 'output'))%0A    doctrees, html, man = map(partial(os.path.join, output_dir), %5B'doctrees', 'html', 'man'%5D)%0A    shutil.rmtree(output_dir, ignore_errors=True)%0A    session.install('.')%0A    session.cd('docs')%0A    session.run('sphinx-build', '-W', '-b', 'html', '-d', doctrees, '.', html)%0A    session.run('sphinx-build', '-W', '-b', 'man', '-d', doctrees, '.', man)%0A%0A%0A@nox.session(python='3')%0Adef coverage(session):%0A    %22%22%22%5C%0A    Run coverage.%0A    %22%22%22%0A    session.install('coverage', '-Ur', 'requirements.testing.txt')%0A    session.install('.')%0A    session.run('coverage', 'erase')%0A    session.run('coverage', 'run', './tests/alltests.py')%0A    session.run('coverage', 'report', '--include=segno*')%0A    session.run('coverage', 'html', '--include=segno*')%0A%0A%0A@nox.session(python=%5B'2.7', '3.7', 'pypy', 'pypy3'%5D)%0Adef test(session):%0A    %22%22%22%5C%0A    Run test suite.%0A    %22%22%22%0A    if session.python == 'pypy':%0A        # See %3Chttps://github.com/heuer/segno/issues/80%3E%0A        session.run('pip', 'uninstall', '-y', 'pip')%0A        session.run('easy_install', 'pip==20.1')%0A    session.install('-Ur', 'requirements.testing.txt')%0A    session.install('.')%0A    session.run('py.test')%0A
|
|
510b90d42dbccd0aa1e3ff48ee8dbe7230b65185
|
Add script to compute some stats about data from energy consumption measures
|
get_stats_from.py
|
get_stats_from.py
|
Python
| 0.000001 |
@@ -0,0 +1,2373 @@
+import argparse%0Aimport csv%0Afrom glob import glob%0Aimport re%0Aimport statistics%0Aimport sys%0A%0Adef get_stats_from(files_names, files_content):%0A    for i in range(len(files_content)):%0A        file_name = files_names%5Bi%5D%0A        file_content = files_content%5Bi%5D%0A        print(%22FILE : %7B0%7D%22.format(files_names%5Bi%5D))%0A        print(%22%5Ct*MEAN : %7B0%7D%22.format(statistics.mean(file_content)))%0A        print(%22%5Ct*MEDIAN : %7B0%7D%22.format(statistics.median(file_content)))%0A        try:%0A            print(%22%5Ct*MOST TYPICAL VALUE : %7B0%7D%22.format(statistics.mode(file_content)))%0A        except:%0A            print(%222 most typical values!%22)%0A        print(%22%5Ct*STANDARD DEVIATION : %7B0%7D%22.format(statistics.stdev(file_content)))%0A        print(%22%5Ct*VARIANCE : %7B0%7D%22.format(statistics.variance(file_content)))%0A%0Adef get_global_stats(files_content):%0A    data = %5B%5D%0A    for sublist in files_content:%0A        data = data + sublist%0A    print(%22*GLOBAL MEAN : %7B0%7D%22.format(statistics.mean(data)))%0A    print(%22*GLOBAL MEDIAN : %7B0%7D%22.format(statistics.median(data)))%0A    try:%0A        print(%22*GLOBAL MOST TYPICAL VALUE : %7B0%7D%22.format(statistics.mode(data)))%0A    except:%0A        print(%222 most typical values!%22)%0A    print(%22*GLOBAL STANDARD DEVIATION : %7B0%7D%22.format(statistics.stdev(data)))%0A    print(%22*GLOBAL VARIANCE : %7B0%7D%22.format(statistics.variance(data)))%0A%0Adef main():%0A    parser = argparse.ArgumentParser(description='Get stats from Powertool output')%0A    parser.add_argument('-p', '--path', type=str, default=None, required=True,%0A                        help=%22specify path to your directories%22)%0A    parser.add_argument('-o', '--output', action=%22store_true%22,%0A                        help=%22save the output in the analysed directory%22)%0A    args = parser.parse_args()%0A%0A    directories = glob(args.path+%22*%22)%0A%0A    if len(directories) == 0:%0A        sys.exit(1)%0A%0A    csv_files = %5B%5D%0A%0A    for directory in directories:%0A        current_files = %5Bx for x in glob(directory + %22/*%22) if %22.csv%22 in x%5D%0A        csv_files = csv_files + current_files%0A%0A    files_content = %5B%5D%0A%0A    for csv_file in csv_files:%0A        with open(csv_file, %22r%22) as csv_content:%0A            csv_reader = csv.reader(csv_content)%0A            files_content.append(%5Bfloat(row%5B0%5D) for row in csv_reader if not (re.match(%22%5E%5Cd+?%5C.%5Cd+?$%22, row%5B0%5D) is None)%5D)%0A%0A    get_stats_from(csv_files, files_content)%0A%0A    get_global_stats(files_content)%0A%0Aif __name__ == '__main__':%0A    main()%0A
|
|
32dcc681a82ef2246d0fad441481d6e68f79ddd6
|
Add Python benchmark
|
lib/node_modules/@stdlib/math/base/special/ln/benchmark/python/benchmark.py
|
lib/node_modules/@stdlib/math/base/special/ln/benchmark/python/benchmark.py
|
Python
| 0.000138 |
@@ -0,0 +1,1550 @@
+#!/usr/bin/env python%0A%22%22%22Benchmark ln.%22%22%22%0A%0Afrom __future__ import print_function%0Aimport timeit%0A%0ANAME = %22ln%22%0AREPEATS = 3%0AITERATIONS = 1000000%0A%0A%0Adef print_version():%0A %22%22%22Print the TAP version.%22%22%22%0A print(%22TAP version 13%22)%0A%0A%0Adef print_summary(total, passing):%0A %22%22%22Print the benchmark summary.%0A%0A # Arguments%0A%0A * %60total%60: total number of tests%0A * %60passing%60: number of passing tests%0A%0A %22%22%22%0A print(%22#%22)%0A print(%221..%22 + str(total)) # TAP plan%0A print(%22# total %22 + str(total))%0A print(%22# pass %22 + str(passing))%0A print(%22#%22)%0A print(%22# ok%22)%0A%0A%0Adef print_results(elapsed):%0A %22%22%22Print benchmark results.%0A%0A # Arguments%0A%0A * %60elapsed%60: elapsed time (in seconds)%0A%0A # Examples%0A%0A %60%60%60 python%0A python%3E print_results(0.131009101868)%0A %60%60%60%0A %22%22%22%0A rate = ITERATIONS / elapsed%0A%0A print(%22 ---%22)%0A print(%22 iterations: %22 + str(ITERATIONS))%0A print(%22 elapsed: %22 + str(elapsed))%0A print(%22 rate: %22 + str(rate))%0A print(%22 ...%22)%0A%0A%0Adef benchmark():%0A %22%22%22Run the benchmark and print benchmark results.%22%22%22%0A setup = %22from math import log; from random import random;%22%0A stmt = %22y = log(10000.0*random() - 0.0)%22%0A%0A t = timeit.Timer(stmt, setup=setup)%0A%0A print_version()%0A%0A for i in xrange(REPEATS):%0A print(%22# python::%22 + NAME)%0A elapsed = t.timeit(number=ITERATIONS)%0A print_results(elapsed)%0A print(%22ok %22 + str(i+1) + %22 benchmark finished%22)%0A%0A print_summary(REPEATS, REPEATS)%0A%0A%0Adef main():%0A %22%22%22Run the benchmark.%22%22%22%0A benchmark()%0A%0A%0Aif __name__ == %22__main__%22:%0A main()%0A
|
|
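As a worked example of the rate computation in print_results() above: with ITERATIONS = 1000000 and an elapsed time of 0.25 s, rate = 1000000 / 0.25 = 4000000.0 iterations per second.

print_results(0.25)   # reports iterations, elapsed, and rate = 4000000.0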
20b450c4cd0ff9c57d894fa263056ff4cd2dbf07
|
Add a vim version of merge business hours
|
vim_turing_machine/machines/merge_business_hours/vim_merge_business_hours.py
|
vim_turing_machine/machines/merge_business_hours/vim_merge_business_hours.py
|
Python
| 0.000001 |
@@ -0,0 +1,365 @@
+import sys%0A%0Afrom vim_turing_machine.machines.merge_business_hours.merge_business_hours import merge_business_hours_transitions%0Afrom vim_turing_machine.vim_machine import VimTuringMachine%0A%0A%0Aif __name__ == '__main__':%0A    merge_business_hours = VimTuringMachine(merge_business_hours_transitions(), debug=True)%0A    merge_business_hours.run(initial_tape=sys.argv%5B1%5D, max_steps=50)%0A
|
|
137a7c6e98e0ba8bd916d4ba696b0f0f4e2cdc56
|
Create uptime.py
|
plot-uptime/uptime.py
|
plot-uptime/uptime.py
|
Python
| 0.000024 |
@@ -0,0 +1 @@
+%0A
|
|
e90c48ba46d7971386e01b3def9edbb2df5d74e8
|
Create mummy.py
|
management/commands/mummy.py
|
management/commands/mummy.py
|
Python
| 0.0034 |
@@ -0,0 +1,744 @@
+%22%22%22%0A%0A1. Install model-mommy%0A%0A    %60pip install model-mommy%60%0A%0A2. Use the command%0A%0A    %60./manage mummy someotherapp.HilariousModelName:9000 yetanotherapp.OmgTheseModelNamesLawl:1%60%0A%0A%22%22%22%0Afrom django.conf import settings%0Afrom django.core.management.base import BaseCommand, CommandError%0A%0Afrom model_mommy import mommy%0A%0A%0Aclass Command(BaseCommand):%0A    args = '%3Cmodelpath modelpath:count ...%3E'%0A    help = 'Generate model instances using model-mommy'%0A%0A    def handle(self, *args, **options):%0A        for modelpath in args:%0A            count = 1%0A            if %22:%22 in modelpath:%0A                modelpath, count = modelpath.split(%22:%22)%0A%0A            self.stdout.write(%22Processing: %7B%7D%22.format(modelpath))%0A            mommy.make(modelpath, _quantity=int(count))%0A
|
|
24c7231f57d967c4b18a9f37df18b1a7d53e45e5
|
Modify Caffe example to use module interface instead of the deprecated model interface. (#9095)
|
example/caffe/train_model.py
|
example/caffe/train_model.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
import logging
import os
def fit(args, network, data_loader, eval_metrics=None, batch_end_callback=None):
# kvstore
kv = mx.kvstore.create(args.kv_store)
# logging
head = '%(asctime)-15s Node[' + str(kv.rank) + '] %(message)s'
if 'log_file' in args and args.log_file is not None:
log_file = args.log_file
log_dir = args.log_dir
log_file_full_name = os.path.join(log_dir, log_file)
if not os.path.exists(log_dir):
os.mkdir(log_dir)
logger = logging.getLogger()
handler = logging.FileHandler(log_file_full_name)
formatter = logging.Formatter(head)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
logger.info('start with arguments %s', args)
else:
logging.basicConfig(level=logging.DEBUG, format=head)
logging.info('start with arguments %s', args)
# load model
model_prefix = args.model_prefix
if model_prefix is not None:
model_prefix += "-%d" % (kv.rank)
model_args = {}
if args.load_epoch is not None:
assert model_prefix is not None
tmp = mx.model.FeedForward.load(model_prefix, args.load_epoch)
model_args = {'arg_params' : tmp.arg_params,
'aux_params' : tmp.aux_params,
'begin_epoch' : args.load_epoch}
# save model
save_model_prefix = args.save_model_prefix
if save_model_prefix is None:
save_model_prefix = model_prefix
checkpoint = None if save_model_prefix is None else mx.callback.do_checkpoint(save_model_prefix)
# data
(train, val) = data_loader(args, kv)
# train
devs = mx.cpu() if args.gpus is None else [
mx.gpu(int(i)) for i in args.gpus.split(',')]
epoch_size = args.num_examples / args.batch_size
if args.kv_store == 'dist_sync':
epoch_size /= kv.num_workers
model_args['epoch_size'] = epoch_size
if 'lr_factor' in args and args.lr_factor < 1:
model_args['lr_scheduler'] = mx.lr_scheduler.FactorScheduler(
step = max(int(epoch_size * args.lr_factor_epoch), 1),
factor = args.lr_factor)
if 'clip_gradient' in args and args.clip_gradient is not None:
model_args['clip_gradient'] = args.clip_gradient
# disable kvstore for single device
if 'local' in kv.type and (
args.gpus is None or len(args.gpus.split(',')) is 1):
kv = None
model = mx.model.FeedForward(
ctx = devs,
symbol = network,
num_epoch = args.num_epochs,
learning_rate = args.lr,
momentum = 0.9,
wd = 0.00001,
initializer = mx.init.Xavier(factor_type="in", magnitude=2.34),
**model_args)
if eval_metrics is None:
eval_metrics = ['accuracy']
## TopKAccuracy only allows top_k > 1
for top_k in [5, 10, 20]:
eval_metrics.append(mx.metric.create('top_k_accuracy', top_k=top_k))
if batch_end_callback is not None:
if not isinstance(batch_end_callback, list):
batch_end_callback = [batch_end_callback]
else:
batch_end_callback = []
batch_end_callback.append(mx.callback.Speedometer(args.batch_size, 50))
model.fit(
X = train,
eval_data = val,
eval_metric = eval_metrics,
kvstore = kv,
batch_end_callback = batch_end_callback,
epoch_end_callback = checkpoint)
|
Python
| 0 |
@@ -3277,25 +3277,24 @@
= None%0A%0A
+%0A
mod
-el
= mx.mo
@@ -3298,351 +3298,36 @@
.mod
-el.FeedForward(%0A ctx = devs,%0A symbol = network,%0A num_epoch = args.num_epochs,%0A learning_rate = args.lr,%0A momentum = 0.9,%0A wd = 0.00001,%0A initializer = mx.init.Xavier(factor_type=%22in%22, magnitude=2.34),%0A **model_arg
+.Module(network, context=dev
s)%0A%0A
@@ -3829,90 +3829,30 @@
mod
-el
.fit(
-%0A X = train,%0A eval_data = val,%0A
+train_data=train,
eva
@@ -3859,26 +3859,17 @@
l_metric
- =
+=
eval_met
@@ -3877,47 +3877,159 @@
ics,
-%0A kvstore = kv,%0A
+ eval_data=val, optimizer='sgd',%0A optimizer_params=%7B'learning_rate':args.lr, 'momentum': 0.9, 'wd': 0.00001%7D,%0A num_epoch=args.num_epochs,
bat
@@ -4043,19 +4043,17 @@
callback
- =
+=
batch_en
@@ -4067,24 +4067,107 @@
ack,%0A
+
+initializer=mx.init.Xavier(factor_type=%22in%22, magnitude=2.34),%0A kvstore=kv,
epoch_end_ca
@@ -4172,19 +4172,17 @@
callback
- =
+=
checkpoi
@@ -4183,10 +4183,25 @@
eckpoint
+, **model_args
)%0A
+%0A
|
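Reassembled from the hunks above, the converted training call reads as follows; all names (train, val, devs, kv, checkpoint, model_args) come from fit() in the patched file.

mod = mx.mod.Module(network, context=devs)
mod.fit(train_data=train,
        eval_metric=eval_metrics, eval_data=val, optimizer='sgd',
        optimizer_params={'learning_rate': args.lr, 'momentum': 0.9, 'wd': 0.00001},
        num_epoch=args.num_epochs,
        batch_end_callback=batch_end_callback,
        initializer=mx.init.Xavier(factor_type="in", magnitude=2.34),
        kvstore=kv,
        epoch_end_callback=checkpoint, **model_args)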
3b7de4dbe3611863620cb528092779d25efde025
|
remove dj 3.2 warnings
|
data_exports/apps.py
|
data_exports/apps.py
|
Python
| 0.000002 |
@@ -0,0 +1,227 @@
+#!/usr/bin/env python%0Afrom django.apps import AppConfig%0Afrom django.utils.translation import gettext_lazy as _%0A%0A%0Aclass CsvExportConfig(AppConfig):%0A name = 'data_exports'%0A default_auto_field = %22django.db.models.AutoField%22%0A
|
|
85c732e395e3db4ec63a0d8580d895363d82e4a0
|
Add the salt.output module
|
salt/output.py
|
salt/output.py
|
Python
| 0.000142 |
@@ -0,0 +1,2452 @@
+%22%22%22%0AA simple way of setting the output format for data from modules%0A%22%22%22%0Aimport pprint%0A%0A# Conditionally import the json and yaml modules%0Atry:%0A import json%0A JSON = True%0Aexcept ImportError:%0A JSON = False%0Atry:%0A import yaml%0A YAML = True%0Aexcept ImportError:%0A YAML = False%0A%0A__all__ = ('get_outputter',)%0A%0Aclass Outputter(object):%0A %22%22%22%0A Class for outputting data to the screen.%0A %22%22%22%0A supports = None%0A%0A @classmethod%0A def check(klass, name):%0A # Don't advertise Outputter classes for optional modules%0A if hasattr(klass, %22enabled%22) and not klass.enabled:%0A return False%0A return klass.supports == name%0A%0A def __call__(self, data, **kwargs):%0A print %22Calling Outputter.__call__()%22%0A pprint.pprint(data)%0A%0Aclass TxtOutputter(Outputter):%0A %22%22%22%0A Plain text output. Primarily for returning output from%0A shell commands in the exact same way they would output%0A on the shell when ran directly.%0A %22%22%22%0A supports = %22txt%22%0A def __call__(self, data, **kwargs):%0A if hasattr(data, %22keys%22):%0A for key in data.keys():%0A value = data%5Bkey%5D%0A for line in value.split('%5Cn'):%0A print %22%7B0%7D: %7B1%7D%22.format(key, line)%0A else:%0A # For non-dictionary data, run pprint%0A super(TxtOutputter, self).__call__(data)%0A%0Aclass JSONOutputter(Outputter):%0A %22%22%22JSON output. Chokes on non-serializable objects.%22%22%22%0A supports = %22json%22%0A enabled = JSON%0A%0A def __call__(self, data, **kwargs):%0A try:%0A # A good kwarg might be: indent=4%0A print json.dumps(data, **kwargs)%0A except TypeError:%0A super(JSONOutputter, self).__call__(data)%0A%0Aclass YamlOutputter(Outputter):%0A %22%22%22Yaml output. All of the cool kids are doing it.%22%22%22%0A supports = %22yaml%22%0A enabled = YAML%0A%0A def __call__(self, data, **kwargs):%0A print yaml.dump(data, **kwargs)%0A%0Aclass RawOutputter(Outputter):%0A %22%22%22Raw output. This calls repr() on the returned data.%22%22%22%0A supports = %22raw%22%0A def __call__(self, data, **kwargs):%0A print data%0A%0Adef get_outputter(name=None):%0A %22%22%22%0A Factory function for returning the right output class.%0A%0A Usage:%0A printout = get_outputter(%22txt%22)%0A printout(ret)%0A %22%22%22%0A # Return an actual instance of the correct output class%0A for i in Outputter.__subclasses__():%0A if i.check(name):%0A return i()%0A return Outputter()%0A
|
|
9b0c335fc956c2d2156d169e3636d862ebfbadc0
|
add a scraping script
|
hadairopink.py
|
hadairopink.py
|
Python
| 0.000001 |
@@ -0,0 +1,1458 @@
+#!/usr/bin/env python%0A%0A%22%22%22%0ANo description.%0A%22%22%22%0A%0Aimport sys%0Afrom scrapy import cmdline, Request%0Afrom scrapy.linkextractors import LinkExtractor%0Afrom scrapy.spiders import CrawlSpider, Rule%0A%0ATARGET_DOMAIN = 'hadairopink.com'%0A%0AXPATH_IMAGE_SRC = '//div%5B@class=%22kizi%22%5D//a/img%5Bcontains(@src, %22/wp-content/uploads/%22)%5D/@src'%0AXPATH_PAGINATION = '/html/body//div%5B@class=%22pagination%22%5D/a%5B@data-wpel-link=%22internal%22%5D'%0AXPATH_ENTRY = '/html/body//h3%5B@class=%22entry-title-ac%22%5D/a'%0A%0Aclass Crawler(CrawlSpider):%0A    %22%22%22No description%22%22%22%0A%0A    name = TARGET_DOMAIN%0A    allowed_domains = %5BTARGET_DOMAIN%5D%0A    custom_settings = %7B%0A        'DOWNLOAD_DELAY': 1,%0A    %7D%0A%0A    rules = (%0A        Rule(LinkExtractor(restrict_xpaths=XPATH_ENTRY), callback='parse_entry'),%0A        Rule(LinkExtractor(restrict_xpaths=XPATH_PAGINATION)),%0A    )%0A%0A    def start_requests(self):%0A        %22%22%22No description%22%22%22%0A%0A        url = self.tag%0A        yield Request(url, dont_filter=True)%0A%0A    def parse_entry(self, response):%0A        %22%22%22No description%22%22%22%0A%0A        if images := response.xpath(XPATH_IMAGE_SRC).getall():%0A            yield %7B%0A                'title': response.xpath('//title/text()').get(),%0A                'url': response.url,%0A                'images': images%7D%0A%0A%0Aif __name__ == '__main__':%0A    #cmdline.execute(f%22scrapy runspider %7Bsys.argv%5B0%5D%7D -a tag=%7Bsys.argv%5B1%5D%7D -O images.csv%22.split())%0A    command_line = %5B%22scrapy%22, %22runspider%22%5D%0A    command_line.extend(sys.argv)%0A    cmdline.execute(command_line)%0A
|
|
285cddc3ed75f70e077738a206c50a57671245ea
|
add hello world script in Python
|
hello_flask.py
|
hello_flask.py
|
Python
| 0.000003 |
@@ -0,0 +1,168 @@
+# -*- coding: utf-8 -*-%0A%0Afrom flask import Flask%0Aapp = Flask(__name__)%0A%0A@app.route('/')%0Adef hello_flask():%0A%09return 'Hello Flask!'%0A%0Aif __name__ == '__main__':%0A%09app.run()
|
|
27ea547fbd7c936bd017b64b31ecf09ed991c6c0
|
Add index to fixed_ips.address
|
nova/db/sqlalchemy/migrate_repo/versions/085_add_index_to_fixed_ips_by_address.py
|
nova/db/sqlalchemy/migrate_repo/versions/085_add_index_to_fixed_ips_by_address.py
|
Python
| 0.000001 |
@@ -0,0 +1,1088 @@
+# Copyright 2012 IBM%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22); you may%0A# not use this file except in compliance with the License. You may obtain%0A# a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS, WITHOUT%0A# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the%0A# License for the specific language governing permissions and limitations%0A# under the License.%0A%0Afrom sqlalchemy import Index, MetaData, Table%0A%0A%0Adef upgrade(migrate_engine):%0A meta = MetaData()%0A meta.bind = migrate_engine%0A instances = Table('fixed_ips', meta, autoload=True)%0A index = Index('address', instances.c.address)%0A index.create(migrate_engine)%0A%0A%0Adef downgrade(migrate_engine):%0A meta = MetaData()%0A meta.bind = migrate_engine%0A instances = Table('fixed_ips', meta, autoload=True)%0A index = Index('address', instances.c.address)%0A index.drop(migrate_engine)%0A
|
|
65258cf8d11e8e5c7cce3e07d9a389e5617948dd
|
Add boilerplate code
|
aoc.py
|
aoc.py
|
Python
| 0.001915 |
@@ -0,0 +1,1121 @@
+import argparse%0Aimport importlib%0Aimport sys%0A%0Aif __name__ == %22__main__%22:%0A%0A parser = argparse.ArgumentParser(description=%22Advent of Code 2016%22)%0A parser.add_argument(%22--day%22, type=int, dest=%22days%22, nargs=%22+%22, default=range(1, 25))%0A parser.add_argument(%22--stdin%22, dest='stdin', action='store_true', default=False)%0A args = parser.parse_args()%0A%0A print(%22Advent of Code 2016%22)%0A print(%22===================%22)%0A print()%0A%0A for day in args.days:%0A try:%0A problem_module = importlib.import_module(%22day_%7B%7D%22.format(day))%0A input_file = open(%22day_%7B%7D.txt%22.format(day)) if not args.stdin else sys.stdin%0A problem = problem_module.Problem(input_file)%0A print(%22Day%22, day)%0A print(%22------%22)%0A if hasattr(problem, 'step1') and callable(getattr(problem, 'step1')):%0A print(%22Step 1:%22, problem.step1())%0A if hasattr(problem, 'step2') and callable(getattr(problem, 'step2')):%0A print(%22Step 2:%22, problem.step2())%0A print()%0A except ImportError as e:%0A print(%22Day%22, day, %22is not implemented yet%22)%0A
|
|
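A hypothetical day_1.py that satisfies the loader above: it only needs a Problem class taking the opened input file, with optional callable step1/step2 methods.

class Problem:
    def __init__(self, input_file):
        self.lines = input_file.read().splitlines()

    def step1(self):
        # toy answer: number of input lines
        return len(self.lines)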
02a5a09334eddd8003933948a8ea13e249a4a9dd
|
Tune bench expectation algorithm parameters.
|
bench/gen_bench_expectations.py
|
bench/gen_bench_expectations.py
|
#!/usr/bin/env python
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Generate bench_expectations file from a given set of bench data files. """
import argparse
import bench_util
import os
import re
import sys
# Parameters for calculating bench ranges.
RANGE_RATIO_UPPER = 1.0 # Ratio of range for upper bounds.
RANGE_RATIO_LOWER = 1.5 # Ratio of range for lower bounds.
ERR_RATIO = 0.05 # Further widens the range by the ratio of average value.
ERR_ABS = 0.5 # Adds an absolute error margin to cope with very small benches.
# List of bench configs to monitor. Ignore all other configs.
CONFIGS_TO_INCLUDE = ['simple_viewport_1000x1000',
'simple_viewport_1000x1000_gpu',
'simple_viewport_1000x1000_scalar_1.100000',
'simple_viewport_1000x1000_scalar_1.100000_gpu',
]
def compute_ranges(benches):
"""Given a list of bench numbers, calculate the alert range.
Args:
benches: a list of float bench values.
Returns:
a list of float [lower_bound, upper_bound].
"""
minimum = min(benches)
maximum = max(benches)
diff = maximum - minimum
avg = sum(benches) / len(benches)
return [minimum - diff * RANGE_RATIO_LOWER - avg * ERR_RATIO - ERR_ABS,
maximum + diff * RANGE_RATIO_UPPER + avg * ERR_RATIO + ERR_ABS]
def create_expectations_dict(revision_data_points):
"""Convert list of bench data points into a dictionary of expectations data.
Args:
revision_data_points: a list of BenchDataPoint objects.
Returns:
a dictionary of this form:
keys = tuple of (config, bench) strings.
values = list of float [expected, lower_bound, upper_bound] for the key.
"""
bench_dict = {}
for point in revision_data_points:
if (point.time_type or # Not walltime which has time_type ''
not point.config in CONFIGS_TO_INCLUDE):
continue
key = (point.config, point.bench)
if key in bench_dict:
raise Exception('Duplicate bench entry: ' + str(key))
bench_dict[key] = [point.time] + compute_ranges(point.per_iter_time)
return bench_dict
def main():
"""Reads bench data points, then calculate and export expectations.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
'-a', '--representation_alg', default='25th',
help='bench representation algorithm to use, see bench_util.py.')
parser.add_argument(
'-b', '--builder', required=True,
help='name of the builder whose bench ranges we are computing.')
parser.add_argument(
'-d', '--input_dir', required=True,
help='a directory containing bench data files.')
parser.add_argument(
'-o', '--output_file', required=True,
help='file path and name for storing the output bench expectations.')
parser.add_argument(
'-r', '--git_revision', required=True,
help='the git hash to indicate the revision of input data to use.')
args = parser.parse_args()
builder = args.builder
data_points = bench_util.parse_skp_bench_data(
args.input_dir, args.git_revision, args.representation_alg)
expectations_dict = create_expectations_dict(data_points)
out_lines = []
keys = expectations_dict.keys()
keys.sort()
for (config, bench) in keys:
(expected, lower_bound, upper_bound) = expectations_dict[(config, bench)]
out_lines.append('%(bench)s_%(config)s_,%(builder)s-%(representation)s,'
'%(expected)s,%(lower_bound)s,%(upper_bound)s' % {
'bench': bench,
'config': config,
'builder': builder,
'representation': args.representation_alg,
'expected': expected,
'lower_bound': lower_bound,
'upper_bound': upper_bound})
with open(args.output_file, 'w') as file_handle:
file_handle.write('\n'.join(out_lines))
if __name__ == "__main__":
main()
|
Python
| 0.998397 |
@@ -393,17 +393,17 @@
PER = 1.
-0
+2
# Rati
@@ -453,17 +453,17 @@
WER = 1.
-5
+8
# Rati
@@ -506,17 +506,17 @@
IO = 0.0
-5
+8
# Furt
|
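A minimal sketch of the alert-range computation this commit tunes, plugging in the post-tune values from the diff above (1.2 upper ratio, 1.8 lower ratio, 0.08 error ratio; ERR_ABS stays 0.5). The sample timings are hypothetical.

RANGE_RATIO_UPPER = 1.2  # was 1.0
RANGE_RATIO_LOWER = 1.8  # was 1.5
ERR_RATIO = 0.08         # was 0.05
ERR_ABS = 0.5            # unchanged

def compute_ranges(benches):
    minimum, maximum = min(benches), max(benches)
    diff = maximum - minimum
    avg = sum(benches) / len(benches)
    return [minimum - diff * RANGE_RATIO_LOWER - avg * ERR_RATIO - ERR_ABS,
            maximum + diff * RANGE_RATIO_UPPER + avg * ERR_RATIO + ERR_ABS]

# Hypothetical per-iteration timings; the widened ratios tolerate more jitter.
print(compute_ranges([10.0, 10.4, 10.2]))  # roughly [7.964, 12.196]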
813eb3b6bdc01906e39f11f93b4a326fc2fb1ee5
|
Add kitchen-sink base test
|
test/base.py
|
test/base.py
|
Python
| 0.000001 |
@@ -0,0 +1,891 @@
+import torch%0Afrom torch.autograd import Variable%0Aimport torch.nn as nn%0Aimport torch.nn.functional as F%0A%0Aimport os%0Aimport uuid%0Aimport torch2c%0A%0A%0Adef base_test():%0A%0A fc1 = nn.Linear(10,20)%0A fc1.weight.data.normal_(0.0,1.0)%0A fc1.bias.data.normal_(0.0,1.0)%0A%0A fc2 = nn.Linear(20,2)%0A fc2.weight.data.normal_(0.0,1.0)%0A fc2.bias.data.normal_(0.0,1.0)%0A%0A model_0 = lambda x: F.log_softmax(fc2(F.relu(fc1(x))))%0A%0A fc3 = nn.Linear(10,2)%0A fc3.weight.data.normal_(0.0,1.0)%0A fc3.bias.data.normal_(0.0,1.0)%0A%0A model_1 = lambda x: F.softmax(F.relu(fc3(x)))%0A%0A data = Variable(torch.rand(10,10))%0A%0A out = model_0(data) + model_1(data) + 1%0A%0A out_path = 'out'%0A if not os.path.isdir(out_path):%0A os.mkdir(out_path)%0A uid = str(uuid.uuid4())%0A%0A torch2c.compile(out,'base',os.path.join(out_path,uid),compile_test=True)%0A %0A%0Aif __name__=='__main__':%0A%0A base_test()%0A%0A
|
|
407f7fcf8f481c57df59789b7f845928428f1bf9
|
Add example script.
|
telegrambot/example.py
|
telegrambot/example.py
|
Python
| 0 |
@@ -0,0 +1,209 @@
+from telegrambot import TelegramBot, main%0Afrom telegrambot.commands import GetCommand%0A%0A%0Aclass DemoTelegramBot(TelegramBot, GetCommand):%0A pass%0A%0A%0Aif __name__ == '__main__':%0A main(bot_class=DemoTelegramBot)
|
|
9fdd671d9c0b91dc789ebf3b24226edb3e6a072a
|
Add new migration to load metrics fixtures
|
sleep/migrations/0002_load_metrics.py
|
sleep/migrations/0002_load_metrics.py
|
Python
| 0 |
@@ -0,0 +1,351 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import models, migrations%0Afrom django.core.management import call_command%0A%0A%0Adef load_metrics():%0A call_command('loaddata', 'metrics.json')%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('sleep', '0001_initial'),%0A %5D%0A%0A operations = %5B%0A %5D%0A
|
|
ef4d7e4fb43b5db29576f95625fd612c259731be
|
Create ServoSync.py
|
home/Mats/ServoSync.py
|
home/Mats/ServoSync.py
|
Python
| 0.000001 |
@@ -0,0 +1,301 @@
+port = %22COM99%22%0Aarduino = Runtime.start(%22arduino%22,%22Arduino%22)%0Avard = Runtime.start(%22va%22,%22VirtualArduino%22)%0Avard.connect(port)%0Aarduino.connect(port)%0Aservo1 = Runtime.start(%22servo1%22,%22Servo%22)%0Aservo2 = Runtime.start(%22servo2%22,%22Servo%22)%0Aservo1.attach(%22arduino%22,1)%0Aservo2.attach(%22arduino%22,2)%0Aservo1.sync(servo2)%0A
|
|
ecfbaded5e03529d1b189b6b5fc8b2f8516c4b31
|
Add hoster plugin for ARD mediathek
|
module/plugins/hoster/ARD.py
|
module/plugins/hoster/ARD.py
|
Python
| 0 |
@@ -0,0 +1,2481 @@
+%0Aimport subprocess%0Aimport re%0Aimport os.path%0Aimport os%0A%0Afrom module.utils import save_join, save_path%0Afrom module.plugins.Hoster import Hoster%0A%0A# Requires rtmpdump%0A# by Roland Beermann%0A%0Aclass RTMP:%0A # TODO: Port to some RTMP-library like rtmpy or similar%0A # TODO?: Integrate properly into the API of pyLoad%0A%0A command = %22rtmpdump%22%0A%0A @classmethod%0A def download_rtmp_stream(cls, url, output_file, playpath=None):%0A opts = %5B%0A %22-r%22, url,%0A %22-o%22, output_file,%0A %5D%0A if playpath:%0A opts.append(%22--playpath%22)%0A opts.append(playpath)%0A%0A cls._invoke_rtmpdump(opts)%0A%0A @classmethod%0A def _invoke_rtmpdump(cls, opts):%0A args = %5B%0A cls.command%0A %5D%0A args.extend(opts)%0A%0A return subprocess.check_call(args)%0A%0Aclass ARD(Hoster):%0A __name__ = %22ARD Mediathek%22%0A __version__ = %220.1%22%0A __pattern__ = r%22http://www%5C.ardmediathek%5C.de/.*%22%0A __config__ = %5B%5D%0A%0A def process(self, pyfile):%0A site = self.load(pyfile.url)%0A%0A avail_videos = re.findall(r%22%22%22mediaCollection.addMediaStream%5C(0, (%5B0-9%5D*), %22(%5B%5E%5C%22%5D*)%22, %22(%5B%5E%5C%22%5D*)%22, %22%5B%5E%5C%22%5D*%22%5C);%22%22%22, site)%0A avail_videos.sort(key=lambda videodesc: int(videodesc%5B0%5D), reverse=True) # The higher the number, the better the quality%0A%0A quality, url, playpath = avail_videos%5B0%5D%0A%0A pyfile.name = re.search(r%22%3Ch1%3E(%5B%5E%3C%5D*)%3C/h1%3E%22, site).group(1)%0A%0A if url.startswith(%22http%22):%0A # Best quality is available over HTTP. Very rare.%0A self.download(url)%0A else:%0A pyfile.setStatus(%22downloading%22)%0A%0A download_folder = self.config%5B'general'%5D%5B'download_folder'%5D%0A%0A location = save_join(download_folder, pyfile.package().folder)%0A%0A if not os.path.exists(location):%0A os.makedirs(location, int(self.core.config%5B%22permission%22%5D%5B%22folder%22%5D, 8))%0A%0A if self.core.config%5B%22permission%22%5D%5B%22change_dl%22%5D and os.name != %22nt%22:%0A try:%0A uid = getpwnam(self.config%5B%22permission%22%5D%5B%22user%22%5D)%5B2%5D%0A gid = getgrnam(self.config%5B%22permission%22%5D%5B%22group%22%5D)%5B2%5D%0A%0A chown(location, uid, gid)%0A except Exception, e:%0A self.log.warning(_(%22Setting User and Group failed: %25s%22) %25 str(e))%0A%0A output_file = save_join(location, save_path(pyfile.name))%0A%0A RTMP.download_rtmp_stream(url, playpath=playpath, output_file=output_file)%0A
|
|
b4b2775b115fb6ec506e982a9f98ee78342eb317
|
the default sequence = 0
|
res_currency_sequence/res_currency_sequence.py
|
res_currency_sequence/res_currency_sequence.py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Addons modules by CLEARCORP S.A.
# Copyright (C) 2009-TODAY CLEARCORP S.A. (<http://clearcorp.co.cr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
import copy
from openerp.osv import osv, fields
from openerp.tools.translate import _
class ResCurrency(osv.osv):
_name = "res.currency"
_inherit = "res.currency"
_columns = {
'sequence': fields.integer('Sequence', required=True, help='Use to arrange calculation sequence', select=True),
}
_defaults = {
'sequence': 0,
}
_sql_constraints = [
('res_currency_sequence', 'unique(sequence)', 'Sequence must be unique per currency!'),
]
_order = 'sequence'
def _current_rate_computation(self, cr, uid, ids, name, arg, raise_on_no_rate, context=None):
if 'second_rate' in context:
second_rate = context['second_rate']
if second_rate:
if context is None:
context = {}
res = {}
if 'date' in context:
date = context['date']
else:
date = time.strftime('%Y-%m-%d')
date = date or time.strftime('%Y-%m-%d')
# Convert False values to None ...
currency_rate_type = context.get('currency_rate_type_id') or None
# ... and use 'is NULL' instead of '= some-id'.
operator = '=' if currency_rate_type else 'is'
for id in ids:
cr.execute("SELECT currency_id, second_rate FROM res_currency_rate WHERE currency_id = %s AND name <= %s AND currency_rate_type_id " + operator +" %s ORDER BY name desc LIMIT 1" ,(id, date, currency_rate_type))
if cr.rowcount:
id, rate = cr.fetchall()[0]
res[id] = rate
elif not raise_on_no_rate:
res[id] = 0
else:
raise osv.except_osv(_('Error!'),_("No currency rate associated for currency %d for the given period" % (id)))
else:
res = super(ResCurrency, self)._current_rate_computation(cr, uid, ids, name, arg, raise_on_no_rate, context)
else:
res = super(ResCurrency, self)._current_rate_computation(cr, uid, ids, name, arg, raise_on_no_rate, context)
return res
def copy(self, cr, uid, id, default=None, context=None):
default = default or {}
default.update({
'sequence': None,
})
return super(ResCurrency, self).copy(cr, uid, id, default, context)
def get_exchange_rate(self, cr, uid, res_currency_initial, res_currency_finally, name, context=None):
"""
:param name: date of exchange rate
"""
res_obj = self.pool.get('res.currency.rate')
result = 0.00
copy_context = copy.copy(context)
copy_context.update({'date':name})
res_currency_base_id = self.search(cr, uid, [('base', '=', True)])
res_currency_base = self.browse(cr, uid, res_currency_base_id)[0]
if res_currency_initial.id == res_currency_base.id:
exchange_rate_dict = self.pool.get('res.currency')._current_rate(cr, uid, [res_currency_finally.id], name, arg=None, context=copy_context)
result = exchange_rate_dict[res_currency_finally.id]
elif res_currency_initial.id != res_currency_finally.id:
currency_rate_initial = self.pool.get('res.currency')._current_rate(cr, uid, [res_currency_initial.id], name, arg=None, context=copy_context)[res_currency_initial.id]
currency_rate_finally = self.pool.get('res.currency')._current_rate(cr, uid, [res_currency_finally.id], name, arg=None, context=copy_context)[res_currency_finally.id]
result = currency_rate_initial * currency_rate_finally
else:
result = 1.00
return result
|
Python
| 0.999997 |
@@ -1368,55 +1368,8 @@
%0A
- _defaults = %7B%0A 'sequence': 0,%0A %7D%0A
|
206ef4f7aad6c4ce51e4737a7d506a79061f1047
|
Add an `import_or_skip` function to testing.
|
distarray/testing.py
|
distarray/testing.py
|
import unittest
from functools import wraps
from distarray.error import InvalidCommSizeError
from distarray.mpiutils import MPI, create_comm_of_size
def comm_null_passes(fn):
"""Decorator. If `self.comm` is COMM_NULL, pass."""
@wraps(fn)
def wrapper(self, *args, **kwargs):
if self.comm == MPI.COMM_NULL:
pass
else:
return fn(self, *args, **kwargs)
return wrapper
class MpiTestCase(unittest.TestCase):
"""Base test class for MPI test cases.
Overload `get_comm_size` to change the default comm size (default is
4). Overload `more_setUp` to add more to the default `setUp`.
"""
def get_comm_size(self):
return 4
def more_setUp(self):
pass
def setUp(self):
try:
self.comm = create_comm_of_size(self.get_comm_size())
except InvalidCommSizeError:
msg = "Must run with comm size >= {}."
raise unittest.SkipTest(msg.format(self.get_comm_size()))
else:
self.more_setUp()
def tearDown(self):
if self.comm != MPI.COMM_NULL:
self.comm.Free()
|
Python
| 0 |
@@ -9,16 +9,33 @@
nittest%0A
+import importlib%0A
from fun
@@ -162,16 +162,691 @@
_size%0A%0A%0A
+def import_or_skip(name):%0A %22%22%22Try importing %60name%60, raise SkipTest on failure.%0A%0A Parameters%0A ----------%0A name : str%0A Module name to try to import.%0A%0A Returns%0A -------%0A module : module object%0A Module object imported by importlib.%0A%0A Raises%0A ------%0A unittest.SkipTest%0A If the attempted import raises an ImportError.%0A%0A Examples%0A --------%0A %3E%3E%3E h5py = import_or_skip('h5py')%0A %3E%3E%3E h5py.get_config()%0A %3Ch5py.h5.H5PYConfig at 0x103dd5a78%3E%0A%0A %22%22%22%0A try:%0A return importlib.import_module(name)%0A except ImportError:%0A errmsg = '%25s not found... skipping.' %25 name%0A raise unittest.SkipTest(errmsg)%0A%0A%0A
def comm
|
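A hedged sketch of how the import_or_skip helper added in this diff would be used from a test case; the test class is illustrative, and h5py is just the optional dependency named in the helper's own docstring.

import unittest
from distarray.testing import import_or_skip

class TestWithOptionalDep(unittest.TestCase):
    def test_needs_h5py(self):
        h5py = import_or_skip('h5py')  # raises unittest.SkipTest if missing
        self.assertIsNotNone(h5py.get_config())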
80c1dba49bbdaf4d0d37e8a06549774d2afd019a
|
Add cosmo_viewer app
|
pvapp/cosmo_viewer.py
|
pvapp/cosmo_viewer.py
|
Python
| 0 |
@@ -0,0 +1,3196 @@
+################################################################################%0A#%0A# Copyright 2013 Kitware, Inc.%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22);%0A# you may not use this file except in compliance with the License.%0A# You may obtain a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS,%0A# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0A# See the License for the specific language governing permissions and%0A# limitations under the License.%0A#%0A################################################################################%0A%0A# import to process args%0Aimport sys%0Aimport os%0Aimport math%0Aimport json%0A%0Aimport argparse%0A%0A# import annotations%0Afrom autobahn.wamp import exportRpc%0A%0A# import paraview modules.%0Afrom paraview import simple, web, servermanager, web_helper, paraviewweb_wamp, paraviewweb_protocols%0A%0A# Setup global variables%0Atimesteps = %5B%5D%0AcurrentTimeIndex = 0%0Aview = None%0AdataPath = None%0AauthKey = None%0A%0Adef initView(width, height):%0A global view%0A view = simple.GetRenderView()%0A simple.Render()%0A view.ViewSize = %5Bwidth, height%5D%0A view.Background = %5B0.0, 0.0, 0.0%5D%0A view.OrientationAxesLabelColor = %5B0, 0, 0%5D%0A%0A# This class defines the exposed RPC methods for the midas application%0Aclass CosmoApp(paraviewweb_wamp.ServerProtocol):%0A def initialize(self):%0A global authKey%0A%0A # Bring used components%0A self.registerParaViewWebProtocol(paraviewweb_protocols.ParaViewWebMouseHandler())%0A self.registerParaViewWebProtocol(paraviewweb_protocols.ParaViewWebViewPort())%0A self.registerParaViewWebProtocol(paraviewweb_protocols.ParaViewWebViewPortImageDelivery())%0A self.registerParaViewWebProtocol(paraviewweb_protocols.ParaViewWebViewPortGeometryDelivery())%0A%0A # Update authentication key to use%0A #self.updateSecret(authKey)%0A%0A @exportRpc(%22openFile%22)%0A def openFile(self, filename):%0A fileid = %22%22%0A if self.reader:%0A try:%0A simple.Delete(self.reader)%0A except:%0A self.reader = None%0A try:%0A self.reader = simple.OpenDataFile(filename)%0A simple.Show()%0A simple.Render()%0A simple.ResetCamera()%0A fileid = self.reader.GetGlobalIDAsString()%0A except:%0A self.reader = None%0A return fileid%0A%0Aif __name__ == %22__main__%22:%0A parser = argparse.ArgumentParser(%0A description=%22Midas+ParaViewWeb application%22)%0A web.add_arguments(parser)%0A parser.add_argument(%22--data-dir%22, default=os.getcwd(),%0A help=%22path to data directory%22, dest=%22path%22)%0A parser.add_argument(%22--width%22, default=575,%0A help=%22width of the render window%22, dest=%22width%22)%0A parser.add_argument(%22--height%22, default=575,%0A help=%22height of the render window%22, dest=%22height%22)%0A args = parser.parse_args()%0A%0A dataPath = args.path%0A authKey = args.authKey%0A width = args.width%0A height = args.height%0A%0A initView(width, height)%0A web.start_webserver(options=args, protocol=CosmoApp)%0A
|
|
99ed96105fcbaa7b2836d19e1dde17bc49f23327
|
Commit the basic skeleton
|
crawl_ptt.py
|
crawl_ptt.py
|
Python
| 0.999616 |
@@ -0,0 +1,1240 @@
+#!/usr/bin/env python%0A%0A%0Afrom pprint import pprint%0Aimport logging%0Afrom bs4 import BeautifulSoup%0Aimport requests%0A%0A%0Alogging.basicConfig(%0A format=(%0A '%25(asctime)s%5Ct%25(levelname)s%5Ct'%0A #'%25(processName)s%5Ct%25(threadName)s%5Ct'%0A '%25(name)s%5Ct%25(funcName)s:%25(lineno)d%5Ct'%0A '%25(message)s'%0A ),%0A level=logging.DEBUG%0A)%0A%0A%0Adef make_fake_browser():%0A%0A fake_browser = requests.Session()%0A fake_browser.headers = %7B%0A 'user-agent': (%0A 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_1) '%0A 'AppleWebKit/537.36 (KHTML, like Gecko) '%0A 'Chrome/54.0.2840.98 Safari/537.36'%0A ),%0A 'accept': (%0A 'text/html,application/xhtml+xml,application/xml;q=0.9,'%0A 'image/webp,*/*;q=0.8'%0A ),%0A 'accept-encoding': 'gzip, deflate, sdch, br',%0A 'accept-language': 'en-US,en;q=0.8,zh-TW;q=0.6,zh;q=0.4',%0A 'cookie': 'over18=1',%0A %7D%0A%0A return fake_browser%0A%0A%0Adef save_index_html():%0A%0A fake_browser = make_fake_browser()%0A resp = fake_browser.get('https://www.ptt.cc/bbs/Gossiping/index.html')%0A%0A with open('index.html', 'w') as f:%0A f.write(resp.text)%0A%0A logging.info('Saved index.html')%0A%0A%0Aif __name__ == '__main__':%0A%0A save_index_html()%0A
|
|
8287876963af72756c3ff9102526c56f3e28a8a2
|
Test for file resources
|
tests/functional_tests/test_resources/test_file_resource.py
|
tests/functional_tests/test_resources/test_file_resource.py
|
Python
| 0 |
@@ -0,0 +1,530 @@
+# -*- coding: utf8 -*-%0A%0A%0Afrom tuttle.resources import FileResource%0Aimport tuttle.resources%0Afrom os import path%0A%0Aclass TestHttpResource():%0A%0A def test_real_resource_exists(self):%0A %22%22%22A real resource should exist%22%22%22%0A file_url = %22file://%7B%7D%22.format(path.abspath(tuttle.resources.__file__))%0A res = FileResource(file_url)%0A assert res.exists()%0A%0A def test_fictive_resource_exists(self):%0A %22%22%22A real resource should exist%22%22%22%0A res = FileResource(%22fictive_file%22)%0A assert not res.exists()%0A
|
|
14755cda032b5cb44626b2da66d943517427f947
|
test for malformed db imports
|
tests/test_core.py
|
tests/test_core.py
|
Python
| 0 |
@@ -0,0 +1,213 @@
+%22%22%22unit tests for core.py%22%22%22%0A%0Aimport pytest%0A%0Aimport core%0A%0A%0Adef test_malformed_linkdatabase():%0A # pytest.set_trace()%0A with pytest.raises(EOFError):%0A core.LinkDatabase().load(db='tests/garbage.pickle')%0A
|
|
3a59057f7465d9982e26b92cddafa0ea9ba48806
|
Add new package: universal-ctags (#18962)
|
var/spack/repos/builtin/packages/universal-ctags/package.py
|
var/spack/repos/builtin/packages/universal-ctags/package.py
|
Python
| 0 |
@@ -0,0 +1,803 @@
+# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other%0A# Spack Project Developers. See the top-level COPYRIGHT file for details.%0A#%0A# SPDX-License-Identifier: (Apache-2.0 OR MIT)%0A%0Afrom spack import *%0A%0A%0Aclass UniversalCtags(AutotoolsPackage):%0A %22%22%22Universal Ctags generates an index (or tag) file of language%0A objects found in source files for many popular programming languages.%0A This index makes it easy for text editors and other tools to locate%0A the indexed items.%22%22%22%0A%0A homepage = %22https://ctags.io/%22%0A git = %22https://github.com/universal-ctags/ctags.git%22%0A%0A version('master', branch='master')%0A%0A depends_on('autoconf', type='build')%0A depends_on('automake', type='build')%0A depends_on('libtool', type='build')%0A depends_on('m4', type='build')%0A
|
|
f29e278a1b661224c9580d8275654a8c6fe7d3cf
|
add test for http.encode_request
|
tests/test_http.py
|
tests/test_http.py
|
Python
| 0 |
@@ -0,0 +1,1145 @@
+%22%22%22Test bbs2ch.http module.%22%22%22%0Afrom bbs2ch import http%0A%0A%0Adef test_host_path():%0A %22%22%22Return hostname and path from url.%22%22%22%0A assert (u'hoge.com', '/') == http.host_path(u'http://hoge.com/')%0A%0A%0Adef test_encode_request_get():%0A %22%22%22Return http request string.%22%22%22%0A header = %5B(u'Key', u'Value'),%0A (u'Key2', u'Value2')%5D%0A%0A assert ('GET / HTTP/1.1%5Cr%5Cn'%0A 'Key: Value%5Cr%5Cn'%0A 'Key2: Value2%5Cr%5Cn'%0A '%5Cr%5Cn'%0A '%5Cr%5Cn' ==%0A http.encode_request('GET', u'/', header))%0A%0A%0Adef test_encode_request_post():%0A %22%22%22Return http request string.%0A%0A if body is not empty, add header to Content-length and Content-Type.%0A %22%22%22%0A header = %5B(u'Key', u'Value'),%0A (u'Key2', u'Value2')%5D%0A%0A body = %5B(u'key', u'value'),%0A (u'key2', u'value2')%5D%0A%0A assert ('POST / HTTP/1.1%5Cr%5Cn'%0A 'Key: Value%5Cr%5Cn'%0A 'Key2: Value2%5Cr%5Cn'%0A 'Content-Type: application/x-www-form-urlencoded%5Cr%5Cn'%0A 'Content-Length: 21%5Cr%5Cn'%0A '%5Cr%5Cn'%0A 'key=value&key2=value2%5Cr%5Cn'%0A ==%0A http.encode_request(u'POST', u'/', header, body))%0A
|
|
f032556bf07b37f9544c71ecad7aed472021bc97
|
Add script to update giving and teams receiving
|
sql/branch.py
|
sql/branch.py
|
Python
| 0 |
@@ -0,0 +1,591 @@
+import sys%0A%0Afrom gratipay import wireup%0A%0Adb = wireup.db(wireup.env())%0A%0Aparticipants = db.all(%22%22%22%0A SELECT p.*::participants%0A FROM participants p%0A WHERE (%0A SELECT error%0A FROM current_exchange_routes er%0A WHERE er.participant = p.id%0A AND network = 'braintree-cc'%0A ) %3C%3E ''%0A%22%22%22)%0A%0Atotal = len(participants)%0A%0Aprint(%22%25s participants with failing cards%22 %25 total)%0A%0Acounter = 1%0A%0Afor p in participants:%0A sys.stdout.write(%22%5CrUpdating (%25i/%25i)%22 %25 (counter, total))%0A sys.stdout.flush()%0A counter += 1%0A%0A p.update_giving_and_teams()%0A%0Aprint(%22Done!%22)%0A
|
|
da66b2a2a2e2a73ffd986aea6ba5d086d43892fc
|
Add main smoketest
|
tests/test_main.py
|
tests/test_main.py
|
Python
| 0.000002 |
@@ -0,0 +1,165 @@
+import unittest%0Aimport sys%0Afrom chaser import main%0A%0A%0Aclass TestMain(unittest.TestCase):%0A%0A def test_smoke_main(self):%0A sys.argv = %5B%22chaser%22%5D%0A main()%0A
|
|
ad1d349d49072b5bda6641db4f070704fde81e5f
|
Add FCC.
|
inspectors/fcc.py
|
inspectors/fcc.py
|
Python
| 0 |
@@ -0,0 +1,2604 @@
+#!/usr/bin/env python%0A# -*- coding: utf-8 -*-%0A%0Aimport datetime%0Aimport logging%0Aimport os%0Afrom urllib.parse import urljoin%0A%0Afrom bs4 import BeautifulSoup%0Afrom utils import utils, inspector%0A%0A# http://transition.fcc.gov/oig/oigreportsaudit.html%0A# Oldest report: 1994%0A%0A# options:%0A# standard since/year options for a year range to fetch from.%0A#%0A# Notes for IG's web team:%0A%0AAUDIT_REPORTS_URL = %22http://transition.fcc.gov/oig/oigreportsaudit.html%22%0ASEMIANNUAL_REPORTS_URL = %22http://transition.fcc.gov/oig/oigreportssemiannual.html%22%0AOTHER_REPORTS_URL = %22http://transition.fcc.gov/oig/oigreportsletters.html%22%0A%0Adef run(options):%0A year_range = inspector.year_range(options)%0A%0A for url in %5BAUDIT_REPORTS_URL, SEMIANNUAL_REPORTS_URL, OTHER_REPORTS_URL%5D:%0A doc = beautifulsoup_from_url(url)%0A results = doc.find_all(%22table%22, %7B%22border%22: 2%7D)%5B0%5D.select(%22tr%22)%0A for index, result in enumerate(results):%0A if index %3C 2:%0A # The first two rows are headers%0A continue%0A report = report_from(result, url, year_range)%0A if report:%0A inspector.save_report(report)%0A%0Adef report_from(result, page_url, year_range):%0A if not result.text.strip():%0A # Nothing in the entire row, just an empty row%0A return%0A%0A report_url = urljoin(page_url, result.select(%22td a%22)%5B0%5D.get('href'))%0A report_filename = report_url.split(%22/%22)%5B-1%5D%0A report_id, extension = os.path.splitext(report_filename)%0A%0A published_on_text = result.select(%22td%22)%5B0%5D.text.split(%22%5Cr%5Cn%22)%5B0%5D.strip()%0A%0A if len(result.select(%22td%22)) == 2:%0A # Semiannual report%0A published_on_text = published_on_text.split(%22to%22)%5B-1%5D.split(%22through%22)%5B-1%5D.strip()%0A published_on = datetime.datetime.strptime(published_on_text, '%25B %25d, %25Y')%0A title = %22Semi-Annual Report - %7B%7D%22.format(published_on_text)%0A else:%0A try:%0A published_on = datetime.datetime.strptime(published_on_text, '%25m/%25d/%25y')%0A except ValueError:%0A published_on = datetime.datetime.strptime(published_on_text, '%25m/%25d/%25Y')%0A title = result.select(%22td%22)%5B1%5D.text.strip()%0A%0A if published_on.year not in year_range:%0A logging.debug(%22%5B%25s%5D Skipping, not in requested range.%22 %25 report_url)%0A return%0A%0A report = %7B%0A 'inspector': 'fcc',%0A 'inspector_url': 'http://fcc.gov/oig/',%0A 'agency': 'fcc',%0A 'agency_name': %22Federal Communications Commission%22,%0A 'report_id': report_id,%0A 'url': report_url,%0A 'title': title,%0A 'published_on': datetime.datetime.strftime(published_on, %22%25Y-%25m-%25d%22),%0A %7D%0A return report%0A%0A%0Adef beautifulsoup_from_url(url):%0A body = utils.download(url)%0A return BeautifulSoup(body)%0A%0A%0Autils.run(run) if (__name__ == %22__main__%22) else None
|
|
c510b27dea59eeae229cf30dabc39ae083f286b0
|
Add better indexes
|
ureport/stats/migrations/0017_better_indexes.py
|
ureport/stats/migrations/0017_better_indexes.py
|
Python
| 0.001981 |
@@ -0,0 +1,855 @@
+# Generated by Django 3.2.6 on 2021-09-27 17:49%0A%0Afrom django.db import migrations%0A%0AINDEX_POLLSTATS_ORG_RESULT_SQL = %22%22%22%0ACREATE INDEX IF NOT EXISTS stats_pollstats_org_result on stats_pollstats (org_id, flow_result_id) WHERE flow_result_id IS NOT NULL;%0A%22%22%22%0A%0A%0AINDEX_POLLSTATS_ORG_QST_RST_CAT_SQL = %22%22%22%0ACREATE INDEX IF NOT EXISTS stats_pollstats_org_qstn_rslt_cat_age_gndr_schm_date_not_null on stats_pollstats (org_id, question_id, flow_result_id, category_id, flow_result_category_id, age_segment_id, gender_segment_id, scheme_segment_id, location_id, date) WHERE date IS NOT NULL;%0A%22%22%22%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A (%22stats%22, %220016_pollstats_scheme_segment%22),%0A %5D%0A%0A operations = %5B%0A migrations.RunSQL(INDEX_POLLSTATS_ORG_RESULT_SQL),%0A migrations.RunSQL(INDEX_POLLSTATS_ORG_QST_RST_CAT_SQL),%0A %5D%0A
|
|
b304b1087d69d4142a9df5ad2db339e5aafe3331
|
Update category
|
news/views.py
|
news/views.py
|
from django.shortcuts import render, redirect,render_to_response
from django.template import RequestContext
from django.http import HttpResponse
from django.urls import reverse
from django.views.generic import TemplateView,DetailView
# Create your views here
from news.models import Slider,How_it_works,ArticleCategory,Contact_us,ArticleCategory,Article,RelationCategoryArticle,ArticleImages
"""
Just in case test views
"""
def index(request):
return redirect(reverse('main-index'))
class TemplateAllData(TemplateView):
def get_context_data(self, **kwargs):
context = super(TemplateAllData, self).get_context_data(**kwargs)
context['categorys'] = ArticleCategory.objects.all()
context['contact'] = Contact_us.objects.all()
return context
class TestView(TemplateAllData):
template_name = 'index.html'
def get_context_data(self, **kwargs):
context = super(TestView, self).get_context_data(**kwargs)
context['slider'] = Slider.objects.filter(status=True)
context['how_it'] = How_it_works.objects.all().order_by('id')
context['feed'] = Article.objects.filter(status=True,home_page_status=True)
return context
class AboutView(TemplateAllData):
template_name = 'index-1.html'
class GalleryView(TemplateAllData):
template_name = 'index-2.html'
def get_context_data(self, **kwargs):
context = super(GalleryView, self).get_context_data(**kwargs)
context['albom'] = ArticleImages.objects.all()
return context
class ContactsView(TemplateAllData):
template_name = 'index-4.html'
class PrivacyView(TemplateAllData):
template_name = 'index-5.html'
class CategoryDetailView(DetailView):
model = ArticleCategory
template_name = 'index-3.html'
def get_context_data(self, **kwargs):
context = super(CategoryDetailView, self).get_context_data(**kwargs)
context['categorys'] = ArticleCategory.objects.all()
context['contact'] = Contact_us.objects.all()
context['cat_feed'] = RelationCategoryArticle.objects.filter(category_obj__slug=kwargs.get('slug'))
return context
|
Python
| 0.000001 |
@@ -2105,16 +2105,21 @@
j__slug=
+self.
kwargs.g
|
fb86dcdd6046c7d35e932396ba541671727b4d01
|
rearrange imports to standards
|
ngSe/utils.py
|
ngSe/utils.py
|
from time import time, sleep
from functools import wraps
from .exceptions import element_exceptions
def retry(f=None, timeout=30, interval=0.1):
"""
When working with a responsive UI, sometimes elements are not ready at the very second you request it
This wrapper will keep on retrying finding or interacting with the element until its ready
"""
# This allows us to use '@retry' or '@retry(timeout=thing, interval=other_thing)' for custom times
if f is None:
def rwrapper(f):
return retry(f, timeout, interval)
return rwrapper
@wraps(f)
def wrapper(*args, **kwargs):
# The wrapped function gets the optional arguments retry_timeout and retry_interval added
retry_timeout = kwargs.pop('retry_timeout', timeout)
retry_interval = kwargs.pop('retry_interval', interval)
prep = kwargs.pop('prep', None)
end_time = time() + retry_timeout
while True:
try:
if prep is not None:
prep()
return f(*args, **kwargs)
except element_exceptions:
if time() > end_time:
# timeout, re-raise the original exception
raise
sleep(retry_interval)
return wrapper
|
Python
| 0.021967 |
@@ -2,59 +2,59 @@
rom
-time import time, sleep%0Afrom functools import wraps
+functools import wraps%0Afrom time import time, sleep
%0A%0Afr
|
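A short usage sketch for the retry decorator in this record. The page object is hypothetical, and whatever the wrapped function raises must be one of the types collected in ngSe.exceptions.element_exceptions for the wrapper to swallow it.

from ngSe.utils import retry

@retry(timeout=10, interval=0.5)
def submit_form(page):
    page.find('save-button').click()  # flaky until the UI finishes rendering

# Callers can override per call; the optional 'prep' callable runs before each attempt:
# submit_form(page, retry_timeout=5, retry_interval=0.2, prep=page.refresh)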
d648aeb90158cb104ac6548887a39dc13dfa236f
|
add management cmd make_emails_lowercase
|
corehq/apps/users/management/commands/make_emails_lowercase.py
|
corehq/apps/users/management/commands/make_emails_lowercase.py
|
Python
| 0.000212 |
@@ -0,0 +1,469 @@
+from django.core.management import BaseCommand%0A%0Afrom corehq.apps.users.models import CouchUser%0A%0A%0Aclass Command(BaseCommand):%0A help = %22Makes emails into lowercase%22%0A%0A def handle(self, *args, **options):%0A for couch_user in CouchUser.all():%0A if couch_user.email and any(char.isupper() for char in couch_user.email):%0A print couch_user.email%0A couch_user.email = couch_user.email.lower()%0A couch_user.save()%0A
|
|
9d2976200965c4ea6b324d0822f6be786a25f2ea
|
Add file containing filesystem utilities
|
refmanage/fs_utils.py
|
refmanage/fs_utils.py
|
Python
| 0.000001 |
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-%0A
|
|
b674efd944bf124da60db90d90cc2da35761427d
|
Conform to pep8
|
shinken/bin.py
|
shinken/bin.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, [email protected]
# Gerhard Lausser, [email protected]
# Gregory Starck, [email protected]
# Hartmut Goebel, [email protected]
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
"""
This file is to be imported by every Shinken service component:
Arbiter, Scheduler, etc. It just checks for the main requirement of
Shinken.
"""
import sys
VERSION = "2.2"
# Make sure people are using Python 2.6 or higher
# This is the canonical python version check
if sys.version_info < (2, 6):
sys.exit("Shinken requires as a minimum Python 2.6.x, sorry")
elif sys.version_info >= (3,):
sys.exit("Shinken is not yet compatible with Python 3.x, sorry")
|
Python
| 0.999996 |
@@ -1388,9 +1388,8 @@
sorry%22)%0A
-%0A
|
6979bbf6547d689b1980762349a0e78c9c7c026d
|
Create fibonacci.py
|
python/fibonacci/fibonacci.py
|
python/fibonacci/fibonacci.py
|
Python
| 0.000838 |
@@ -0,0 +1,122 @@
+a = 0%0Ab = 1%0Ac = 0%0An = int(input(%22Nombre de termes : %22))%0A%0Afor i in range (1, n+1):%0A c = a+b%0A b = a%0A a= c%0Aprint(c)%0A
|
|
fcf691454b8607fec9d7f5cba43579dc02c26c8b
|
Check coverage of pgi, vs gi
|
tests/pgi_covergage.py
|
tests/pgi_covergage.py
|
Python
| 0 |
@@ -0,0 +1,1723 @@
+%22%22%22%0Afind pgi coverage of all gi.repositorys.%0Ayou need to have access to both 'gi' and 'pgi' in the current python%0Aenvironment.%0A%0AIn a virtualenv this works:%0A%0A$ pip install pgi%0A$ pip install vext.gi%0A%0A$ python pgi_coverage.py%0A%22%22%22%0A%0ATYPELIB_DIR=%22/usr/lib/girepository-1.0%22%0A%0Afrom os.path import basename%0Afrom glob import glob%0Afrom textwrap import dedent%0A%0Adef test_pgi_coverage(gi_module, pgi_module):%0A name_width = len(max(dir(gi_module), key=len))%0A print('%25s %25s' %25 (gi_module.__name__.rjust(name_width), pgi_module.__name__))%0A for name in dir(gi_module):%0A if name.startswith('_'):%0A continue%0A status = 'OK'%0A try:%0A getattr(pgi_module, name)%0A except NotImplementedError as e:%0A #status = %22FAIL: '%25s'%22 %25 str(e.__class__.__name__)%0A status = %22FAIL%22%0A for line in str(e).splitlines():%0A if line.startswith('NotImplementedError:'):%0A status = status + %22 %22 + line%0A print(%22%25s%5Ct%25s%22 %25 (name.rjust(name_width), status))%0A print(%22%22)%0A%0Adef test_coverage(typelib):%0A code = dedent(%22%22%22%0A from pgi.repository import %7B0%7D as PGI_%7B0%7D%0A from gi.repository import %7B0%7D as GI_%7B0%7D%0A%0A test_pgi_coverage(GI_%7B0%7D, PGI_%7B0%7D)%0A %22%22%22.format(typelib))%0A%0A try:%0A print(%22PGI coverage of %25s%22 %25 typelib)%0A exec(code)%0A except Exception as e:%0A print(%22Skipped because of %25s during test%22 %25 str(e))%0A%0Adef get_typelibs():%0A typelibs = %5B%5D%0A%0A for typelib in glob(TYPELIB_DIR + %22/*.typelib%22):%0A fn = basename(typelib).partition(%22-%22)%5B0%5D%0A typelibs.append(fn)%0A return typelibs%0A%0Aif __name__=='__main__':%0A typelibs = get_typelibs()%0A for typelib in typelibs:%0A test_coverage(typelib)%0A
|
|
7c1cbc49e6cdc6ef514382eee9679f4e9719257b
|
add basic-calculator-ii
|
vol5/basic-calculator-ii/basic-calculator-ii.py
|
vol5/basic-calculator-ii/basic-calculator-ii.py
|
Python
| 0.999311 |
@@ -0,0 +1,1965 @@
+#!/usr/bin/env python%0A# -*- coding: utf-8 -*-%0A# @Author: Zeyuan Shang%0A# @Date: 2015-11-18 17:22:37%0A# @Last Modified by: Zeyuan Shang%0A# @Last Modified time: 2015-11-18 17:22:44%0Aclass Solution:%0A operators = %5B'+', '-', '*', '/'%5D%0A %0A def getPriority(self, operator):%0A return %7B%0A '+' : 1,%0A '-' : 1,%0A '*' : 2,%0A '/' : 2,%0A %7D.get(operator, 0)%0A %0A def toRPN(self, s):%0A tokens, stack = %5B%5D, %5B%5D%0A number = ''%0A for c in s:%0A if c.isdigit():%0A number += c%0A else:%0A if number:%0A tokens.append(number)%0A number = ''%0A if c in self.operators:%0A while len(stack) and self.getPriority(stack%5B-1%5D) %3E= self.getPriority(c):%0A tokens.append(stack.pop())%0A stack.append(c)%0A elif c == '(':%0A stack.append(c)%0A elif c == ')':%0A while len(stack) and stack%5B-1%5D != '(':%0A tokens.append(stack.pop())%0A stack.pop()%0A if number:%0A tokens.append(number)%0A while len(stack):%0A tokens.append(stack.pop())%0A return tokens%0A %0A def calcValue(self, x, y, operator):%0A return %7B%0A '+': lambda x, y: x + y,%0A '-': lambda x, y: x - y,%0A '*': lambda x, y: x * y,%0A '/': lambda x, y: int(float(x) / y),%0A %7D%5Boperator%5D(x, y)%0A %0A def evalRPN(self, tokens):%0A operands = %5B%5D%0A for token in tokens:%0A if token in self.operators:%0A y, x = operands.pop(), operands.pop()%0A operands.append(self.calcValue(x, y, token))%0A else:%0A operands.append(int(token))%0A return operands%5B0%5D%0A %0A def calculate(self, s):%0A tokens = self.toRPN(s)%0A return self.evalRPN(tokens)
|
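A quick hand-checked trace of the shunting-yard conversion and RPN evaluation implemented above, assuming the Solution class from this record is in scope.

s = Solution()
print(s.toRPN('3+2*2'))      # ['3', '2', '2', '*', '+']
print(s.calculate('3+2*2'))  # 7
print(s.calculate('3/2'))    # 1, since int(float(3) / 2) truncates toward zero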