commit (string, len 40) | subject (string, len 1-3.25k) | old_file (string, len 4-311) | new_file (string, len 4-311) | old_contents (string, len 0-26.3k) | lang (string, 3 classes) | proba (float64, 0-1) | diff (string, len 0-7.82k)
---|---|---|---|---|---|---|---|
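Each row below lays out one record cell by cell, in the column order above, with `|` lines separating the cells. The `old_contents` cell holds the pre-commit source, while the `diff` cell is stored percent-encoded (`%0A` for newlines, `%22` for double quotes, `%7B`/`%7D` for braces, `%25` for a literal percent sign). A minimal sketch for recovering the plain-text diff from a cell, assuming standard URL percent-encoding throughout; `decode_diff_cell` and the sample payload are illustrative, not part of the dataset:

```python
from urllib.parse import unquote

def decode_diff_cell(cell: str) -> str:
    """Percent-decode one diff cell.

    Assumes standard URL percent-encoding as seen in the rows below:
    %0A = newline, %22 = double quote, %25 = percent sign.
    """
    return unquote(cell)

# Sample payload taken from the first row's diff cell below.
print(decode_diff_cell("+import time%0Aimport l"))
# -> '+import time\nimport l'
```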
4dc49433ad354b1530207db308f4c7b76f40db70
|
Add command
|
tf2server.py
|
tf2server.py
|
import os
import libtmux


class CorruptedTf2ServerInstanceError(Exception):
    """
    Raised when an invalid TF2 server instance is found.
    """


class Tf2Server(object):
    """
    The Tf2Server class represents a single Team Fortress 2 server.
    """

    def __init__(self, name, path):
        """
        Creates the Tf2Server class instance that uses the given path.
        :param name: The TF2 server instance name.
        :param path: The absolute path to where the TF2 server is located.
        """
        self.name = name
        self.path = path
        self.tmux_server = None
        if not os.path.isdir(os.path.join(path, 'tf')):
            raise CorruptedTf2ServerInstanceError()

    def _get_tmux_session_name(self):
        file_name = os.path.join(self.path, '.tmux-session')
        if not os.path.isfile(file_name):
            return self.name
        else:
            with open(file_name, 'r') as f:
                content = f.read()
                return content.strip()

    def _get_log_file_path(self):
        return os.path.join(self.path, self.name.join('.log'))

    def is_running(self):
        """
        Checks whether the server is running or not.
        :return: True if the instance is running, False otherwise.
        """
        session_name = self._get_tmux_session_name()
        if not self.tmux_server:
            self.tmux_server = libtmux.Server()
        return self.tmux_server.has_session(session_name)

    def start(self, ip, port=27015, map='cp_badlands', server_cfg_file='server.cfg'):
        """
        Starts the server, if it is not yet running.
        """
        if self.is_running():
            print('Server already running')
        else:
            session = self.tmux_server.new_session(self._get_tmux_session_name())
            pane = session.attached_pane
            srcds_location = os.path.join(self.path, 'srcds_run')
            exec = '{0} -game tf -ip {1} -port {2} +map {3} +maxplayers 24 -secured -timeout 0 +servercfgfile {4}' \
                .format(srcds_location, ip, port, map, server_cfg_file)
            print(exec)
            pane.send_keys(exec)

    def stop(self):
        if self.is_running():
            self.tmux_server.kill_session(self._get_tmux_session_name())
|
Python
| 0.001952 |
@@ -3,16 +3,28 @@
port os%0A
+import time%0A
import l
@@ -329,17 +329,16 @@
Create
-s
the Tf2
@@ -1112,16 +1112,555 @@
log'))%0A%0A
+ def _has_sourcemod(self):%0A path = os.path.join(self.path, 'tf/addons/sourcemod/plugins/basechat.smx')%0A return os.path.isfile(path)%0A%0A def command(self, command):%0A %22%22%22%0A Execute a command on the running TF2 server instance.%0A :param command: str%0A %22%22%22%0A if not self.is_running():%0A return%0A%0A session = self.tmux_server.find_where(%7B'session_name': self._get_tmux_session_name()%7D)%0A pane = session.attached_pane%0A%0A print(command)%0A pane.send_keys(command)%0A%0A
def
@@ -1702,17 +1702,16 @@
Check
-s
whether
@@ -2467,20 +2467,23 @@
-exec
+command
= '%7B0%7D
@@ -2665,20 +2665,23 @@
print(
-exec
+command
)%0A
@@ -2705,12 +2705,15 @@
eys(
-exec
+command
)%0A%0A
@@ -2753,32 +2753,345 @@
f.is_running():%0A
+ msg = 'Server shutting down in 10 seconds!'%0A print(msg)%0A if self._has_sourcemod():%0A self.command('sm_csay %22%7B0%7D%22'.format(msg))%0A self.command('say %22%7B0%7D%22'.format(msg))%0A%0A time.sleep(10)%0A self.command('quit')%0A time.sleep(5)%0A%0A
self
|
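The hunk headers in these diff cells (for instance `@@ -3,16 +3,28 @@` in the row above) appear to give 1-based character offsets and span lengths into `old_contents`, not line numbers; each hunk body mixes unprefixed context with `+` insertions and `-` deletions. A hedged sketch of splicing one decoded hunk under that reading, which checks out against the first hunk of the row above; `apply_char_hunk` is an illustrative helper, not dataset tooling:

```python
from urllib.parse import unquote

def apply_char_hunk(old: str, start: int, old_len: int, new_text: str) -> str:
    """Replace one character span of old_contents with its new text.

    Assumes @@ -start,len +start,len @@ gives a 1-based character offset
    plus old/new span lengths, and that new_text is the percent-decoded
    replacement span (context and '+' lines, with '-' lines dropped).
    """
    i = start - 1  # convert the 1-based offset to a 0-based index
    return old[:i] + new_text + old[i + old_len:]

# First hunk of the row above: the 16 chars at offset 3 become 28 chars.
old = "import os\nimport libtmux\n"
print(apply_char_hunk(old, 3, 16, unquote("port os%0Aimport time%0Aimport l")))
# -> 'import os\nimport time\nimport libtmux\n'
```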
9b922f5e3e99d2fa9eb4b56ebb3ccf6b591680cc
|
update docstring
|
depfinder/cli.py
|
depfinder/cli.py
|
# Copyright (c) <2015-2016>, Eric Dill
#
# All rights reserved. Redistribution and use in source and binary forms, with
# or without modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import, division, print_function
from argparse import ArgumentParser
from collections import defaultdict
import logging
import os
from pprint import pprint
import itertools
import pdb
import sys
import yaml
from . import main
from .main import (simple_import_search, notebook_path_to_dependencies,
                   parse_file, sanitize_deps)

logger = logging.getLogger('depfinder')


class InvalidSelection(RuntimeError):
    pass


def _init_parser():
    p = ArgumentParser(
        description="""
Tool for inspecting the dependencies of your python project.
""",
    )
    p.add_argument(
        'file_or_directory',
        help=("Valid options are a single python file, a single jupyter "
              "(ipython) notebook or a directory of files that include "
              "python files")
    )
    p.add_argument(
        '-y',
        '--yaml',
        action='store_true',
        default=False,
        help=("Output in syntactically valid yaml when true. Defaults to "
              "%(default)s"))
    p.add_argument(
        '-V',
        '--version',
        action='store_true',
        default=False,
        help="Print out the version of depfinder and exit"
    )
    p.add_argument(
        '--no-remap',
        action='store_true',
        default=False,
        help=("Do not remap the names of the imported libraries to their "
              "proper conda name")
    )
    p.add_argument(
        '-v',
        '--verbose',
        action='store_true',
        default=False,
        help="Enable debug level logging info from depfinder"
    )
    p.add_argument(
        '-q',
        '--quiet',
        action='store_true',
        default=False,
        help="Turn off all logging from depfinder"
    )
    p.add_argument(
        '-k', '--key',
        action="append",
        default=[],
        help=("Select some or all of the output keys. Valid options are "
              "'required', 'optional', 'builtin', 'relative', 'all'. Defaults "
              "to 'all'")
    )
    p.add_argument(
        '--conda',
        action="store_true",
        default=False,
        help=("Format output so it can be passed as an argument to conda "
              "install or conda create")
    )
    p.add_argument(
        '--pdb',
        action="store_true",
        help="Enable PDB debugging on exception",
        default=False,
    )
    p.add_argument(
        '--ignore',
        default='',
        help="Ignore pattern(s) for files not to inpsect"
    )
    p.add_argument(
        '--strict',
        default=False,
        action="store_true",
        help=("Immediately raise an Exception if any files fail to parse. Defaults to off.")
    )
    return p


def cli():
    p = _init_parser()
    args = p.parse_args()
    if args.verbose and args.quiet:
        msg = ("You have enabled both verbose mode (--verbose or -v) and "
               "quiet mode (-q or --quiet). Please pick one. Exiting...")
        raise InvalidSelection(msg)
    if args.pdb:
        # set the pdb_hook as the except hook for all exceptions
        def pdb_hook(exctype, value, traceback):
            pdb.post_mortem(traceback)
        sys.excepthook = pdb_hook

    main.STRICT_CHECKING = args.strict

    # Configure Logging
    loglevel = logging.INFO
    if args.quiet:
        loglevel = logging.ERROR
    elif args.verbose:
        loglevel = logging.DEBUG
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(loglevel)
    f = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    formatter = logging.Formatter(f)
    stream_handler.setFormatter(formatter)
    logger.setLevel(loglevel)
    logger.addHandler(stream_handler)

    if args.version:
        # handle the case where the user just cares about the version. Print
        # version and exit
        from . import __version__
        print(__version__)
        return 0

    file_or_dir = args.file_or_directory
    keys = args.key
    if keys == []:
        keys = None
    logger.debug('keys: %s', keys)

    def dump_deps(deps, keys):
        """
        Helper function to print the dependencies to the console.

        Parameters
        ----------
        deps : dict
            Dictionary of dependencies that were found
        """
        if keys is None:
            keys = list(deps.keys())
        deps = {k: list(v) for k, v in deps.items() if k in keys}
        if args.yaml:
            print(yaml.dump(deps, default_flow_style=False))
        elif args.conda:
            list_of_deps = [item for sublist in itertools.chain(deps.values())
                            for item in sublist]
            print(' '.join(list_of_deps))
        else:
            pprint(deps)

    if os.path.isdir(file_or_dir):
        logger.debug("Treating {} as a directory and recursively searching "
                     "it for python files".format(file_or_dir))
        # directories are a little easier from the purpose of the API call.
        # print the dependencies to the console and then exit
        ignore = args.ignore.split(',')
        deps = simple_import_search(file_or_dir, remap=not args.no_remap,
                                    ignore=ignore)
        dump_deps(deps, keys)
        return 0
    elif os.path.isfile(file_or_dir):
        if file_or_dir.endswith('ipynb'):
            logger.debug("Treating {} as a jupyter notebook and searching "
                         "all of its code cells".format(file_or_dir))
            deps = notebook_path_to_dependencies(file_or_dir,
                                                 remap=not args.no_remap)
            sanitized = sanitize_deps(deps)
            # print the dependencies to the console and then exit
            dump_deps(sanitized, keys)
            return 0
        elif file_or_dir.endswith('.py'):
            logger.debug("Treating {} as a single python file"
                         "".format(file_or_dir))
            mod, path, import_finder = parse_file(file_or_dir)
            mods = defaultdict(set)
            for k, v in import_finder.describe().items():
                mods[k].update(v)
            deps = {k: sorted(list(v)) for k, v in mods.items() if v}
            sanitized = sanitize_deps(deps)
            # print the dependencies to the console and then exit
            dump_deps(sanitized, keys)
            return 0
        else:
            # Any file with a suffix that is not ".ipynb" or ".py" will not
            # be parsed correctly
            msg = ("depfinder is only configured to work with jupyter "
                   "notebooks and python source code files. It is anticipated "
                   "that the file {} will not work with depfinder"
                   "".format(file_or_dir))
            raise RuntimeError(msg)
|
Python
| 0.000001 |
@@ -4013,34 +4013,44 @@
lp=%22
-Ignore pattern(s) for file
+Comma separated list of file pattern
s no
@@ -4056,18 +4056,18 @@
ot to in
-p
s
+p
ect%22%0A
|
a1bda82bd06cbfd12e6074f22cb31d88f2abd96a
|
update py +x
|
tools/fuckGFW.py
|
tools/fuckGFW.py
|
'''
Update hosts for *nix
Author: [email protected]
Version: 0.0.1
Date: 2012-10-24 14:35:39
'''
import urllib2
import os
import sys
HOSTS_PATH = "/etc/hosts"
HOSTS_SOURCE = "http://tx.txthinking.com/hosts"
SEARCH_STRING = "#TX-HOSTS"
def GetRemoteHosts(url):
    f = urllib2.urlopen(url, timeout=5)
    hosts = [line for line in f]
    f.close()
    return hosts


def main():
    try:
        hosts = GetRemoteHosts(HOSTS_SOURCE)
    except IOError:
        print "Could't connect to %s. Try again." % HOSTS_SOURCE
        sys.exit(1)

    yours = ""
    if os.path.isfile(HOSTS_PATH):
        f = open(HOSTS_PATH, "r")
        for line in f:
            if SEARCH_STRING in line:
                break
            yours += line
        f.close()
        os.rename(HOSTS_PATH, HOSTS_PATH + ".BAK")

    yours += SEARCH_STRING + "\n"
    fp = open(HOSTS_PATH, "w")
    fp.write(yours)
    fp.writelines(hosts)
    fp.close()
    print "Success"


if __name__ == "__main__":
    main()
|
Python
| 0.000001 |
@@ -1,12 +1,58 @@
+#!/usr/bin/env python%0A# -*- coding: utf-8 -*-%0A
'''%0AUpdate h
|
ed8fc99f0867779db8879b3f6cc142985d01fc58
|
change RunInstances argument
|
qingcloud/cli/iaas_client/actions/instance/run_instances.py
|
qingcloud/cli/iaas_client/actions/instance/run_instances.py
|
# coding: utf-8
from qingcloud.cli.misc.utils import explode_array
from qingcloud.cli.iaas_client.actions.base import BaseAction
class RunInstancesAction(BaseAction):

    action = 'RunInstances'
    command = 'run-instances'
    usage = '%(prog)s --image_id <image_id> --instance_type <instance_type> [options] [-f <conf_file>]'

    @classmethod
    def add_ext_arguments(cls, parser):
        parser.add_argument('-i', '--image_id', dest='image_id',
                            action='store', type=str, default='',
                            help='Image ID')
        parser.add_argument('-t', '--instance_type', dest='instance_type',
                            action='store', type=str, default=None,
                            help='Instance type: small_b, small_c, medium_a, medium_b, medium_c,\
                            large_a, large_b, large_c')
        parser.add_argument('-c', '--count', dest = 'count',
                            action='store', type=int, default=1,
                            help='The number of instances to launch, default 1.')
        parser.add_argument('-C', '--cpu', dest='cpu',
                            action='store', type=int, default=0,
                            help='CPU core: 1, 2, 4, 8, 16')
        parser.add_argument('-M', '--memory', dest='memory',
                            action='store', type=int, default=0,
                            help='Memory size in MB: 512, 1024, 2048, 4096, 8192, 16384')
        parser.add_argument('-N', '--instance_name', dest='instance_name',
                            action='store', type=str, default='',
                            help='Instance name')
        parser.add_argument('-n', '--vxnets', dest='vxnets',
                            action='store', type=str, default='',
                            help='Specifies the IDs of vxnets the instance will join.')
        parser.add_argument('-s', '--security_group', dest='security_group',
                            action='store', type=str, default='',
                            help='The ID of security group that will be applied to instance')
        parser.add_argument('-m', '--login_mode', dest='login_mode',
                            action='store', type=str, default='',
                            help='SSH login mode: keypair or passwd')
        parser.add_argument('-p', '--login_passwd', dest='login_passwd',
                            action='store', type=str, default='',
                            help='Login_passwd, should specified when SSH login mode is "passwd".')
        parser.add_argument('-k', '--login_keypair', dest='login_keypair',
                            action='store', type=str, default='',
                            help='Login_keypair, should specified when SSH login mode is "keypair".')
        return parser

    @classmethod
    def build_directive(cls, options):
        required_params = {
            'image_id': options.image_id,
            'instance_type': options.instance_type,
        }
        for param in required_params:
            if required_params[param] is None or required_params[param] == '':
                print 'error: param [%s] should be specified' % param
                return None

        return {
            'image_id': options.image_id,
            'instance_type' : options.instance_type,
            'cpu': options.cpu,
            'memory': options.memory,
            'instance_name' : options.instance_name,
            'count' : options.count,
            'vxnets': explode_array(options.vxnets),
            'security_group': options.security_group,
            'login_mode': options.login_mode,
            'login_passwd': options.login_passwd,
            'login_keypair': options.login_keypair,
        }
|
Python
| 0 |
@@ -414,17 +414,17 @@
ument('-
-i
+m
', '--im
@@ -1935,25 +1935,25 @@
_argument('-
-m
+l
', '--login_
|
3d5fc893cee6b7ab1596acedb052366ce86005b7
|
Use requests module rather than mozsvc.http_helpers
|
tokenserver/assignment/sqlnode.py
|
tokenserver/assignment/sqlnode.py
|
""" SQL Mappers
"""
import json
import sys
from zope.interface import implements
import time
from mozsvc.util import dnslookup
from tokenserver.assignment import INodeAssignment
from tokenserver.util import get_logger
# try to have this changed upstream:
# XXX being able to set autocommit=1;
# forcing it for now
from pymysql.connections import Connection, COM_QUERY
def autocommit(self, value):
    value = True
    try:
        self._execute_command(COM_QUERY, "SET AUTOCOMMIT = %s" % \
                              self.escape(value))
        self.read_packet()
    except:
        exc, value, __ = sys.exc_info()
        self.errorhandler(None, exc, value)


Connection.autocommit = autocommit


from mozsvc.exceptions import BackendError
from mozsvc.http_helpers import get_url

from wimms.sql import SQLMetadata
from wimms.shardedsql import ShardedSQLMetadata


class SQLNodeAssignment(SQLMetadata):
    """Just a placeholder to mark with a zope interface.

    Silly, isn't it ?
    """
    implements(INodeAssignment)

    def get_patterns(self):
        res = super(SQLNodeAssignment, self).get_patterns()
        return dict([(pattern.service, pattern.pattern) for pattern in res])


class ShardedSQLNodeAssignment(ShardedSQLMetadata):
    """Like the SQL backend, but with one DB per service
    """
    implements(INodeAssignment)


class SecuredShardedSQLNodeAssignment(ShardedSQLMetadata):
    """Like the sharded backend, but proxies all writes to stoken
    """
    implements(INodeAssignment)

    def __init__(self, proxy_uri, databases, create_tables, **kw):
        base = super(SecuredShardedSQLNodeAssignment, self)
        base.__init__(databases, create_tables, **kw)
        self.proxy_uri = proxy_uri
        self.logger = None
        self._resolved = None, time.time()

    def get_logger(self):
        if self.logger is None:
            self.logger = get_logger()
        return self.logger

    def _proxy(self, method, url, data=None, headers=None):
        if data is not None:
            data = json.dumps(data)

        status, headers, body = get_url(url, method, data, headers)

        if body:
            try:
                body = json.loads(body)
            except ValueError:
                self.get_logger().error("bad json body from sreg (%s): %s" %
                                        (url, body))
                raise BackendError('Bad answer from proxy')
        return status, body

    def _dnslookup(self, proxy):
        # does a DNS lookup with gethostbyname and cache it in
        # memory for one hour.
        current, age = self._resolved
        if current is None or age + 3600 < time.time():
            current = dnslookup(proxy)
            self._resolved = current, time.time()
        return current

    def allocate_node(self, email, service):
        """Calls the proxy to get an allocation"""
        proxy_uri = self._dnslookup(self.proxy_uri)
        url = '%s/1.0/%s' % (proxy_uri, service)
        data = {'email': email}
        status, body = self._proxy('POST', url, data)

        if status != 200:
            msg = 'Could not get an allocation\n'
            msg += 'status: %s\n' % status
            msg += 'body: %s\n' % str(body)
            raise BackendError(msg, backend=url)

        return body['uid'], body['node']
|
Python
| 0 |
@@ -87,16 +87,33 @@
t time%0A%0A
+import requests%0A%0A
from moz
@@ -769,48 +769,8 @@
ror%0A
-from mozsvc.http_helpers import get_url%0A
from
@@ -2033,75 +2033,316 @@
ta)%0A
- status, headers, body = get_url(url, method, data, headers)
+%0A try:%0A resp = requests.request(method, url, data=data, headers=headers)%0A except requests.exceptions.RequestException:%0A self.get_logger().exception(%22error talking to sreg (%25s)%22 %25 (url,))%0A raise BackendError('Error talking to proxy')%0A%0A body = resp.content
%0A
@@ -2664,22 +2664,32 @@
return
+resp.
status
+_code
, body%0A%0A
|
8f22be125cabf38f00b360e0c6d48a5d1650bef0
|
Remove wrong dependency
|
sponsorship_compassion/__openerp__.py
|
sponsorship_compassion/__openerp__.py
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# ______ Releasing children from poverty _
# / ____/___ ____ ___ ____ ____ ___________(_)___ ____
# / / / __ \/ __ `__ \/ __ \/ __ `/ ___/ ___/ / __ \/ __ \
# / /___/ /_/ / / / / / / /_/ / /_/ (__ |__ ) / /_/ / / / /
# \____/\____/_/ /_/ /_/ .___/\__,_/____/____/_/\____/_/ /_/
# /_/
# in Jesus' name
#
# Copyright (C) 2014-2015 Compassion CH (http://www.compassion.ch)
# @author: Cyril Sester, Emanuel Cino
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name': 'Compassion Sponsorships',
    'version': '1.6',
    'category': 'Other',
    'author': 'Compassion CH',
    'website': 'http://www.compassion.ch',
    'depends': ['contract_compassion', 'crm',
                'l10n_ch', 'account_cancel', 'partner_compassion',
                'web_m2x_options', 'account_invoice_split_invoice'],
    'data': [
        'view/sponsorship_contract_view.xml',
        'view/sponsorship_contract_group_view.xml',
        'view/end_sponsorship_wizard_view.xml',
        'view/invoice_line_view.xml',
        'view/res_partner_view.xml',
        'view/generate_gift_view.xml',
        'view/account_invoice_split_wizard_view.xml',
        'view/child_view.xml',
        'data/lang.xml',
        'data/sequence.xml',
        'data/sponsorship_product.xml',
        'data/analytic_accounting.xml',
        'workflow/contract_workflow.xml',
    ],
    'demo': [
        'demo/sponsorship_compassion_demo.xml'
    ],
    'installable': True,
    'auto_install': False,
}
|
Python
| 0.000086 |
@@ -1631,19 +1631,8 @@
- 'l10n_ch',
'ac
|
145e9141af1e1abdf0a9ab3c035ed8df6298ba0f
|
rebase migration dependency.
|
accelerator/migrations/0015_expert_bio_add_max_length_validation.py
|
accelerator/migrations/0015_expert_bio_add_max_length_validation.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-07-25 15:00
from __future__ import unicode_literals
import django.core.validators
from django.db import (
    migrations,
    models,
)


class Migration(migrations.Migration):

    dependencies = [
        ('accelerator', '0013_allocator'),
    ]

    operations = [
        migrations.AlterField(
            model_name='expertprofile',
            name='bio',
            field=models.TextField(blank=True, default='', validators=[
                django.core.validators.MaxLengthValidator(7500)]),
        ),
    ]
|
Python
| 0 |
@@ -286,19 +286,41 @@
'001
-3_allocator
+4_alter_fluent_page_type_managers
'),%0A
|
0992a05b5f199b6ade27f19af9271e5e8556c372
|
Clarified an example param
|
apollo-ws/visualizer-services/GAIA/trunk/VisualizerServiceClient.py
|
apollo-ws/visualizer-services/GAIA/trunk/VisualizerServiceClient.py
|
# Copyright 2013 University of Pittsburgh
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
'''
Created on Feb 13, 2013
@author: John Levander
'''
from VisualizerService_services import *
#from SimulatorService_services_types import *
from ApolloFactory import *
import time
#create the service object
service = VisualizerServiceLocator().getVisualizerServiceEI("http://127.0.0.1:8087/gaia")
#create an epidemic model input object
factory = ApolloFactory()
runRequest = runRequest()
runRequest._visualizerConfiguration = factory.new_VisualizerConfiguration()
runRequest._visualizerConfiguration._authentication._requesterId = "fake"
runRequest._visualizerConfiguration._authentication._requesterPassword = "fake"
runRequest._visualizerConfiguration._visualizerIdentification._visualizerDeveloper = "PSC"
runRequest._visualizerConfiguration._visualizerIdentification._visualizerName = "GAIA"
runRequest._visualizerConfiguration._visualizerIdentification._visualizerVersion = "v1.0"
runRequest._visualizerConfiguration._visualizationOptions._runId = "PSC_GAIA_v1.0_42"
runRequest._visualizerConfiguration._visualizationOptions._location = "42003/Allegheny County"
runRequest._visualizerConfiguration._visualizationOptions._outputFormat = "mp4"
print 'Calling "run"'
run_response = service.run(runRequest)
print "Run submitted with ID: " + str(run_response._runId)
get_run_status_request = getRunStatusRequest()
get_run_status_request._runId = run_response._runId
run_status_response = service.getRunStatus(get_run_status_request)
print '\nCalling "getRunStatus"'
print "Status Code: " + run_status_response._runStatus._status + " Status Message: " + run_status_response._runStatus._message
|
Python
| 0.999483 |
@@ -1552,20 +1552,20 @@
= %22PSC_
-GAIA
+FRED
_v1.0_42
|
837f05228fac7f6addd28069c6387f798e01ff8c
|
Add checksum test.
|
tests/test_fs.py
|
tests/test_fs.py
|
from farmfs.fs import normpath as _normalize
from farmfs.fs import userPath2Path as up2p
from farmfs.fs import Path
import pytest
def test_create_path():
    p1 = Path("/")
    p2 = Path("/a")
    p2 = Path("/a/b")
    p3 = Path(p1)
    p4 = Path("a", p1)
    with pytest.raises(AssertionError):
        p5 = Path("/a/b", p2)
    with pytest.raises(ValueError):
        p6 = Path(None)
    with pytest.raises(ValueError):
        p7 = Path(None, p1)
    with pytest.raises(AssertionError):
        p8 = Path("a", "b")


def test_normalize_abs():
    assert _normalize("/") == "/"
    assert _normalize("/a") == "/a"
    assert _normalize("/a/") == "/a"
    assert _normalize("/a/b") == "/a/b"
    assert _normalize("/a/b/") == "/a/b"
    assert _normalize("/a//b") == "/a/b"
    assert _normalize("/a//b//") == "/a/b"


def test_normalize_relative():
    assert _normalize("a") == "a"
    assert _normalize("a/") == "a"
    assert _normalize("a/b") == "a/b"
    assert _normalize("a/b/") == "a/b"
    assert _normalize("a//b") == "a/b"
    assert _normalize("a//b//") == "a/b"


def test_userPath2Path():
    assert up2p("c", Path("/a/b")) == Path("/a/b/c")
    assert up2p("/c", Path("/a/b")) == Path("/c")


def test_cmp():
    assert Path("/a/b") < Path("/a/c")
    assert Path("/a/c") > Path("/a/b")
    assert Path("/a/2") < Path("/b/1")
    assert Path("/") < Path("/a")
|
Python
| 0.000002 |
@@ -1321,20 +1321,838 @@
h(%22/%22) %3C Path(%22/a%22)%0A
+%[email protected](reason=%22bugs not impacting development at moment.%22)%0Adef test_relative_to():%0A assert Path(%22/a/b%22).relative_to(Path(%22/%22)) == %22a/b%22%0A assert Path(%22/a/b%22).relative_to(Path(%22/a%22)) == %22b%22%0A assert Path(%22/a/b/c%22).relative_to(Path(%22/a%22)) == %22b/c%22%0A assert Path(%22/a/b/c%22).relative_to(Path(%22/a/b%22)) == %22c%22%0A assert Path(%22/a/b%22).relative_to(Path(%22/c%22)) == %22../a/b%22%0A%[email protected](%0A %22input,expected%22,%0A %5B%0A (b'', u%22d41d8cd98f00b204e9800998ecf8427e%22),%0A (b'abc', u%22900150983cd24fb0d6963f7d28e17f72%22),%0A (b'%5Cxea%5Cx80%5Cx80abcd%5Cxde%5Cxb4', u'b8c6dee81075e87d348522b146c95ae3'),%0A %5D,)%0Adef test_checksum_empty(tmp_path, input, expected):%0A tmp = Path(str(tmp_path))%0A fp = tmp.join(%22empty.txt%22)%0A with fp.open(%22wb%22) as fd:%0A fd.write(input)%0A assert fp.checksum() == expected%0A
|
9786c5f242f2b70240e7bb23c866c864cb4ed4ca
|
Add registrations to admin
|
expeditions/admin.py
|
expeditions/admin.py
|
from django.contrib import admin
from expeditions.models import Expedition, Waypoint
# Register your models here.
class ExpeditionAdmin(admin.ModelAdmin):
    list_display = ('id', 'name', 'start_date', 'end_date', 'published')
    search_fields = ('name', 'start_date')
    list_filter = ('published', )


class WaypointAdmin(admin.ModelAdmin):
    list_display = ('id', 'expedition', 'name', 'system', 'planet', 'datetime')
    list_filter = ('expedition', )
    search_fields = ('name', 'expedition__name', 'system', 'planet', 'datetime')
admin.site.register(Expedition, ExpeditionAdmin)
admin.site.register(Waypoint, WaypointAdmin)
|
Python
| 0 |
@@ -77,16 +77,30 @@
Waypoint
+, Registration
%0A# Regis
@@ -236,16 +236,56 @@
ished')%0A
+ list_display_links = ('id', 'name')%0A
sear
@@ -470,24 +470,64 @@
'datetime')%0A
+ list_display_links = ('id', 'name')%0A
list_fil
@@ -631,16 +631,273 @@
time')%0A%0A
+%0Aclass RegistrationAdmin(admin.ModelAdmin):%0A list_display = ('id', 'user', 'expedition', 'registration_number')%0A list_display_links = ('id', 'user')%0A list_filter = ('expedition', 'user')%0A search_fields = ('user__username', 'expedition__name')%0A%0A%0A
admin.si
@@ -977,12 +977,65 @@
ypointAdmin)
+%0Aadmin.site.register(Registration, RegistrationAdmin)
|
6c7ce5298f9555027dafcd36596775d4e4424d97
|
Add GUIDHelper class
|
exporter/exporter.py
|
exporter/exporter.py
|
from cfconfigurator.cf import CF
from cfconfigurator.uaa import UAA, UAAException
import collections
import yaml
import pyaml
class LastUpdatedOrderedDict(collections.OrderedDict):
    'Store items in the order the keys were last added'

    def __setitem__(self, key, value):
        if key in self:
            del self[key]
        collections.OrderedDict.__setitem__(self, key, value)


class Space:
    def __init__(self):
        pass


class SecurityGroup:

    properties = ["name", "context", "rules"]

    def __init__(self, config):
        self._prop_dict = LastUpdatedOrderedDict()
        self._config = config
        self._rules = []

    def load(self):
        rules = self._config['rules']
        for rule in rules:
            self.add_rule(rule)
        for prop in self.properties:
            try:
                value = getattr(self, prop)
                self._prop_dict[str(prop)] = value
            except AttributeError:
                pass

    def asdict(self):
        return self._prop_dict

    @property
    def rules(self):
        return self._rules

    def add_rule(self, rule):
        new_rule = SecurityRule(rule)
        new_rule.load()
        self._rules.append(new_rule.sec_rule)

    def __getattr__(self, name):
        if name in self._config:
            return self._config[name]
        raise AttributeError("%s not found" % name)


class SecurityRule:

    properties = ["name", "protocol", "destination", "ports", "logs", "code", "type"]

    def __init__(self, config):
        self._config = config
        self._prop_dict = LastUpdatedOrderedDict()

    def asdict(self):
        return self._prop_dict

    def load(self):
        for prop in self.properties:
            try:
                value = getattr(self, prop)
                self._prop_dict[prop] = value
            except AttributeError:
                pass

    def __getattr__(self, name):
        if name in self._config:
            return self._config[name]
        raise AttributeError("%s not found" % name)


class User:

    properties = ["name", "active", "email", "given_name", "family_name",
                  "default_organization", "default_space", "origin", "external_id"]

    def __init__(self, user_cf, user_uaa):
        self._user_cf = user_cf
        self._user_uaa = user_uaa
        self._prop_dict = LastUpdatedOrderedDict()

    def asdict(self):
        return self._prop_dict

    @property
    def given_name(self):
        response = None
        name = self.__getattr__('name')
        if 'givenName' in name:
            response = name['givenName']
        return response

    @property
    def family_name(self):
        response = None
        name = self.__getattr__('name')
        if 'familyName' in name:
            response = name['familyName']
        return response

    @property
    def name(self):
        return getattr(self, 'userName')

    def load(self):
        for prop in self.properties:
            try:
                value = getattr(self, prop)
                self._prop_dict[prop] = value
            except AttributeError as atbe:
                pass

    def __getattr__(self, name):
        if name in self._user_cf:
            return self._user_cf[name]
        elif name in self._user_uaa:
            return self._user_uaa[name]
        raise AttributeError("%s not found" % name)


class Exporter:
    def __init__(self, api_url=""):
        self.client = CF(api_url)

    def login(self, admin_user='admin', admin_password=''):
        self.client.login(admin_user, admin_password)

    def get_variable_group(self):
        response = self.client.request("GET", "/v2/config/environment_variable_groups/running")
        return response

    def get_user_default_org(self, guid):
        pass

    def get_user_default_space(self, guid):
        response = self.client.request("GET", "/v2/spaces")
        pass

    def get_users_list(self):
        response = self.client.request("GET", "/v2/users")
        resources = response[0]['resources']
        user_ids = [user['metadata']['guid'] for user in resources]
        return user_ids

    def get_user_details(self, guid):
        user = None
        user_dict = collections.OrderedDict()
        try:
            user = self.client.uaa.user_get(user_id)
            user_dict['name'] = user['userName']
            user_dict['email'] = [email["value"] for email in user['emails'] if email['primary']]
            user_dict['familyName'] = user['name']['familyName']
            user_dict['givenName'] = user['name']['givenName']
            user_dict['active'] = user['active']
        except UAAException as uaaexp:
            print(uaaexp)
        return user_dict

    def transform_in_valid_yaml(self, obj):
        yaml_string = pyaml.dump(obj)
        print(yaml_string)
|
Python
| 0 |
@@ -118,16 +118,26 @@
rt pyaml
+%0Aimport os
%0A%0Aclass
@@ -445,16 +445,856 @@
pass%0A%0A
+%0Aclass GUIDHelper:%0A%0A def __init__(self, config):%0A self._config = config%0A%0A def get_guids(self):%0A guids = %5Bresource%5B'metadata'%5D%5B'guid'%5D for resource in self._config%5B0%5D%5B'resources'%5D%5D%0A return guid%0A%0Aclass Organization:%0A%0A properties = %5B%22name%22%5D%0A%0A def __init__(self, config):%0A self._prop_dict = LastUpdatedOrderedDict()%0A self._config = config%0A self._rules = %5B%5D%0A%0A def load(self):%0A for prop in self.properties:%0A try:%0A value = getattr(self, prop)%0A self._prop_dict%5Bprop%5D = value%0A except AttributeError:%0A pass%0A%0A def asdict(self):%0A return self._prop_dict%0A%0A def __getattr__(self, name):%0A if name in self._config:%0A return self._config%5Bname%5D%0A raise AttributeError(%22%25s not found%22 %25 name)%0A%0A
class Se
@@ -4273,16 +4273,69 @@
rl=%22%22):%0A
+ api_url = os.environ.get(%22EXPORTER_API_URL%22)%0A
@@ -4421,16 +4421,142 @@
rd=''):%0A
+ admin_user = os.environ.get(%22EXPORTER_ADMIN_USER%22)%0A admin_password = os.environ.get(%22EXPORTER_ADMIN_PASSWORD%22)%0A
|
44c78525ee886a369be66d671523dd3258ba37d5
|
Remove hard FPU for Cortex M4 // Resolve #162
|
platformio/builder/scripts/basearm.py
|
platformio/builder/scripts/basearm.py
|
# Copyright (C) Ivan Kravets <[email protected]>
# See LICENSE for details.
"""
Base for ARM microcontrollers.
"""
from SCons.Script import Builder, DefaultEnvironment
env = DefaultEnvironment()
env.Replace(
    AR="arm-none-eabi-ar",
    AS="arm-none-eabi-as",
    CC="arm-none-eabi-gcc",
    CXX="arm-none-eabi-g++",
    OBJCOPY="arm-none-eabi-objcopy",
    RANLIB="arm-none-eabi-ranlib",
    SIZETOOL="arm-none-eabi-size",
    ARFLAGS=["rcs"],
    ASPPFLAGS=["-x", "assembler-with-cpp"],
    CPPFLAGS=[
        "-g",  # include debugging info (so errors include line numbers)
        "-Os",  # optimize for size
        "-ffunction-sections",  # place each function in its own section
        "-fdata-sections",
        "-Wall",
        "-mthumb",
        "-mcpu=${BOARD_OPTIONS['build']['cpu']}",
        "-nostdlib",
        "-MMD"  # output dependancy info
    ],
    CXXFLAGS=[
        "-fno-rtti",
        "-fno-exceptions"
    ],
    CPPDEFINES=[
        "F_CPU=$BOARD_F_CPU"
    ],
    LINKFLAGS=[
        "-Os",
        "-Wl,--gc-sections,--relax",
        "-mthumb",
        "-mcpu=${BOARD_OPTIONS['build']['cpu']}",
        "--specs=nano.specs"
    ],
    LIBS=["c", "gcc", "m"],
    SIZEPRINTCMD='"$SIZETOOL" -B -d $SOURCES'
)

if env.get("BOARD_OPTIONS", {}).get("build", {}).get(
        "cpu", "")[-2:] == "m4" and env.get("BOARD") != "frdm_k20d50m":
    env.Append(
        CPPFLAGS=[
            "-mfloat-abi=hard",
            "-mfpu=fpv4-sp-d16",
            "-fsingle-precision-constant"
        ],
        LINKFLAGS=[
            "-mfloat-abi=hard",
            "-mfpu=fpv4-sp-d16",
            "-fsingle-precision-constant"
        ]
    )

env.Append(
    BUILDERS=dict(
        ElfToBin=Builder(
            action=" ".join([
                "$OBJCOPY",
                "-O",
                "binary",
                "$SOURCES",
                "$TARGET"]),
            suffix=".bin"
        ),
        ElfToHex=Builder(
            action=" ".join([
                "$OBJCOPY",
                "-O",
                "ihex",
                "-R",
                ".eeprom",
                "$SOURCES",
                "$TARGET"]),
            suffix=".hex"
        )
    )
)
|
Python
| 0 |
@@ -1254,431 +1254,8 @@
%0A)%0A%0A
-if env.get(%22BOARD_OPTIONS%22, %7B%7D).get(%22build%22, %7B%7D).get(%0A %22cpu%22, %22%22)%5B-2:%5D == %22m4%22 and env.get(%22BOARD%22) != %22frdm_k20d50m%22:%0A env.Append(%0A CPPFLAGS=%5B%0A %22-mfloat-abi=hard%22,%0A %22-mfpu=fpv4-sp-d16%22,%0A %22-fsingle-precision-constant%22%0A %5D,%0A LINKFLAGS=%5B%0A %22-mfloat-abi=hard%22,%0A %22-mfpu=fpv4-sp-d16%22,%0A %22-fsingle-precision-constant%22%0A %5D%0A )%0A%0A
env.
|
98bacbc912513fa33775e2e6c2e41363aea7c793
|
Remove strange code
|
stencil/base.py
|
stencil/base.py
|
import os
import optparse
import sys
try:
    from collections import OrderedDict
except ImportError:
    from ordereddict import OrderedDict

from .resources import Directory, File, Template


class WrongSource(Exception):
    pass


class Stencil(object):
    source = None
    variables = []
    help = None

    def __init__(self):
        self.resources = {}
        self.context = {}

    def get_absolute_path(self, source):
        module_path = sys.modules[self.__class__.__module__].__file__
        source_path = os.path.join(os.path.dirname(module_path), source)
        return os.path.abspath(source_path)

    def get_source_list(self):
        if isinstance(self.source, (list, tuple)):
            source_list = list(self.source)
        else:
            source_list = [self.source]
        source_list = [self.get_absolute_path(source) for source in source_list]
        return [path for path in source_list if os.path.isdir(path)]

    def copy(self, target):
        os.makedirs(target, 0755)
        for path in sorted(self.resources):
            real_path = os.path.join(target, path.format(**self.context))
            self.resources[path].copy(real_path, self.context)

    def fill_context(self, args):
        for variable in self.variables:
            value = getattr(args, variable.name, None)
            if value is not None:
                self.context[variable.name] = value
            elif variable.name not in self.context:
                if args.use_defaults and variable.default is not None:
                    self.context[variable.name] = variable.default
                else:
                    self.context[variable.name] = variable.prompt()

    def collect_resources(self):
        source_list = self.get_source_list()
        if not source_list:
            raise WrongSource(
                'None of the source directories exists: %r' % source_path)
        resources = {}
        for source in source_list:
            for root, dirnames, filenames in os.walk(source):
                root = os.path.relpath(root, source)
                for dirname in dirnames:
                    path = os.path.normpath(os.path.join(root, dirname))
                    real_path = os.path.join(source, path)
                    resources[path % self.context] = Directory(real_path)
                for filename in filenames:
                    path = os.path.normpath(os.path.join(root, filename))
                    real_path = os.path.join(source, path)
                    if path.endswith('_tmpl'):
                        path = path[:-5]
                        Resource = Template
                    else:
                        Resource = File
                    resources[path % self.context] = Resource(real_path)
        self.resources = resources

    @classmethod
    def add_to_subparsers(cls, name, subparsers):
        parser = subparsers.add_parser(name, help=cls.help)
        for variable in cls.variables:
            variable.add_to_parser(parser)
        parser.add_argument('target', type=cls.absolute_path,
                            help='destination directory')
        parser.set_defaults(func=cls.run)

    @classmethod
    def absolute_path(cls, arg):
        return os.path.abspath(arg)

    @classmethod
    def run(cls, args):
        stencil = cls()
        stencil.fill_context(args)
        stencil.collect_resources()
        stencil.copy(args.target)
|
Python
| 0.000538 |
@@ -2274,39 +2274,24 @@
sources%5Bpath
- %25 self.context
%5D = Director
@@ -2715,23 +2715,8 @@
path
- %25 self.context
%5D =
|
e748b424d2dac18fc82192be154948ff52d3aae2
|
future div. print
|
tools/roc_auc.py
|
tools/roc_auc.py
|
#!/usr/bin/env python2
"""Calculate ROC AUC for parametric maps vs. Gleason scores. Optionally compare
AUCs and draw the ROC curves into a file."""
import argparse
import numpy as np
import dwi.patient
import dwi.plot
import dwi.util
def parse_args():
    """Parse command-line arguments."""
    p = argparse.ArgumentParser(description=__doc__)
    p.add_argument('--verbose', '-v', action='count',
                   help='be more verbose')
    p.add_argument('--patients', default='patients.txt',
                   help='patients file')
    p.add_argument('--pmapdir', nargs='+', required=True,
                   help='input pmap directory')
    p.add_argument('--threshold', default='3+3',
                   help='classification threshold (maximum negative)')
    p.add_argument('--nboot', type=int, default=2000,
                   help='number of bootstraps')
    p.add_argument('--voxel', default='all',
                   help='index of voxel to use, or all, sole, mean, median')
    p.add_argument('--multilesion', action='store_true',
                   help='use all lesions, not just first for each')
    p.add_argument('--autoflip', action='store_true',
                   help='flip data when AUC < 0.5')
    p.add_argument('--compare', action='store_true',
                   help='do AUC comparison')
    p.add_argument('--figure',
                   help='output figure file')
    args = p.parse_args()
    return args


args = parse_args()

# Collect all parameters.
X, Y = [], []
Params = []
scores = None
for i, pmapdir in enumerate(args.pmapdir):
    data = dwi.patient.read_pmaps(args.patients, pmapdir, [args.threshold],
                                  voxel=args.voxel, multiroi=args.multilesion)
    if scores is None:
        scores, groups, group_sizes = dwi.patient.grouping(data)
    for j, param in enumerate(data[0]['params']):
        x, y = [], []
        for d in data:
            for v in d['pmap']:
                x.append(v[j])
                y.append(d['label'])
        X.append(np.asarray(x))
        Y.append(np.asarray(y))
        Params.append('%i:%s' % (i, param))

# Print info.
if args.verbose > 1:
    d = dict(n=len(X[0]),
             ns=len(scores), s=sorted(scores),
             ng=len(groups), g=' '.join(map(str, groups)),
             gs=', '.join(map(str, group_sizes)))
    print 'Samples: {n}'.format(**d)
    print 'Scores: {ns}: {s}'.format(**d)
    print 'Groups: {ng}: {g}'.format(**d)
    print 'Group sizes: {gs}'.format(**d)

# Print AUCs and bootstrapped AUCs.
if args.verbose > 1:
    print '# param AUC AUC_BS_mean lower upper'
Auc_bs = []
params_maxlen = max(len(p) for p in Params)
for x, y, param in zip(X, Y, Params):
    fpr, tpr, auc = dwi.util.calculate_roc_auc(y, x, autoflip=False)
    if args.autoflip and auc < 0.5:
        x = -x
        fpr, tpr, auc = dwi.util.calculate_roc_auc(y, x)
    # Note: x may now be negated (ROC flipped).
    auc_bs = dwi.util.bootstrap_aucs(y, x, args.nboot)
    avg = np.mean(auc_bs)
    ci1, ci2 = dwi.util.ci(auc_bs)
    d = dict(param=param, auc=auc, avg=avg, ci1=ci1, ci2=ci2)
    if args.verbose:
        s = '{param:%i} {auc:.3f} {avg:.3f} {ci1:.3f} {ci2:.3f}' % params_maxlen
    else:
        s = '{auc:f}'
    print s.format(**d)
    Auc_bs.append(auc_bs)

# Print bootstrapped AUC comparisons.
if args.compare:
    if args.verbose > 1:
        print '# param1 param2 diff Z p'
    done = []
    for i, param_i in enumerate(Params):
        for j, param_j in enumerate(Params):
            if i == j or (i, j) in done or (j, i) in done:
                continue
            done.append((i,j))
            d, z, p = dwi.util.compare_aucs(Auc_bs[i], Auc_bs[j])
            print '%s %s %+0.4f %+0.4f %0.4f' % (param_i, param_j, d, z, p)

# Plot the ROCs.
if args.figure:
    if args.verbose > 1:
        print 'Plotting to {}...'.format(args.figure)
    dwi.plot.plot_rocs(X, Y, params=Params, autoflip=args.autoflip,
                       outfile=args.figure)
|
Python
| 0.999998 |
@@ -143,16 +143,64 @@
le.%22%22%22%0A%0A
+from __future__ import division, print_function%0A
import a
@@ -2293,17 +2293,17 @@
print
-
+(
'Samples
@@ -2312,32 +2312,33 @@
%7Bn%7D'.format(**d)
+)
%0A print 'Scor
@@ -2331,17 +2331,17 @@
print
-
+(
'Scores:
@@ -2355,32 +2355,33 @@
%7Bs%7D'.format(**d)
+)
%0A print 'Grou
@@ -2370,25 +2370,25 @@
))%0A print
-
+(
'Groups: %7Bng
@@ -2402,24 +2402,25 @@
.format(**d)
+)
%0A print '
@@ -2417,17 +2417,17 @@
print
-
+(
'Group s
@@ -2449,16 +2449,17 @@
mat(**d)
+)
%0A%0A# Prin
@@ -2510,33 +2510,33 @@
e %3E 1:%0A print
-
+(
'# param AUC A
@@ -2560,16 +2560,17 @@
upper'
+)
%0AAuc_bs
@@ -3206,17 +3206,17 @@
print
-
+(
s.format
@@ -3220,16 +3220,17 @@
mat(**d)
+)
%0A Auc
@@ -3342,17 +3342,17 @@
print
-
+(
'# param
@@ -3373,16 +3373,17 @@
f Z p'
+)
%0A don
@@ -3673,17 +3673,17 @@
print
-
+(
'%25s %25s
@@ -3735,16 +3735,17 @@
d, z, p)
+)
%0A%0A# Plot
@@ -3809,17 +3809,17 @@
print
-
+(
'Plottin
@@ -3849,16 +3849,17 @@
.figure)
+)
%0A dwi
|
e3eb155d762e7941e5cb4aa08fe580baeb2e191b
|
Improve doctest results parser
|
platformio/test/runners/doctest.py
|
platformio/test/runners/doctest.py
|
# Copyright (c) 2014-present PlatformIO <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import click
from platformio.test.result import TestCase, TestCaseSource, TestStatus
from platformio.test.runners.base import TestRunnerBase
class DoctestTestCaseParser:
def __init__(self):
self._tmp_tc = None
self._name_tokens = []
def parse(self, line):
if line.strip().startswith("[doctest]"):
return None
if self.is_divider(line):
return self._on_divider()
if not self._tmp_tc:
self._tmp_tc = TestCase("", TestStatus.PASSED, stdout="")
self._name_tokens = []
self._tmp_tc.stdout += line
line = line.strip()
# source
if not self._tmp_tc.source and line:
self._tmp_tc.source = self.parse_source(line)
return None
# name
if not self._tmp_tc.name:
if line:
self._name_tokens.append(line)
return None
self._tmp_tc.name = self.parse_name(self._name_tokens)
return None
if self._tmp_tc.status != TestStatus.FAILED:
self._parse_assert(line)
return None
@staticmethod
def is_divider(line):
line = line.strip()
return line.startswith("===") and line.endswith("===")
def _on_divider(self):
# if the first unprocessed test case
if not self._tmp_tc:
return None
test_case = TestCase(
name=self._tmp_tc.name,
status=self._tmp_tc.status,
message=self._tmp_tc.message,
source=self._tmp_tc.source,
stdout=self._tmp_tc.stdout,
)
self._tmp_tc = None
return test_case
@staticmethod
def parse_source(line):
assert line.endswith(":"), line
file_, line = line[:-1].rsplit(":", 1)
return TestCaseSource(file_, int(line))
@staticmethod
def parse_name(tokens):
cleaned_tokens = []
for token in tokens:
if token.startswith("TEST ") and ":" in token:
token = token[token.index(":") + 1 :]
cleaned_tokens.append(token.strip())
return " -> ".join(cleaned_tokens)
def _parse_assert(self, line):
status_tokens = [
(TestStatus.FAILED, "ERROR"),
(TestStatus.FAILED, "FATAL ERROR"),
(TestStatus.WARNED, "WARNING"),
]
for status, token in status_tokens:
index = line.find(": %s:" % token)
if index == -1:
continue
self._tmp_tc.status = status
self._tmp_tc.message = line[index + len(token) + 3 :].strip() or None
class DoctestTestRunner(TestRunnerBase):
EXTRA_LIB_DEPS = ["doctest/doctest@^2.4.8"]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._tc_parser = DoctestTestCaseParser()
def configure_build_env(self, env):
if "-std=" not in env.subst("$CXXFLAGS"):
env.Append(CXXFLAGS=["-std=c++11"])
env.Append(CPPDEFINES=["DOCTEST_CONFIG_COLORS_NONE"])
def on_testing_line_output(self, line):
if self.options.verbose:
click.echo(line, nl=False)
test_case = self._tc_parser.parse(line)
if test_case:
self._tc_parser = DoctestTestCaseParser()
click.echo(test_case.humanize())
self.test_suite.add_case(test_case)
if "[doctest] Status:" in line:
self.test_suite.on_finish()
|
Python
| 0.000007 |
@@ -892,81 +892,8 @@
e):%0A
- if line.strip().startswith(%22%5Bdoctest%5D%22):%0A return None%0A
@@ -956,25 +956,24 @@
n_divider()%0A
-%0A
if n
@@ -991,79 +991,49 @@
p_tc
-:%0A self._tmp_tc = TestCase(%22%22, TestStatus.PASSED, stdout=%22
+ or line.strip().startswith(%22%5Bdoctest%5D
%22)
+:
%0A
@@ -1041,38 +1041,27 @@
-self._name_tokens = %5B%5D
+return None
%0A%0A
@@ -1788,43 +1788,23 @@
-# if the first unprocessed test cas
+test_case = Non
e%0A
@@ -1804,36 +1804,32 @@
None%0A if
-not
self._tmp_tc:%0A
@@ -1830,36 +1830,16 @@
tc:%0A
- return None%0A
@@ -1860,16 +1860,20 @@
stCase(%0A
+
@@ -1898,18 +1898,30 @@
_tc.name
-,%0A
+.strip(),%0A
@@ -1968,16 +1968,20 @@
+
+
message=
self
@@ -1976,16 +1976,17 @@
message=
+(
self._tm
@@ -1997,18 +1997,45 @@
.message
-,%0A
+ or %22%22).strip() or None,%0A
@@ -2078,16 +2078,20 @@
+
+
stdout=s
@@ -2108,26 +2108,37 @@
c.stdout
-,%0A
+.strip(),%0A
)%0A
@@ -2129,17 +2129,19 @@
-)
+ )%0A
%0A
@@ -2148,36 +2148,105 @@
self._tmp_tc =
-None
+TestCase(%22%22, TestStatus.PASSED, stdout=%22%22)%0A self._name_tokens = %5B%5D
%0A return
@@ -2310,21 +2310,21 @@
-asser
+if no
t line.e
@@ -2335,20 +2335,39 @@
ith(%22:%22)
-, li
+:%0A return No
ne%0A
@@ -2373,17 +2373,20 @@
file
-_
+name
, line =
@@ -2445,17 +2445,20 @@
rce(file
-_
+name
, int(li
@@ -2748,12 +2748,9 @@
rn %22
- -%3E
+/
%22.jo
@@ -3860,62 +3860,8 @@
se:%0A
- self._tc_parser = DoctestTestCaseParser()%0A
|
5324cfaf23b9907f6a6cb54bf03fdc07b5d3f56c
|
Fix some tests
|
plenum/test/batching_3pc/helper.py
|
plenum/test/batching_3pc/helper.py
|
import types
from binascii import hexlify
from plenum.common.constants import DOMAIN_LEDGER_ID
from plenum.common.messages.node_messages import ThreePhaseType
from plenum.common.startable import Mode
from plenum.common.txn_util import reqToTxn, append_txn_metadata
from plenum.common.util import check_if_all_equal_in_list
from plenum.server.batch_handlers.three_pc_batch import ThreePcBatch
def checkNodesHaveSameRoots(nodes, checkUnCommitted=True,
                            checkCommitted=True,
                            checkLastOrderedPpSeqNo=True,
                            checkSeqNoDb=True):
    def addRoot(root, collection):
        if root:
            collection.add(hexlify(root))
        else:
            collection.add(root)

    if checkLastOrderedPpSeqNo:
        ppSeqNos = set()
        for node in nodes:
            ppSeqNos.add(node.replicas[0].last_ordered_3pc)
        assert len(ppSeqNos) == 1

    if checkSeqNoDb:
        seqNoSizes = set()
        for node in nodes:
            seqNoSizes.add(node.seqNoDB.size)
        assert len(seqNoSizes) == 1

    if checkUnCommitted:
        stateRoots = set()
        txnRoots = set()
        for node in nodes:
            addRoot(node.getState(DOMAIN_LEDGER_ID).headHash, stateRoots)
            addRoot(node.getLedger(DOMAIN_LEDGER_ID).uncommittedRootHash,
                    txnRoots)
        assert len(stateRoots) == 1
        assert len(txnRoots) == 1

    if checkCommitted:
        stateRoots = set()
        txnRoots = set()
        for node in nodes:
            addRoot(node.getState(DOMAIN_LEDGER_ID).committedHeadHash,
                    stateRoots)
            addRoot(node.getLedger(DOMAIN_LEDGER_ID).tree.root_hash,
                    txnRoots)
        assert len(stateRoots) == 1
        assert len(txnRoots) == 1


def add_txns_to_ledger_before_order(replica, reqs):
    replica.added = False
    origMethod = replica.tryOrder

    def tryOrderAndAddTxns(self, commit):
        canOrder, _ = self.canOrder(commit)
        node = replica.node
        if not replica.added and canOrder:
            pp = self.getPrePrepare(commit.viewNo, commit.ppSeqNo)
            ledger_manager = node.ledgerManager
            ledger_id = DOMAIN_LEDGER_ID
            ledger = ledger_manager.ledgerRegistry[ledger_id].ledger
            ledgerInfo = ledger_manager.getLedgerInfoByType(ledger_id)

            # simulate audit ledger catchup
            three_pc_batch = ThreePcBatch.from_pre_prepare(pre_prepare=pp,
                                                           valid_txn_count=len(reqs),
                                                           state_root=pp.stateRootHash,
                                                           txn_root=pp.txnRootHash)
            node.audit_handler.post_batch_applied(three_pc_batch)
            node.audit_handler.commit_batch(ledger_id, len(reqs), pp.stateRootHash, pp.txnRootHash, pp.ppTime)

            ledger_manager.preCatchupClbk(ledger_id)
            pp = self.getPrePrepare(commit.viewNo, commit.ppSeqNo)
            for req in reqs:
                txn = append_txn_metadata(reqToTxn(req), txn_time=pp.ppTime)
                ledger_manager._add_txn(
                    ledger_id, ledger, ledgerInfo, txn)
            ledger_manager.catchupCompleted(
                DOMAIN_LEDGER_ID, (node.viewNo, commit.ppSeqNo))
            replica.added = True

        return origMethod(commit)

    replica.tryOrder = types.MethodType(tryOrderAndAddTxns, replica)


def start_precatchup_before_order(replica):
    called = False
    origMethod = replica.tryOrder

    def tryOrderAndAddTxns(self, commit):
        nonlocal called
        canOrder, _ = self.canOrder(commit)
        if not called and canOrder:
            ledger_manager = replica.node.ledgerManager
            ledger_manager.preCatchupClbk(DOMAIN_LEDGER_ID)
            called = True
        return origMethod(commit)

    replica.tryOrder = types.MethodType(tryOrderAndAddTxns, replica)


def make_node_syncing(replica, three_phase_type: ThreePhaseType):
    added = False

    def specificPrePrepares(wrappedMsg):
        msg, sender = wrappedMsg
        nonlocal added
        node = replica.node
        if isinstance(msg, three_phase_type) and not added:
            node.mode = Mode.syncing
            added = True
        return 0

    replica.node.nodeIbStasher.delay(specificPrePrepares)


def fail_on_execute_batch_on_master(node):
    def fail_process_ordered(self, ordered):
        if ordered.instId == 0:
            raise Exception('Should not process Ordered at this point')

    node.processOrdered = types.MethodType(fail_process_ordered, node)


def check_uncommitteds_equal(nodes):
    t_roots = [node.domainLedger.uncommittedRootHash for node in nodes]
    s_roots = [node.states[DOMAIN_LEDGER_ID].headHash for node in nodes]
    assert check_if_all_equal_in_list(t_roots)
    assert check_if_all_equal_in_list(s_roots)
    return t_roots[0], s_roots[0]


def node_caughtup(node, old_count):
    assert node.spylog.count(node.allLedgersCaughtUp) > old_count
|
Python
| 0.000006 |
@@ -2255,135 +2255,82 @@
-ledger = ledger_manager.ledgerRegistry%5Bledger_id%5D.ledger%0A ledgerInfo = ledger_manager.getLedgerInfoByType(ledger_id)
+catchup_rep_service = ledger_manager._catchup_gatherers%5Bledger_id%5D.service
%0A%0A
@@ -3128,84 +3128,37 @@
-ledger_manager._add_txn(%0A ledger_id, ledger, ledgerInfo,
+catchup_rep_service._add_txn(
txn)
|
ab23ea60457720d0a7414b1b84191945f529b23c
|
Update _version.py
|
fabsetup/_version.py
|
fabsetup/_version.py
|
__version__ = "0.7.9"
|
Python
| 0.000002 |
@@ -14,9 +14,52 @@
%220.7.9%22
+ # semantic versioning: https://semver.org
%0A
|
28c8d1cc6df216dfe1f3bcfa3eb70bb590204613
|
implement post_vote()
|
pybooru/api_danbooru.py
|
pybooru/api_danbooru.py
|
# -*- coding: utf-8 -*-
"""pybooru.api_danbooru
This module contains all API calls of Danbooru for Pybooru.
Classes:
Danbooru -- Contains all API calls.
"""
# __future__ imports
from __future__ import absolute_import
# pybooru imports
from .exceptions import PybooruAPIError
class DanbooruApi(object):
    """Contains all Danbooru API calls.

    API Versions: v2.105.0
    doc: https://danbooru.donmai.us/wiki_pages/43568
    """

    def post_list(self, **params):
        """Get a list of posts.

        Parameters:
            limit: How many posts you want to retrieve. There is a hard limit
                of 100 posts per request.
            page: The page number.
            tags: The tags to search for. Any tag combination that works on the
                web site will work here. This includes all the meta-tags.
            raw: When this parameter is set the tags parameter will not be
                parsed for aliased tags, metatags or multiple tags, and will
                instead be parsed as a single literal tag.
        """
        return self._get('posts.json', params)

    def post_show(self, id_):
        """Get a post.

        Parameters:
            id_: where id_ is the post id.
        """
        return self._get('/posts/{0}.json'.format(id_))

    def post_update(self, id_, tag_string=None, rating=None, source=None,
                    parent_id=None):
        """Update a specific post (Requires login).

        Parameters:
            id_: The id number of the post to update.
            tag_string: A space delimited list of tags.
            rating: The rating for the post. Can be: safe, questionable, or
                explicit.
            source: If this is a URL, Danbooru will download the file.
            parent_id: The ID of the parent post.
        """
        params = {
            'post[tag_string]': tag_string,
            'post[rating]': rating,
            'ost[source]': source,
            'post[parent_id]': parent_id
        }
        return self._get('/posts/{0}.json'.format(id_), params, 'PUT')

    def post_revert(self, id_, version_id):
        """Function to reverts a post to a previous version (Requires login).

        Parameters:
            id_: REQUIRED post id.
            version_id: REQUIRED The post version id to revert to.
        """
        return self._get('/posts/{0}/revert.json'.format(id_),
                         {'version_id': version_id}, 'PUT')

    def post_copy_notes(self, id_, other_post_id):
        """Function to copy notes (requires login).

        Parameters:
            id_: Post id.
            other_post_id: REQUIRED The id of the post to copy notes to.
        """
        return self._get('/posts/{0}/copy_notes.json'.format(id_),
                         {'other_post_id': other_post_id}, 'PUT')
|
Python
| 0.000009 |
@@ -1202,17 +1202,26 @@
id_:
-w
+REQUIRED W
here id_
@@ -1506,16 +1506,25 @@
id_:
+ REQUIRED
The id
@@ -2630,16 +2630,25 @@
id_:
+ REQUIRED
Post id
@@ -2855,20 +2855,383 @@
er_post_id%7D, 'PUT')%0A
+%0A def post_vote(self, id_, score):%0A %22%22%22Action lets you vote for a post (Requires login).%0A Danbooru: Post votes/create%0A%0A Parameters:%0A id_: REQUIRED Ppost id.%0A score: REQUIRED Can be: up, down.%0A %22%22%22%0A return self._get('/posts/%7B0%7D/votes.json'.format(id_), %7B'score': score%7D,%0A 'POST')%0A
|
2e1aae60ba563f13d1b0aafebae534592cf0d503
|
Add from_cloud method
|
pydgraph/client_stub.py
|
pydgraph/client_stub.py
|
# Copyright 2018 Dgraph Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Stub for RPC request."""
import grpc
from pydgraph.meta import VERSION
from pydgraph.proto import api_pb2_grpc as api_grpc
try:
    from urllib.parse import urlparse
except ImportError:
    from urlparse import urlparse

__author__ = 'Garvit Pahal <[email protected]>'
__maintainer__ = 'Martin Martinez Rivera <[email protected]>'
__version__ = VERSION
__status__ = 'development'


class DgraphClientStub(object):
    """Stub for the Dgraph grpc client."""

    def __init__(self, addr='localhost:9080', credentials=None, options=None):
        if credentials is None:
            self.channel = grpc.insecure_channel(addr, options)
        else:
            self.channel = grpc.secure_channel(addr, credentials, options)
        self.stub = api_grpc.DgraphStub(self.channel)

    def login(self, login_req, timeout=None, metadata=None, credentials=None):
        return self.stub.Login(login_req, timeout=timeout, metadata=metadata,
                               credentials=credentials)

    def alter(self, operation, timeout=None, metadata=None, credentials=None):
        """Runs alter operation."""
        return self.stub.Alter(operation, timeout=timeout, metadata=metadata,
                               credentials=credentials)

    def async_alter(self, operation, timeout=None, metadata=None, credentials=None):
        """Async version of alter."""
        return self.stub.Alter.future(operation, timeout=timeout, metadata=metadata,
                                      credentials=credentials)

    def query(self, req, timeout=None, metadata=None, credentials=None):
        """Runs query or mutate operation."""
        return self.stub.Query(req, timeout=timeout, metadata=metadata,
                               credentials=credentials)

    def async_query(self, req, timeout=None, metadata=None, credentials=None):
        """Async version of query."""
        return self.stub.Query.future(req, timeout=timeout, metadata=metadata,
                                      credentials=credentials)

    def commit_or_abort(self, ctx, timeout=None, metadata=None,
                        credentials=None):
        """Runs commit or abort operation."""
        return self.stub.CommitOrAbort(ctx, timeout=timeout, metadata=metadata,
                                       credentials=credentials)

    def check_version(self, check, timeout=None, metadata=None,
                      credentials=None):
        """Returns the version of the Dgraph instance."""
        return self.stub.CheckVersion(check, timeout=timeout,
                                      metadata=metadata,
                                      credentials=credentials)

    def close(self):
        """Deletes channel and stub."""
        try:
            self.channel.close()
        except:
            pass
        del self.channel
        del self.stub

    # from_slash_endpoint is deprecated and will be removed in v21.07 release. For more details,
    # see: https://discuss.dgraph.io/t/regarding-slash-cloud-dgraph-endpoints-in-the-clients/13492
    @staticmethod
    def from_slash_endpoint(slash_end_point, api_key):
        """Returns Dgraph Client stub for the Slash GraphQL endpoint"""
        url = urlparse(slash_end_point)
        url_parts = url.netloc.split(".", 1)
        host = url_parts[0] + ".grpc." + url_parts[1]
        creds = grpc.ssl_channel_credentials()
        call_credentials = grpc.metadata_call_credentials(
            lambda context, callback: callback((("authorization", api_key),), None))
        composite_credentials = grpc.composite_channel_credentials(
            creds, call_credentials)
        client_stub = DgraphClientStub('{host}:{port}'.format(
            host=host, port="443"), composite_credentials, options=(('grpc.enable_http_proxy', 0),))
        return client_stub
|
Python
| 0.000001 |
@@ -3517,125 +3517,373 @@
ase.
- For more details,%0A # see: https://discuss.dgraph.io/t/regarding-slash-cloud-dgraph-endpoints-in-the-clients/13492
+%0A # Use from_cloud method to connect to dgraph cloud backend.%0A @staticmethod%0A def from_slash_endpoint(cloud_end_point, api_key):%0A return from_cloud(cloud_end_point, api_key)%0A %0A # Usage:%0A # import pydgraph%0A # client_stub = pydgraph.DgraphClientStub.from_cloud(%22cloud_endpoint%22, %22api-key%22)%0A # client = pydgraph.DgraphClient(client_stub)
%0A
@@ -3914,28 +3914,19 @@
rom_
-slash_endpoint(slash
+cloud(cloud
_end
@@ -3939,28 +3939,24 @@
, api_key):%0A
-
%22%22%22Retur
|
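Decoded, this change turns from_slash_endpoint into a deprecation shim and moves its body to a new from_cloud method. A reconstruction with indentation inferred; note the shim returns from_cloud(...) unqualified, which inside a @staticmethod would normally need to be spelled DgraphClientStub.from_cloud to resolve:

# from_slash_endpoint is deprecated and will be removed in v21.07 release.
# Use from_cloud method to connect to dgraph cloud backend.
@staticmethod
def from_slash_endpoint(cloud_end_point, api_key):
    return from_cloud(cloud_end_point, api_key)

# Usage:
# import pydgraph
# client_stub = pydgraph.DgraphClientStub.from_cloud("cloud_endpoint", "api-key")
# client = pydgraph.DgraphClient(client_stub)
@staticmethod
def from_cloud(cloud_end_point, api_key):
    """Returns Dgraph Client stub for the Slash GraphQL endpoint"""
    # body unchanged from the old from_slash_endpoint: parse the URL,
    # build TLS plus API-key call credentials, return a DgraphClientStub
    ...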
8c4edd4cc8fdd6c7c470e25436b6c6b4c146ad58
|
Fix error casting datetime objects
|
data-analysis/utils.py
|
data-analysis/utils.py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2016 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Authors:
# Daniel Izquierdo <[email protected]>
# Santiago Dueñas <[email protected]>
#
from __future__ import absolute_import
try:
import configparser
except ImportError:
import ConfigParser as configparser
import datetime
import elasticsearch
import numpy
try:
import pymysql as mysql
except ImportError:
import MySQLdb as mysql
def read_config_file(filepath):
"""Read configuration file"""
cfg_parser = configparser.SafeConfigParser()
cfg_parser.read(filepath)
config = {}
for section in ['mysql', 'elasticsearch']:
if section not in cfg_parser.sections():
cause = "Section %s not found in the %s file" % (section, filepath)
raise KeyError(cause)
config[section] = dict(cfg_parser.items(section))
return config
def to_dict(row, columns):
"""Translates from tuple to a dict"""
d = {}
for column in columns:
value = row[columns.index(column) + 1]
if isinstance(value, numpy.int64):
value = int(value)
elif isinstance(value, numpy.float64):
value = float(value)
elif isinstance(value, datetime.datetime):
value = str(value)
else:
value = str(value)
d[column] = value
return d
def create_mysql_connection(user, password, host, db):
"""Connect to a MySQL server"""
db = mysql.connect(host=host, user=user, passwd=password, db=db,
charset='utf8')
return db.cursor()
def execute_mysql_query(conn, query):
"""Execute a MySQL query"""
n = int(conn.execute(query))
results = conn.fetchall() if n else []
return results
def create_elasticsearch_connection(url, user, password):
"""Connect to a ES server"""
conn = elasticsearch.Elasticsearch([url], http_auth=(user, password))
return conn
|
Python
| 0.000003 |
@@ -1000,25 +1000,8 @@
er%0A%0A
-import datetime%0A%0A
impo
@@ -1882,135 +1882,8 @@
lue)
-%0A elif isinstance(value, datetime.datetime):%0A value = str(value)%0A else:%0A value = str(value)
%0A%0A
|
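With the two deletions above applied, utils.py drops the datetime import and the branches that forced every remaining value through str(); datetime objects now pass through to_dict unchanged. A sketch of the resulting function, assembled from old_contents plus the diff:

import numpy

def to_dict(row, columns):
    """Translates from tuple to a dict"""
    d = {}
    for column in columns:
        value = row[columns.index(column) + 1]
        if isinstance(value, numpy.int64):
            value = int(value)
        elif isinstance(value, numpy.float64):
            value = float(value)
        # anything else, datetimes included, is stored as-is
        d[column] = value
    return d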
9f405a3b4e01ee0a42a8530cfc5b509a38067250
|
Remove unused import
|
mugloar/dragon.py
|
mugloar/dragon.py
|
import json
class Dragon:
# By default, stay home.
scaleThickness = 0
clawSharpness = 0
wingStrength = 0
fireBreath = 0
def __init__(self, weather_code):
if weather_code == 'T E':
# Draught requires a 'balanced' dragon, ha ha
self.scaleThickness = 5
self.clawSharpness = 5
self.wingStrength = 5
self.fireBreath = 5
elif weather_code == 'FUNDEFINEDG':
# Fog means we're unseen, no need to fly
self.scaleThickness = 8
self.clawSharpness = 8
self.wingStrength = 0
self.fireBreath = 4
elif weather_code == 'NMR':
self.scaleThickness = 6
self.clawSharpness = 6
self.wingStrength = 4
self.fireBreath = 4
elif weather_code == 'SRO':
# Stay at home if there's a storm.
pass
else:
# Fire is useless in the rain. Additional claw-sharpening is needed to destroy the umbrellaboats
self.scaleThickness = 5
self.clawSharpness = 10
self.wingStrength = 5
self.fireBreath = 0
def get_json(self):
return {"dragon": {
"scaleThickness": self.scaleThickness,
"clawSharpness": self.clawSharpness,
"wingStrength": self.wingStrength,
"fireBreath": self.fireBreath}}
|
Python
| 0.000001 |
@@ -1,17 +1,4 @@
-import json%0A%0A
%0Acla
@@ -691,25 +691,25 @@
Thickness =
-6
+3
%0A
@@ -760,25 +760,25 @@
gStrength =
-4
+5
%0A
@@ -788,33 +788,33 @@
lf.fireBreath =
-4
+6
%0A elif we
|
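Besides dropping the unused import json, the remaining hunks retune the 'NMR' branch: decoded, scaleThickness goes from 6 to 3, wingStrength from 4 to 5 and fireBreath from 4 to 6, leaving roughly:

elif weather_code == 'NMR':
    self.scaleThickness = 3
    self.clawSharpness = 6  # untouched by the diff
    self.wingStrength = 5
    self.fireBreath = 6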
b8701f04d049101c8c92b468b4fc3dc863f1e292
|
Add bulk accept and reject for talks
|
pygotham/admin/talks.py
|
pygotham/admin/talks.py
|
"""Admin for talk-related models."""
from pygotham.admin.utils import model_view
from pygotham.talks import models
__all__ = ('CategoryModelView', 'TalkModelView', 'TalkReviewModelView')
CategoryModelView = model_view(
models.Category,
'Categories',
'Talks',
form_columns=('name', 'slug'),
)
TalkModelView = model_view(
models.Talk,
'Talks',
'Talks',
column_filters=('status', 'duration', 'level'),
column_list=('name', 'status', 'duration', 'level', 'type', 'user'),
column_searchable_list=('name',),
)
TalkReviewModelView = model_view(
models.Talk,
'Review',
'Talks',
can_create=False,
can_delete=False,
column_list=('name', 'status', 'level', 'type', 'user'),
column_searchable_list=('name',),
edit_template='talks/review.html',
)
|
Python
| 0.000038 |
@@ -31,16 +31,104 @@
ls.%22%22%22%0A%0A
+from flask.ext.admin import actions%0Afrom flask.ext.admin.contrib.sqla import ModelView%0A%0A
from pyg
@@ -163,16 +163,45 @@
el_view%0A
+from pygotham.core import db%0A
from pyg
@@ -260,26 +260,28 @@
View', '
-TalkM
+talk_m
odel
-V
+_v
iew', 'T
@@ -302,16 +302,965 @@
View')%0A%0A
+CATEGORY = 'Talks'%0A%0A%0Aclass TalkModelView(ModelView, actions.ActionsMixin):%0A%0A %22%22%22Admin view for :class:%60~pygotham.models.Talk%60.%22%22%22%0A%0A column_filters = ('status', 'duration', 'level')%0A column_list = ('name', 'status', 'duration', 'level', 'type', 'user')%0A column_searchable_list = ('name',)%0A%0A def __init__(self, *args, **kwargs):%0A super().__init__(*args, **kwargs)%0A self.init_actions()%0A%0A @actions.action(%0A 'accept', 'Accept', 'Are you sure you want to accept selected models?')%0A def approve(self, talks):%0A for pk in talks:%0A talk = models.Talk.query.get(pk)%0A talk.status = 'accepted'%0A self.session.commit()%0A%0A @actions.action(%0A 'reject', 'Reject', 'Are you sure you want to reject selected models?')%0A def reject(self, talks):%0A for pk in talks:%0A talk = models.Talk.query.get(pk)%0A talk.status = 'rejected'%0A self.session.commit()%0A
%0ACategor
@@ -1323,31 +1323,32 @@
ories',%0A
-'Talks'
+CATEGORY
,%0A form_c
@@ -1379,31 +1379,36 @@
%0A)%0A%0A
-TalkM
+talk_m
odel
-V
+_v
iew =
-m
+TalkM
odel
-_v
+V
iew(
@@ -1428,199 +1428,49 @@
alk,
-%0A 'Talks',%0A 'Talks',%0A column_filters=('status', 'durat
+ db.sess
ion
-'
, '
-level'),%0A column_list=('name', 'status', 'duration', 'level', 'type', 'user'),%0A column_searchable_list=('name',),%0A)
+Talks', CATEGORY, 'talks')%0A
%0A%0ATa
@@ -1536,23 +1536,24 @@
w',%0A
-'Talks'
+CATEGORY
,%0A ca
|
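Decoded, the core of this change replaces the model_view() factory call for talks with a real ModelView subclass mixing in Flask-Admin's ActionsMixin; the two @actions.action methods implement the bulk accept and reject. A reconstruction of the added class and its registration:

from flask.ext.admin import actions
from flask.ext.admin.contrib.sqla import ModelView
from pygotham.core import db

CATEGORY = 'Talks'

class TalkModelView(ModelView, actions.ActionsMixin):

    """Admin view for :class:`~pygotham.models.Talk`."""

    column_filters = ('status', 'duration', 'level')
    column_list = ('name', 'status', 'duration', 'level', 'type', 'user')
    column_searchable_list = ('name',)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.init_actions()  # wires up the @actions.action handlers

    @actions.action(
        'accept', 'Accept', 'Are you sure you want to accept selected models?')
    def approve(self, talks):
        for pk in talks:
            talk = models.Talk.query.get(pk)
            talk.status = 'accepted'
        self.session.commit()

    @actions.action(
        'reject', 'Reject', 'Are you sure you want to reject selected models?')
    def reject(self, talks):
        for pk in talks:
            talk = models.Talk.query.get(pk)
            talk.status = 'rejected'
        self.session.commit()

talk_model_view = TalkModelView(models.Talk, db.session, 'Talks', CATEGORY, 'talks')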
377d729dfa7910a477f24f7fe44d3648a5f631de
|
add help text to tools/generate_tags_and_topics.py; close #437
|
tools/generate_tags_and_topics.py
|
tools/generate_tags_and_topics.py
|
#!/usr/bin/env python
import sys, os, json, random
if len(sys.argv) < 2:
print("Usage: generate_tags_and_topics <coursedir>")
sys.exit(0)
course_dir = sys.argv[1]
if not os.path.isdir(course_dir):
print("ERROR: Not a directory: %s" % course_dir)
sys.exit(1)
######################################################################
# read in existing topics and tags
course_info_file_name = os.path.join(course_dir, 'courseInfo.json')
try:
with open(course_info_file_name) as course_info_file:
course_info = json.load(course_info_file)
except Exception as error:
print("ERROR: Unable to read %s: %s" % (course_info_file_name, error))
sys.exit(1)
existing_tags = set()
existing_tag_colors = set()
if 'tags' in course_info:
existing_tags = set([t['name'] for t in course_info['tags']])
existing_tag_colors = set([t['color'] for t in course_info['tags']])
existing_topics = set()
existing_topic_colors = set()
if 'topics' in course_info:
existing_topics = set([t['name'] for t in course_info['topics']])
existing_topic_colors = set([t['color'] for t in course_info['topics']])
questions_dir = os.path.join(course_dir, 'questions')
if not os.path.isdir(questions_dir):
print("ERROR: Not a directory: %s" % questions_dir)
sys.exit(1)
######################################################################
# read in question topics and tags
tags = set()
topics = set()
question_dir_names = os.listdir(questions_dir)
for question_dir_name in question_dir_names:
question_path = os.path.join(questions_dir, question_dir_name)
if os.path.isdir(question_path):
info_file_name = os.path.join(question_path, 'info.json')
try:
with open(info_file_name) as info_file:
question_info = json.load(info_file)
if 'tags' in question_info:
tags |= set(question_info['tags'])
if 'topic' in question_info:
topics.add(question_info['topic'])
except Exception as error:
print("WARNING: skipping %s: %s" % (question_path, error))
new_tags = tags - existing_tags
new_topics = topics - existing_topics
######################################################################
# assign colors
all_colors = set([
'red1', 'red2', 'red3',
'pink1', 'pink2', 'pink3',
'purple1', 'purple2', 'purple3',
'blue1', 'blue2', 'blue3',
'turquoise1', 'turquoise2', 'turquoise3',
'green1', 'green2', 'green3',
'yellow1', 'yellow2', 'yellow3',
'orange1', 'orange2', 'orange3',
'brown1', 'brown2', 'brown3',
'gray1', 'gray2', 'gray3',
])
available_tag_colors = all_colors - existing_tag_colors
available_topic_colors = all_colors - existing_topic_colors
new_tags_list = []
for tag in new_tags:
if len(available_tag_colors) > 0:
color = random.sample(available_tag_colors, 1)[0]
available_tag_colors.remove(color)
else:
color = random.sample(all_colors, 1)[0]
new_tags_list.append({"name": tag, "color": color})
new_topics_list = []
for topic in new_topics:
if len(available_topic_colors) > 0:
color = random.sample(available_topic_colors, 1)[0]
available_topic_colors.remove(color)
else:
color = random.sample(all_colors, 1)[0]
new_topics_list.append({"name": topic, "color": color})
new_tags_list.sort(key=lambda x: x["name"])
new_topics_list.sort(key=lambda x: x["name"])
######################################################################
# print output
print("{")
print(" \"topics\": [")
for (i, new_topic) in enumerate(new_topics_list):
trailing_comma = ","
if i >= len(new_topics_list) - 1:
trailing_comma = ""
print(" {\"name\": \"%s\", \"color\": \"%s\"}%s" % (new_topic["name"], new_topic["color"], trailing_comma))
print(" ],")
print(" \"tags\": [")
for (i, new_tag) in enumerate(new_tags_list):
trailing_comma = ","
if i >= len(new_tags_list) - 1:
trailing_comma = ""
print(" {\"name\": \"%s\", \"color\": \"%s\"}%s" % (new_tag["name"], new_tag["color"], trailing_comma))
print(" ]")
print("}")
|
Python
| 0 |
@@ -3541,16 +3541,95 @@
output%0A%0A
+print(%22New tags and topics not already present in %25s%22 %25 course_info_file_name)%0A
print(%22%7B
|
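The whole change is one decoded line, printed before the JSON output so users know what the generated snippet represents:

print("New tags and topics not already present in %s" % course_info_file_name)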
eab0e21b1d5aac6c5be14a84fade651103f3e910
|
Use wchar functions consistently
|
pymediainfo/__init__.py
|
pymediainfo/__init__.py
|
import json
import os
import sys
from pkg_resources import get_distribution
import xml.etree.ElementTree as ET
from ctypes import *
__version__ = get_distribution("pymediainfo").version
class Track(object):
def __getattribute__(self, name):
try:
return object.__getattribute__(self, name)
except:
pass
return None
def __init__(self, xml_dom_fragment):
self.xml_dom_fragment = xml_dom_fragment
self.track_type = xml_dom_fragment.attrib['type']
for el in self.xml_dom_fragment:
node_name = el.tag.lower().strip().strip('_')
if node_name == 'id':
node_name = 'track_id'
node_value = el.text
other_node_name = "other_%s" % node_name
if getattr(self, node_name) is None:
setattr(self, node_name, node_value)
else:
if getattr(self, other_node_name) is None:
setattr(self, other_node_name, [node_value, ])
else:
getattr(self, other_node_name).append(node_value)
for o in [d for d in self.__dict__.keys() if d.startswith('other_')]:
try:
primary = o.replace('other_', '')
setattr(self, primary, int(getattr(self, primary)))
except:
for v in getattr(self, o):
try:
current = getattr(self, primary)
setattr(self, primary, int(v))
getattr(self, o).append(current)
break
except:
pass
def __repr__(self):
return("<Track track_id='{0}', track_type='{1}'>".format(self.track_id, self.track_type))
def to_data(self):
data = {}
for k, v in self.__dict__.items():
if k != 'xml_dom_fragment':
data[k] = v
return data
class MediaInfo(object):
def __init__(self, xml):
self.xml_dom = MediaInfo.parse_xml_data_into_dom(xml)
@staticmethod
def parse_xml_data_into_dom(xml_data):
try:
return ET.fromstring(xml_data.encode("utf-8"))
except:
return None
@staticmethod
def parse(filename):
if os.name in ("nt", "dos", "os2", "ce"):
lib = windll.MediaInfo
elif sys.platform == "darwin":
try:
lib = CDLL("libmediainfo.0.dylib")
except OSError:
lib = CDLL("libmediainfo.dylib")
else:
lib = CDLL("libmediainfo.so.0")
# Define arguments and return types
lib.MediaInfo_Inform.restype = c_wchar_p
lib.MediaInfo_New.argtypes = []
lib.MediaInfo_New.restype = c_void_p
lib.MediaInfo_Option.argtypes = [c_void_p, c_wchar_p, c_wchar_p]
lib.MediaInfo_Option.restype = c_wchar_p
lib.MediaInfoA_Option.argtypes = [c_void_p, c_char_p, c_char_p]
lib.MediaInfoA_Option.restype = c_char_p
lib.MediaInfo_Inform.argtypes = [c_void_p, c_size_t]
lib.MediaInfo_Inform.restype = c_wchar_p
lib.MediaInfoA_Open.argtypes = [c_void_p, c_char_p]
lib.MediaInfoA_Open.restype = c_size_t
lib.MediaInfo_Delete.argtypes = [c_void_p]
lib.MediaInfo_Delete.restype = None
lib.MediaInfo_Close.argtypes = [c_void_p]
lib.MediaInfo_Close.restype = None
# Create a MediaInfo handle
handle = lib.MediaInfo_New()
lib.MediaInfo_Option(handle, "CharSet", "UTF-8")
lib.MediaInfoA_Option(None, b"Inform", b"XML")
lib.MediaInfoA_Option(None, b"Complete", b"1")
lib.MediaInfoA_Open(handle, filename.encode("utf8"))
xml = lib.MediaInfo_Inform(handle, 0)
# Delete the handle
lib.MediaInfo_Close(handle)
lib.MediaInfo_Delete(handle)
return MediaInfo(xml)
def _populate_tracks(self):
if self.xml_dom is None:
return
for xml_track in self.xml_dom.iter("track"):
self._tracks.append(Track(xml_track))
@property
def tracks(self):
if not hasattr(self, "_tracks"):
self._tracks = []
if len(self._tracks) == 0:
self._populate_tracks()
return self._tracks
def to_data(self):
data = {'tracks': []}
for track in self.tracks:
data['tracks'].append(track.to_data())
return data
def to_json(self):
return json.dumps(self.to_data())
|
Python
| 0 |
@@ -2926,129 +2926,8 @@
r_p%0A
- lib.MediaInfoA_Option.argtypes = %5Bc_void_p, c_char_p, c_char_p%5D%0A lib.MediaInfoA_Option.restype = c_char_p%0A
@@ -3049,25 +3049,24 @@
ib.MediaInfo
-A
_Open.argtyp
@@ -3083,16 +3083,17 @@
id_p, c_
+w
char_p%5D%0A
@@ -3109,25 +3109,24 @@
ib.MediaInfo
-A
_Open.restyp
@@ -3470,33 +3470,32 @@
lib.MediaInfo
-A
_Option(None, b%22
@@ -3492,17 +3492,16 @@
n(None,
-b
%22Inform%22
@@ -3502,17 +3502,16 @@
nform%22,
-b
%22XML%22)%0A
@@ -3530,17 +3530,16 @@
ediaInfo
-A
_Option(
@@ -3544,17 +3544,16 @@
n(None,
-b
%22Complet
@@ -3556,17 +3556,16 @@
plete%22,
-b
%221%22)%0A
@@ -3586,9 +3586,8 @@
Info
-A
_Ope
@@ -3608,23 +3608,8 @@
name
-.encode(%22utf8%22)
)%0A
|
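Decoded, the diff deletes the narrow-char MediaInfoA_* bindings and routes everything through the wide-char entry points, so the byte literals and the filename.encode("utf8") call disappear. The affected parse() lines end up roughly as:

lib.MediaInfo_Open.argtypes = [c_void_p, c_wchar_p]
lib.MediaInfo_Open.restype = c_size_t

# options and the open call now take Python unicode strings directly
lib.MediaInfo_Option(handle, "CharSet", "UTF-8")
lib.MediaInfo_Option(None, "Inform", "XML")
lib.MediaInfo_Option(None, "Complete", "1")
lib.MediaInfo_Open(handle, filename)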
54bb5a2320fb88daf9c24ad7b6a9b6cb0a6ab0cc
|
add send_image_url
|
pymessenger/send_api.py
|
pymessenger/send_api.py
|
import json
import requests
from requests_toolbelt import MultipartEncoder
DEFAULT_API_VERSION = 2.6
class SendApiClient(object):
def __init__(self, access_token, api_version=DEFAULT_API_VERSION):
self.api_version = api_version
self.access_token = access_token
self.base_url = (
"https://graph.facebook.com"
"/v{0}/me/messages?access_token={1}"
).format(self.api_version, access_token)
def send(self, recipient_id, message_type, **kwargs):
if message_type == 'text':
message_text = kwargs['text']
response = self.send_text_message(recipient_id, message_text)
elif message_type == 'button':
message_text = kwargs['text']
buttons = kwargs['buttons']
response = self.send_button_message(recipient_id, message_text, buttons)
else:
response = "Message type {0} currently unsupported.".format(message_type)
return response
def send_text_message(self, recipient_id, message_text):
payload = {
'recipient': {
'id': recipient_id
},
'message': {
'text': message_text
}
}
return self._send_payload(payload)
def send_message(self, recipient_id, message):
payload = {
'recipient': {
'id': recipient_id
},
'message': message
}
return self._send_payload(payload)
def send_generic_message(self, recipient_id, elements):
payload = {
'recipient': {
'id': recipient_id
},
'message': {
"attachment": {
"type": "template",
"payload": {
"template_type": "generic",
"elements": elements
}
}
}
}
return self._send_payload(payload)
def send_button_message(self, recipient_id, text, buttons):
payload = {
'recipient': {
'id': recipient_id
},
'message': {
"attachment": {
"type": "template",
"payload": {
"template_type": "button",
"text": text,
"buttons": buttons
}
}
}
}
return self._send_payload(payload)
def _send_payload(self, payload):
result = requests.post(self.base_url, json=payload).json()
return result
def send_image(self, recipient_id, image_path):
'''
This sends an image to the specified recipient.
Input:
recipient_id: recipient id to send to
image_path: path to image to be sent
Output:
Response from API as <dict>
'''
payload = {
'recipient': json.dumps(
{
'id': recipient_id
}
),
'message': json.dumps(
{
'attachment': {
'type': 'image',
'payload': {}
}
}
),
'filedata': (image_path, open(image_path, 'rb'))
}
multipart_data = MultipartEncoder(payload)
multipart_header = {
'Content-Type': multipart_data.content_type
}
return requests.post(self.base_url, data=multipart_data, headers=multipart_header).json()
|
Python
| 0.000002 |
@@ -983,18 +983,16 @@
sponse%0A%0A
-%0A%0A
def
@@ -3652,16 +3652,564 @@
_header).json()%0A
+%0A def send_image_url(self, recipient_id, image_url):%0A payload = %7B%0A 'recipient': json.dumps(%0A %7B%0A 'id': recipient_id%0A %7D%0A ),%0A 'message': json.dumps(%0A %7B%0A 'attachment': %7B%0A 'type': 'image',%0A 'payload': %7B%0A 'url': image_url%0A %7D%0A %7D%0A %7D%0A )%0A %7D%0A return self._send_payload(payload)%0A
|
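Decoded, the appended method mirrors send_image but points the attachment payload at a hosted URL instead of uploading bytes, so it goes through the plain JSON _send_payload path rather than the multipart encoder:

def send_image_url(self, recipient_id, image_url):
    payload = {
        'recipient': json.dumps({
            'id': recipient_id
        }),
        'message': json.dumps({
            'attachment': {
                'type': 'image',
                'payload': {
                    'url': image_url  # no filedata / MultipartEncoder needed
                }
            }
        })
    }
    return self._send_payload(payload)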
ec261fdaf41bd91558e4df143be8dfd9940bde81
|
Rewrite bubble sort.
|
py/sorting/05_bubbleSort.py
|
py/sorting/05_bubbleSort.py
|
def bubbleSort(A):
tempValue = 0
for k in range(1, len(A)):
flag = 0
for i in range(0, len(A) - k):
if A[i+1] > A[i]:
tempValue = A[i+1]
A[i+1] = A[i]
A[i] = tempValue
flag += 1
if flag == 0:
break
return A
print(bubbleSort([1,4,55,3]))
|
Python
| 0.000001 |
@@ -16,75 +16,326 @@
A):%0A
-%0A tempValue = 0%0A%0A for k in range(1, len
+ for k in range(len(A)-1, 0, -1):%0A for i in range(k):%0A if A%5Bi%5D %3E A%5Bi+1%5D:%0A tempValue = A%5Bi%5D%0A A%5Bi%5D = A%5Bi+1%5D%0A A%5Bi+1%5D = tempValue%0A%0A return A%0A%0Aprint(bubbleSort(%5B54,26,93,17,77,31,44,55,20%5D))%0A%0Adef bubbleSortReverse
(A)
-)
:%0A
- flag = 0
+for k in range(len(A)-1, 0, -1):
%0A
@@ -358,20 +358,8 @@
nge(
-0, len(A) -
k):%0A
@@ -489,73 +489,8 @@
alue
-%0A flag += 1%0A%0A if flag == 0:%0A break
%0A%0A
@@ -521,18 +521,43 @@
Sort
-(%5B1,
+Reverse(%5B54,26,93,17,77,31,4
4,55,
-3
+20
%5D))%0A
|
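Decoded and re-indented (the reverse variant's indentation is inferred from context), the rewrite drops the flag-based early exit for the textbook decreasing-range form, fixes the comparison so bubbleSort actually sorts ascending, and keeps the old descending logic as bubbleSortReverse:

def bubbleSort(A):
    for k in range(len(A)-1, 0, -1):
        for i in range(k):
            if A[i] > A[i+1]:  # ascending
                tempValue = A[i]
                A[i] = A[i+1]
                A[i+1] = tempValue
    return A

print(bubbleSort([54,26,93,17,77,31,44,55,20]))

def bubbleSortReverse(A):
    for k in range(len(A)-1, 0, -1):
        for i in range(k):
            if A[i+1] > A[i]:  # descending
                tempValue = A[i+1]
                A[i+1] = A[i]
                A[i] = tempValue
    return A

print(bubbleSortReverse([54,26,93,17,77,31,44,55,20]))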
2998a776a702d8d8fbd3e5f54f263fce55ba621c
|
Correct number of new links printed by first scrape
|
mutube/mutuber.py
|
mutube/mutuber.py
|
"""
Script to scrape /bleep/ and post to YouTube playlist
Upcoming improvements include
Management of `live_threads`.
This could then be presented as a combination of a module and a script,
with command line argument parsing, and inserted to mutube
"""
from .exceptions import NoPlaylist, BadVideo
from .playlister import Playlister, encode_tag, HttpError
from .scraper import Scraper
import time
class Mutuber():
""" Scrape from 4chan and post to YouTube playlists. """
def __init__(self, board, subjects, prefix, time_format, client_json,
playlister_pause=1, scraper_pause=None):
""" .
Args:
board ::: (str) abbreviated name of 4chan board to scrape
subjects ::: (list) titles of `board` threads to scrape
prefix, time_format ::: (str) playlist tag specs, see documentation
playlister_pause, scraper_pause ::: (int) minutes to pause between
posting to playlist and between scrape cycles, respectively
client_json ::: (str) path to YouTube OAuth 2.0 client credentials JSON
"""
# Initialise objects
self.scraper = Scraper(board, subjects)
self.playlister = Playlister(prefix, time_format, client_json)
# Initialise options ! should check within acceptable ranges
self.playlister_pause = playlister_pause
self.scraper_pause = scraper_pause
#! should not be on init -- let user choose whether to consider all playlists or just current
self.existing_ids = self.get_existing_ids()
def run_forever(self):
""" Run continuous scrape-post cycles, with a delay. """
while True:
self.run_once()
time.sleep(self.scraper_pause * 60) # space out scrapes
def run_once(self):
self.playlist = self.get_current_playlist() # get current playlist
self.scrape_and_insert_videos_to_playlist()
# Should be optionable for 'all' or 'current'
def get_existing_ids(self):
""" Return all video_ids posted in playlists tagged as specified. """
playlists = self.playlister.get_tagged_playlists()
existing_ids = set()
for playlist in playlists.values():
existing_ids.update(self.playlister.get_posted_yt_ids(playlist))
return existing_ids
def get_current_playlist(self):
""" Return current tagged playlist, creating one if necessary. """
# Create current tag
tag = encode_tag(self.playlister.prefix, time.localtime(),
self.playlister.time_format)
try: # retrieve existing playlist
playlist = self.playlister.get_playlist(tag)
print("Retrieved playlist for tag: {}".format(tag))
except NoPlaylist: # create new playlist
playlist = self.playlister.create_new_playlist(tag)
print("Created new playlist for tag: {}".format(tag))
return playlist
def scrape_and_insert_videos_to_playlist(self):
""" Scrape videos from 4chan and post to specified playlist. """
# Scrape videos from 4chan
self.scraper.scrape()
# Add scraped videos to playlist
for yt_id in self.scraper.yt_ids - self.existing_ids: # new videos only
try:
response = self.playlister.insert_vid_to_playlist(self.playlist,
yt_id)
self.existing_ids.add(yt_id)
print('Inserted: {}'.format(yt_id))
except BadVideo: # skip dead links
print('Failed to insert: {}'.format(yt_id))
time.sleep(self.playlister_pause * 60) # space out write requests
|
Python
| 0 |
@@ -1426,32 +1426,60 @@
_pause%0A %0A
+ # Get existing id's%0A
#! shoul
@@ -1615,16 +1615,70 @@
ng_ids()
+%0A self.scraper.yt_ids.update(self.existing_ids)
%0A%0A de
|
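Decoded, the fix seeds the scraper's id set with everything already posted, right after the existing-id lookup in __init__; the first scrape cycle then stops reporting previously posted videos as new links:

# Get existing id's
self.existing_ids = self.get_existing_ids()
self.scraper.yt_ids.update(self.existing_ids)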
4f97405c7e034a5f1deae21e6dbec427c9d1816e
|
Set co_lnotab to the empty string
|
src/tblib/__init__.py
|
src/tblib/__init__.py
|
try:
from __pypy__ import tproxy
except ImportError:
tproxy = None
try:
from .cpython import tb_set_next
except ImportError:
tb_set_next = None
if not tb_set_next and not tproxy:
raise ImportError("Cannot use tblib. Runtime not supported.")
import sys
from types import CodeType
from types import TracebackType
PY3 = sys.version_info[0] == 3
class _AttrDict(dict):
def __getattr__(self, attr):
try:
return self[attr]
except KeyError:
raise AttributeError('No {} attribute'.format(attr))
class __traceback_maker(Exception):
pass
class Code(object):
def __init__(self, code):
self.co_filename = code.co_filename
self.co_name = code.co_name
self.co_nlocals = code.co_nlocals
self.co_stacksize = code.co_stacksize
self.co_flags = code.co_flags
self.co_firstlineno = code.co_firstlineno
self.co_lnotab = code.co_lnotab
class Frame(object):
def __init__(self, frame):
self.f_globals = {
k: v for k, v in frame.f_globals.items() if k in ("__file__", "__name__")
}
self.f_code = Code(frame.f_code)
class Traceback(object):
def __init__(self, tb):
self.tb_frame = Frame(tb.tb_frame)
self.tb_lineno = tb.tb_lineno
if tb.tb_next is None:
self.tb_next = None
else:
self.tb_next = Traceback(tb.tb_next)
def as_traceback(self):
if tproxy:
return tproxy(TracebackType, self.__tproxy_handler)
elif tb_set_next:
f_code = self.tb_frame.f_code
code = compile('\n' * (self.tb_lineno - 1) + 'raise __traceback_maker', self.tb_frame.f_code.co_filename, 'exec')
if PY3:
code = CodeType(
0, 0,
f_code.co_nlocals, f_code.co_stacksize, f_code.co_flags,
code.co_code, code.co_consts, code.co_names, code.co_varnames,
f_code.co_filename, f_code.co_name,
code.co_firstlineno, code.co_lnotab,
(), ()
)
else:
code = CodeType(
0,
f_code.co_nlocals, f_code.co_stacksize, f_code.co_flags,
code.co_code, code.co_consts, code.co_names, code.co_varnames,
f_code.co_filename, f_code.co_name,
code.co_firstlineno, code.co_lnotab,
(), ()
)
try:
exec(code, self.tb_frame.f_globals, {})
except:
tb = sys.exc_info()[2].tb_next
tb_set_next(tb, self.tb_next and self.tb_next.as_traceback())
return tb
else:
raise RuntimeError("Cannot re-create traceback !")
def __tproxy_handler(self, operation, *args, **kwargs):
if operation in ('__getattribute__', '__getattr__'):
if args[0] == 'tb_next':
return self.tb_next and self.tb_next.as_traceback()
else:
return getattr(self, args[0])
else:
return getattr(self, operation)(*args, **kwargs)
def to_dict(self):
"""Convert a Traceback into a dictionary representation"""
if self.tb_next is None:
tb_next = None
else:
tb_next = self.tb_next.to_dict()
code = {
k: v
for k, v in self.tb_frame.f_code.__dict__.items()
if k.startswith('co_')
}
code['co_lnotab'] = code['co_lnotab'].decode('latin1')
frame = {
'f_globals': self.tb_frame.f_globals,
'f_code': code
}
return {
'tb_frame': frame,
'tb_lineno': self.tb_lineno,
'tb_next': tb_next
}
@classmethod
def from_dict(cls, dct):
if dct['tb_next']:
tb_next = cls.from_dict(dct['tb_next'])
else:
tb_next = None
frame = _AttrDict((
('f_globals', dct['tb_frame']['f_globals']),
('f_code', _AttrDict((k, v) for k, v in dct['tb_frame']['f_code'].items()))
))
frame['f_code']['co_lnotab'] = frame['f_code']['co_lnotab'].encode('latin1')
tb = _AttrDict((
('tb_frame', frame),
('tb_lineno', dct['tb_lineno']),
('tb_next', tb_next)
))
return cls(tb)
|
Python
| 0.999999 |
@@ -3589,42 +3589,10 @@
%5D =
-code%5B'co_lnotab'%5D.decode('latin1')
+''
%0A
@@ -4179,93 +4179,8 @@
))%0A
- frame%5B'f_code'%5D%5B'co_lnotab'%5D = frame%5B'f_code'%5D%5B'co_lnotab'%5D.encode('latin1')%0A
|
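Decoded, both hunks replace the latin1 round-tripping of co_lnotab with an empty string. That looks safe here because as_traceback() rebuilds position info from tb_lineno (the '\n' * (tb_lineno - 1) trick) and takes co_lnotab from the freshly compiled code object, not the serialized one. The decoded result, in context:

# Traceback.to_dict()
code['co_lnotab'] = ''  # was: code['co_lnotab'].decode('latin1')

# Traceback.from_dict(): the frame['f_code']['co_lnotab'].encode('latin1')
# line is simply removed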
2490228f94b24dd45f9ee879d9ff91c5a3dc3300
|
Add process "running" status to log output.
|
pysc2/lib/sc_process.py
|
pysc2/lib/sc_process.py
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Launch the game and set up communication."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
import os
import shutil
import socket
import subprocess
import sys
import tempfile
import time
from future.builtins import range # pylint: disable=redefined-builtin
import portpicker
from pysc2.lib import protocol
from pysc2.lib import remote_controller
from pysc2.lib import stopwatch
import websocket
from absl import flags
flags.DEFINE_bool("sc2_verbose", False, "Enable SC2 verbose logging.")
FLAGS = flags.FLAGS
sw = stopwatch.sw
class StarcraftProcess(object):
"""Launch a starcraft server, initialize a controller, and later, clean up.
This is best used from run_configs.py. It is important to call `close`,
otherwise you'll likely leak temp files and SC2 processes (chewing CPU).
Usage:
p = StarcraftProcess(run_config)
p.controller.ping()
p.close()
or:
with StarcraftProcess(run_config) as controller:
controller.ping()
"""
def __init__(self, run_config, full_screen=False, game_version=None,
data_version=None, verbose=False, **kwargs):
self._proc = None
self._sock = None
self._controller = None
self._tmp_dir = tempfile.mkdtemp(prefix="sc-", dir=run_config.tmp_dir)
self._port = portpicker.pick_unused_port()
exec_path = run_config.exec_path(game_version)
self._check_exists(exec_path)
args = [
exec_path,
"-listen", "127.0.0.1",
"-port", str(self._port),
"-dataDir", os.path.join(run_config.data_dir, ""),
"-tempDir", os.path.join(self._tmp_dir, ""),
"-displayMode", "1" if full_screen else "0",
]
if verbose or FLAGS.sc2_verbose:
args += ["-verbose"]
if data_version:
args += ["-dataVersion", data_version.upper()]
try:
self._proc = self._launch(run_config, args, **kwargs)
self._sock = self._connect(self._port)
client = protocol.StarcraftProtocol(self._sock)
self._controller = remote_controller.RemoteController(client)
with sw("startup"):
self._controller.ping()
except:
self.close()
raise
@sw.decorate
def close(self):
"""Shut down the game and clean up."""
self._shutdown()
self._proc = None
self._sock = None
self._controller = None
if os.path.exists(self._tmp_dir):
shutil.rmtree(self._tmp_dir)
@property
def controller(self):
return self._controller
def __enter__(self):
return self.controller
def __exit__(self, unused_exception_type, unused_exc_value, unused_traceback):
self.close()
def __del__(self):
# Prefer using a context manager, but this cleans most other cases.
self.close()
def _check_exists(self, exec_path):
if not os.path.isfile(exec_path):
raise RuntimeError("Trying to run '%s', but it doesn't exist" % exec_path)
if not os.access(exec_path, os.X_OK):
raise RuntimeError(
"Trying to run '%s', but it isn't executable." % exec_path)
def _launch(self, run_config, args, **kwargs):
"""Launch the process and return the process object."""
del kwargs
try:
with sw("popen"):
return subprocess.Popen(args, cwd=run_config.cwd, env=run_config.env)
except OSError:
logging.exception("Failed to launch")
sys.exit("Failed to launch: " + str(args))
@sw.decorate
def _connect(self, port):
"""Connect to the websocket, retrying as needed. Returns the socket."""
was_running = False
for i in range(120):
is_running = self.running
was_running = was_running or is_running
if (i >= 30 or was_running) and not is_running:
logging.warning(
"SC2 isn't running, so bailing early on the websocket connection.")
break
logging.info("Connection attempt %s", i)
time.sleep(1)
try:
return websocket.create_connection("ws://127.0.0.1:%s/sc2api" % port,
timeout=2 * 60) # 2 minutes
except socket.error:
pass # SC2 hasn't started listening yet.
except websocket.WebSocketException as err:
if "Handshake Status 404" in str(err):
pass # SC2 is listening, but hasn't set up the /sc2api endpoint yet.
else:
raise
sys.exit("Failed to create the socket.")
def _shutdown(self):
"""Terminate the sub-process."""
if self._proc:
ret = _shutdown_proc(self._proc, 3)
logging.info("Shutdown with return code: %s", ret)
self._proc = None
@property
def running(self):
return self._proc.poll() if self._proc else False
def _shutdown_proc(p, timeout):
"""Wait for a proc to shut down, then terminate or kill it after `timeout`."""
freq = 10 # how often to check per second
for _ in range(1 + timeout * freq):
ret = p.poll()
if ret is not None:
logging.info("Shutdown gracefully.")
return ret
time.sleep(1 / freq)
logging.warning("Killing the process.")
p.kill()
return p.wait()
|
Python
| 0.000015 |
@@ -4487,12 +4487,38 @@
t %25s
-%22, i
+ (running: %25s)%22, i, is_running
)%0A
|
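Decoded, the retry loop's log line gains the poll result that _connect() already computes on each iteration:

logging.info("Connection attempt %s (running: %s)", i, is_running)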
f00d5073c46bc553c1d42e623b2eca5c6dfaa25c
|
Treat regex string as raw
|
tractor-purge.py
|
tractor-purge.py
|
#!/usr/bin/env python
import sys
import os
import platform
import subprocess
import re
import shutil
import datetime
import logging
from optparse import OptionParser
####################################
# Option parser and constants
TRACTOR_PURGE_VERSION = 'v1.0.0'
parser = OptionParser(version='%prog ' + TRACTOR_PURGE_VERSION)
parser.add_option('-t', '--tq', dest='tq',
default='/opt/pixar/Tractor-2.2/bin/tq',
help='Absolute path to tq [default: %default]')
parser.add_option('-c', '--cmdlogsdir', dest='cmdlogsdir',
default='/var/spool/tractor/cmd-logs',
help='Absolute path to cmd-logs dir [default: %default]')
parser.add_option('-l', '--log', dest='logfile',
default='/var/tmp/tractor-purge.log',
help='Absolute path to tractor-purge log file '
'[default: %default]')
parser.add_option('-d', '--days', dest='days', default='30',
help='Number of days worth of jobs/logs to keep '
'[default: %default]')
parser.add_option('--deletejobs', action='store_true', dest='deletejobs',
default=False,
help='Delete jobs from psql database after log deletion. '
'If DBArchiving is True in Tractor config, archive '
'jobs instead.')
parser.add_option('--dryrun', action='store_true', dest='dryrun',
default=False,
help='Do not perform actual deletion, instead just preview \
deletions')
(options, args) = parser.parse_args()
TQ = options.tq
CMD_LOGS_DIR = options.cmdlogsdir
PURGE_LOG = options.logfile
DAYS = options.days
DELETE_JOBS = options.deletejobs
DRY_RUN = options.dryrun
####################################
# General setup
# Logging
logger = logging.getLogger('Tractor 2.2 purger')
hdlr = logging.FileHandler(PURGE_LOG)
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.INFO)
# Logging to stdout
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.INFO)
ch.setFormatter(formatter)
logger.addHandler(ch)
####################################
# Functions
def jobs_to_delete(days):
"""Create list of all jids (equivalient of all jobs to be deleted)
"""
jids = []
command = [TQ, 'jobs',
'not active and not ready and spooltime < -' + days + 'd',
'--noheader', '--archives', '-c', 'jid']
p = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
try:
for line in iter(p.stdout.readline, b''):
sys.stdout.flush()
jid = line.rstrip()
jids.append(int(jid))
logger.info('Added job for deletion: ' + jid)
except:
logger.warning('Failed to read stdout.')
return jids
def get_all_job_folders(cmd_logs_dir):
"""Create list of all job folders
"""
job_folders = []
for root, directories, files in os.walk(cmd_logs_dir):
if len(directories) > 0:
for directory in directories:
match = re.search('J\d*', directory)
if match:
job_folder = root + '/' + directory
job_folders.append(job_folder)
return job_folders
def get_job_deletion_list(job_folders, jids):
"""Compare job folders list against jids list, create deletion list
"""
delete_list = []
for job_folder in job_folders:
jid_match = False
for jid in jids:
if job_folder.endswith('J' + str(jid)):
jid_match = True
if jid_match:
delete_list.append(job_folder)
logger.info('Added log folder for deletion: ' + job_folder)
return delete_list
def delete_logs(delete_list):
"""Delete the actual log folders
"""
for job_folder in delete_list:
if not DRY_RUN:
logger.info('Deleting ' + job_folder)
shutil.rmtree(job_folder)
else:
logger.info('Dry run: (not) deleting ' + job_folder)
def delete_tractor_jobs(days):
"""Delete jobs from Tractor. You can also delete jobs manually using:
tractor-dbctl --purge-archive-to-year-month YY-MM
"""
if not DRY_RUN:
logger.info('Executing tq command to delete jobs...')
command = [TQ, '--force', '--yes', 'delete',
'not active and not ready and spooltime < -' + days + 'd']
else:
logger.info('Executing tq command to (not) delete jobs...')
command = [TQ, 'jobs', '--archives',
'not active and not ready and spooltime < -' + days + 'd']
p = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
try:
for line in iter(p.stdout.readline, b''):
sys.stdout.flush()
logger.info(line.rstrip())
except:
logger.warning('Failed reading stdout.')
####################################
# Main
if __name__ == '__main__':
if not DRY_RUN:
logger.info('Tractor purge initiated.')
else:
logger.info('Tractor purge initiated in "dry run" mode.')
# Queries
jids = jobs_to_delete(days=DAYS)
job_folders = get_all_job_folders(cmd_logs_dir=CMD_LOGS_DIR)
delete_list = get_job_deletion_list(job_folders=job_folders, jids=jids)
# Summary
logger.info('Jobs to be deleted: ' + str(len(jids)))
logger.info('Job log folders found: ' + str(len(job_folders)))
logger.info('Job log folders to be deleted: ' + str(len(delete_list)))
# Delete logs
if len(jids) > 0:
delete_logs(delete_list=delete_list)
else:
logger.info('No logs to delete.')
# Delete jobs
if DELETE_JOBS:
if len(jids) > 0:
delete_tractor_jobs(days=DAYS)
else:
logger.info('No jobs to delete.')
logger.info('Tractor purge done.\n')
|
Python
| 0.999994 |
@@ -3234,16 +3234,17 @@
.search(
+r
'J%5Cd*',
|
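The decoded fix is a one-character raw-string prefix, so \d reaches the regex engine verbatim instead of being treated as an invalid string escape (a DeprecationWarning on Python 3.6+):

match = re.search(r'J\d*', directory)  # was: re.search('J\d*', directory)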
9017a30c6e6ec7b08e4793fe4e730bba447f1d59
|
Fix deprecation warning about unittest.assertRaisesRegexp() in Python 3.
|
pybtex/tests/plugin_test.py
|
pybtex/tests/plugin_test.py
|
# Copyright (c) 2014 Matthias C. M. Troffaes
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import re
import nose.tools
import pybtex.database.input.bibtex
import pybtex.plugin
import pybtex.style.formatting.plain
def test_plugin_loader():
"""Check that all enumerated plugins can be imported."""
for group in pybtex.plugin._DEFAULT_PLUGINS:
for name in pybtex.plugin.enumerate_plugin_names(group):
pybtex.plugin.find_plugin(group, name)
class TestPlugin1(pybtex.plugin.Plugin):
pass
class TestPlugin2(pybtex.plugin.Plugin):
pass
class TestPlugin3(pybtex.plugin.Plugin):
pass
class TestPlugin4(pybtex.plugin.Plugin):
pass
def test_register_plugin_1():
nose.tools.assert_true(
pybtex.plugin.register_plugin(
'pybtex.style.formatting', 'yippikayee', TestPlugin1))
nose.tools.assert_is(
TestPlugin1, pybtex.plugin.find_plugin(
'pybtex.style.formatting', 'yippikayee'))
nose.tools.assert_false(
pybtex.plugin.register_plugin(
'pybtex.style.formatting', 'yippikayee', TestPlugin2))
nose.tools.assert_is(
TestPlugin1, pybtex.plugin.find_plugin(
'pybtex.style.formatting', 'yippikayee'))
nose.tools.assert_true(
pybtex.plugin.register_plugin(
'pybtex.style.formatting', 'yippikayee', TestPlugin2, force=True))
nose.tools.assert_is(
TestPlugin2, pybtex.plugin.find_plugin(
'pybtex.style.formatting', 'yippikayee'))
def test_register_plugin_2():
nose.tools.assert_false(
pybtex.plugin.register_plugin(
'pybtex.style.formatting', 'plain', TestPlugin2))
plugin = pybtex.plugin.find_plugin('pybtex.style.formatting', 'plain')
nose.tools.assert_is_not(plugin, TestPlugin2)
nose.tools.assert_is(plugin, pybtex.style.formatting.plain.Style)
def test_register_plugin_3():
nose.tools.assert_true(
pybtex.plugin.register_plugin(
'pybtex.style.formatting.suffixes', '.woo', TestPlugin3))
plugin = pybtex.plugin.find_plugin(
'pybtex.style.formatting', filename='test.woo')
nose.tools.assert_is(plugin, TestPlugin3)
def test_bad_find_plugin():
nose.tools.assert_raises(
pybtex.plugin.PluginGroupNotFound,
lambda: pybtex.plugin.find_plugin("pybtex.invalid.group", "__oops"))
nose.tools.assert_raises_regexp(
pybtex.plugin.PluginNotFound,
re.escape('plugin pybtex.style.formatting.__oops not found'),
lambda: pybtex.plugin.find_plugin("pybtex.style.formatting", "__oops"))
nose.tools.assert_raises(
pybtex.plugin.PluginNotFound,
lambda: pybtex.plugin.find_plugin("pybtex.style.formatting",
filename="oh.__oops"))
def test_bad_register_plugin():
nose.tools.assert_raises(
pybtex.plugin.PluginGroupNotFound,
lambda: pybtex.plugin.register_plugin(
"pybtex.invalid.group", "__oops", TestPlugin1))
nose.tools.assert_raises(
pybtex.plugin.PluginGroupNotFound,
lambda: pybtex.plugin.register_plugin(
"pybtex.invalid.group.suffixes", ".__oops", TestPlugin1))
# suffixes must start with a dot
nose.tools.assert_raises(
ValueError,
lambda: pybtex.plugin.register_plugin(
"pybtex.style.formatting.suffixes", "notasuffix", TestPlugin1))
def test_plugin_suffix():
plugin = pybtex.plugin.find_plugin(
"pybtex.database.input", filename="test.bib")
nose.tools.assert_is(plugin, pybtex.database.input.bibtex.Parser)
def test_plugin_alias():
pybtex.plugin._DEFAULT_PLUGINS['pybtex.legacy.input'] = 'punchcard'
nose.tools.assert_true(
pybtex.plugin.register_plugin(
'pybtex.legacy.input', 'punchcard', TestPlugin4))
nose.tools.assert_true(
pybtex.plugin.register_plugin(
'pybtex.legacy.input.aliases', 'punchedcard', TestPlugin4))
nose.tools.assert_equal(
list(pybtex.plugin.enumerate_plugin_names('pybtex.legacy.input')),
['punchcard']
)
plugin = pybtex.plugin.find_plugin("pybtex.legacy.input", 'punchedcard')
nose.tools.assert_equal(plugin, TestPlugin4)
del pybtex.plugin._DEFAULT_PLUGINS['pybtex.legacy.input']
def test_plugin_class():
"""If a plugin class is passed to find_plugin(), it shoud be returned back."""
plugin = pybtex.plugin.find_plugin("pybtex.database.input", 'bibtex')
plugin2 = pybtex.plugin.find_plugin("pybtex.database.input", plugin)
nose.tools.assert_equal(plugin, plugin2)
|
Python
| 0 |
@@ -1224,16 +1224,242 @@
plain%0A%0A%0A
+# unittest.assertRaisesRegexp() is deprecated and renamed to%0A# unittest.assertRaisesRegex() in Python 3%0Aif not hasattr(nose.tools, 'assert_raises_regex'):%0A nose.tools.assert_raises_regex = nose.tools.assert_raises_regexp%0A%0A%0A
def test
@@ -3623,17 +3623,16 @@
es_regex
-p
(%0A
|
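Decoded, the insert aliases the old nose helper to its Python 3 name once at import time, and the single call site switches from assert_raises_regexp( to assert_raises_regex(:

# unittest.assertRaisesRegexp() is deprecated and renamed to
# unittest.assertRaisesRegex() in Python 3
if not hasattr(nose.tools, 'assert_raises_regex'):
    nose.tools.assert_raises_regex = nose.tools.assert_raises_regexp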
c2a99a33455e3b01ccce3faebd3a541b4a76e579
|
Bump version
|
yamale/__init__.py
|
yamale/__init__.py
|
from .yamale import make_schema, make_data, validate
VERSION = (1, 0, 0, 'final', 0)
# Dynamically calculate the version based on VERSION.
def get_version():
"Returns a PEP 386-compliant version number from VERSION."
version = VERSION
assert len(version) == 5
assert version[3] in ('alpha', 'beta', 'rc', 'final')
# Now build the two parts of the version number:
# main = X.Y[.Z]
# sub = .devN - for pre-alpha releases
# | {a|b|c}N - for alpha, beta and rc releases
parts = 2 if version[2] == 0 else 3
main = '.'.join(str(x) for x in version[:parts])
sub = ''
if version[3] != 'final':
mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'c'}
sub = mapping[version[3]] + str(version[4])
return str(main + sub)
__version__ = get_version()
|
Python
| 0 |
@@ -65,17 +65,17 @@
(1, 0,
-0
+1
, 'final
|
33b4c181b2d9a3d74f45ee1ced971b5bca58b35b
|
remove unused import
|
treenav/admin.py
|
treenav/admin.py
|
from django.contrib import admin
from django import forms
from django.contrib.contenttypes import generic
from treenav import models as treenav
from treenav.forms import MenuItemForm, GenericInlineMenuItemForm
class GenericMenuItemInline(generic.GenericStackedInline):
"""
Add this inline to your admin class to support editing related menu items
from that model's admin page.
"""
extra = 0
max_num = 1
model = treenav.MenuItem
form = GenericInlineMenuItemForm
class SubMenuItemInline(admin.TabularInline):
model = treenav.MenuItem
extra = 1
form = MenuItemForm
prepopulated_fields = {'slug': ('label',)}
exclude = ('new_parent',)
class MenuItemAdmin(admin.ModelAdmin):
list_display = (
'menu_items',
'slug',
'label',
'parent',
'link',
'href_link',
'order',
'is_enabled',
)
list_filter = ('parent', 'is_enabled')
raw_id_fields = ('parent',)
prepopulated_fields = {'slug': ('label',)}
inlines = (SubMenuItemInline, )
fieldsets = (
(None, {
'fields': ('new_parent', 'label', 'slug', 'order', 'is_enabled')
}),
('URL', {
'fields': ('link', ('content_type', 'object_id')),
'description': "The URL for this menu item, which can be a "
"fully qualified URL, an absolute URL, a named "
"URL, a path to a Django view, a regular "
"expression, or a generic relation to a model that "
"supports get_absolute_url()"
}),
)
list_editable = ('label',)
form = MenuItemForm
def menu_items(self, obj):
if obj.level == 0:
return obj.label
return ' '*obj.level + '- %s' % obj.label
menu_items.allow_tags = True
def href_link(self, obj):
return '<a href="%s">%s</a>' % (obj.href, obj.href)
href_link.short_description = 'HREF'
href_link.allow_tags = True
admin.site.register(treenav.MenuItem, MenuItemAdmin)
|
Python
| 0.000001 |
@@ -30,33 +30,8 @@
min%0A
-from django import forms%0A
from
|
29f085888ec0b44f225fda47634abee3a8e48bda
|
Make host.install() optional
|
pyblish_starter/pipeline.py
|
pyblish_starter/pipeline.py
|
import os
import re
import sys
import types
import logging
import datetime
from pyblish import api
self = sys.modules[__name__]
self._registered_data = list()
self._registered_families = list()
self._log = logging.getLogger()
# Mock host interface
host = types.ModuleType("default")
host.ls = lambda: ["Asset1", "Asset2"]
host.loader = lambda asset, version, representation: None
host.creator = lambda name, family: "my_instance"
self._registered_host = host
def install(host):
"""Install `host` into the running Python session.
Arguments:
host (module): A Python module containing the Pyblish
starter host-interface.
"""
host.install()
register_host(host)
register_plugins()
register_data(key="id", value="pyblish.starter.instance")
register_data(key="label", value="{name}")
register_data(key="family", value="{family}")
register_family(
name="starter.model",
help="Polygonal geometry for animation"
)
register_family(
name="starter.rig",
help="Character rig"
)
register_family(
name="starter.animation",
help="Pointcache"
)
def ls():
"""List available assets"""
root = self.registered_host().root()
dirname = os.path.join(root, "public")
self._log.debug("Listing %s" % dirname)
try:
return os.listdir(dirname)
except OSError:
return list()
def abspath(asset, version=-1, representation=None):
root = registered_host().root()
dirname = os.path.join(
root,
"public",
asset
)
try:
versions = os.listdir(dirname)
except OSError:
raise OSError("\"%s\" not found." % asset)
# Automatically deduce version
if version == -1:
version = find_latest_version(versions)
dirname = os.path.join(
dirname,
"v%03d" % version
)
try:
representations = dict()
for fname in os.listdir(dirname):
name, ext = os.path.splitext(fname)
representations[ext] = fname
if not representations:
raise OSError
except OSError:
raise OSError("v%03d of \"%s\" not found." % (version, asset))
# Automatically deduce representation
if representation is None:
fname = representations.values()[0]
return os.path.join(
dirname,
fname
)
def register_host(host):
for member in ("root",
"loader",
"creator"):
assert hasattr(host, member), "Missing %s" % member
self._registered_host = host
def register_plugins():
"""Register accompanying plugins"""
from . import plugins
plugin_path = os.path.dirname(plugins.__file__)
api.register_plugin_path(plugin_path)
def register_data(key, value, help=None):
"""Register new default attribute
Arguments:
key (str): Name of data
value (object): Arbitrary value of data
help (str, optional): Briefly describe
"""
self._registered_data.append({
"key": key,
"value": value,
"help": help or ""
})
def register_family(name, data=None, help=None):
"""Register family and attributes for family
Arguments:
name (str): Name of family
data (dict, optional): Additional data, see
:func:`register_data` for docstring on members
help (str, optional): Briefly describe this family
"""
self._registered_families.append({
"name": name,
"data": data or [],
"help": help or ""
})
def registered_families():
return list(self._registered_families)
def registered_data():
return list(self._registered_data)
def registered_host():
return self._registered_host
def time():
return datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%SZ")
def format_private_dir(root, name):
dirname = os.path.join(root, "private", time(), name)
return dirname
def find_latest_version(versions):
"""Return latest version from list of versions
If multiple numbers are found in a single version,
the last one found is used. E.g. (6) from "v7_22_6"
Arguments:
versions (list): Version numbers as string
Example:
>>> find_next_version(["v001", "v002", "v003"])
4
>>> find_next_version(["1", "2", "3"])
4
>>> find_next_version(["v1", "v0002", "verision_3"])
4
>>> find_next_version(["v2", "5_version", "verision_8"])
9
>>> find_next_version(["v2", "v3_5", "_1_2_3", "7, 4"])
6
>>> find_next_version(["v010", "v011"])
12
"""
highest_version = 0
for version in versions:
matches = re.findall(r"\d+", version)
if not matches:
continue
version = int(matches[-1])
if version > highest_version:
highest_version = version
return highest_version
def find_next_version(versions):
"""Return next version from list of versions
See docstring for :func:`find_latest_version`.
Arguments:
versions (list): Version numbers as string
Returns:
int: Next version number
"""
return find_latest_version(versions) + 1
|
Python
| 0.000025 |
@@ -290,12 +290,35 @@
ost.
-ls =
+__dict__.update(%7B%0A %22ls%22:
lam
@@ -346,22 +346,23 @@
t2%22%5D
-%0Ahost.
+,%0A %22
loader
- =
+%22:
lam
@@ -405,23 +405,24 @@
None
-%0Ahost.
+,%0A %22
creator
- =
+%22:
lam
@@ -452,16 +452,19 @@
nstance%22
+%0A%7D)
%0A%0Aself._
@@ -694,22 +694,116 @@
-host.install()
+try:%0A # Optional host install function%0A host.install()%0A except AttributeError:%0A pass
%0A%0A
|
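Decoded, the mock host's attributes are folded into a single __dict__.update() call, and install() becomes a best-effort step so hosts that do not define it still register cleanly:

host.__dict__.update({
    "ls": lambda: ["Asset1", "Asset2"],
    "loader": lambda asset, version, representation: None,
    "creator": lambda name, family: "my_instance"
})

# inside install(host):
try:
    # Optional host install function
    host.install()
except AttributeError:
    pass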
712989db37532a7810139dd2f7007c66652a0dd7
|
Fix documentation
|
ditto/flickr/management/commands/__init__.py
|
ditto/flickr/management/commands/__init__.py
|
from django.core.management.base import CommandError
from ....core.management.commands import DittoBaseCommand
class FetchCommand(DittoBaseCommand):
"""
Parent for all classes that fetch some things from Flickr. Photos,
Photosets, Files, etc.
"""
def add_arguments(self, parser):
"All children will have the --account option."
super().add_arguments(parser)
parser.add_argument(
"--account",
action="store",
default=False,
help=(
"The NSID of the Flickr User associated with the one "
"Account to fetch for."
),
)
class FetchPhotosCommand(FetchCommand):
# What we're fetching:
singular_noun = "Photo"
plural_noun = "Photos"
# Child classes should supply some help text for the --days and --start --end arguments:
days_help = ""
range_help = ""
def add_arguments(self, parser):
super().add_arguments(parser)
group = parser.add_mutually_exclusive_group()
group.add_argument("--days", action="store", default=False, help=self.days_help)
group.add_argument(
"--range", action="store", default=False, help=self.range_help
)
def handle(self, *args, **options):
# We might be fetching for a specific account or all (None).
nsid = options["account"] if options["account"] else None
if options["days"]:
# Will be either 'all' or a number; make the number an int.
if options["days"].isdigit():
options["days"] = int(options["days"])
elif options["days"] != "all":
raise CommandError("--days should be an integer or 'all'.")
results = self.fetch_photos(nsid, options["days"], range=None)
self.output_results(results, options.get("verbosity", 1))
elif options["range"]:
results = self.fetch_photos(nsid, options["days"], options["range"])
self.output_results(results, options.get("verbosity", 1))
elif options["account"]:
raise CommandError("Specify --days as well as --account.")
else:
raise CommandError("Specify --days , eg --days=3 or --days=all.")
def fetch_photos(self, nsid, days, range):
"""Child classes should override this method to call a method that
fetches photos and returns results, eg:
return RecentPhotosMultiAccountFetcher(nsid=nsid).fetch(days=days)
"""
return {}
|
Python
| 0.000001 |
@@ -855,19 +855,13 @@
d --
-start --end
+range
arg
|
1f70cefdb94f995c3db9a29a11262aaf2cd7658e
|
add custom exceptions to estimators module
|
ndd/estimators.py
|
ndd/estimators.py
|
# -*- coding: utf-8 -*-
# Copyright (C) 2016,2017 Simone Marsili
# All rights reserved.
# License: BSD 3 clause
"""Base classes module."""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from builtins import ( # pylint: disable=redefined-builtin, unused-import
bytes, dict, int, list, object, range, str, ascii, chr, hex, input, next,
oct, open, pow, round, super, filter, map, zip)
import logging
import numpy
from ndd.base import EntropyEstimator
logger = logging.getLogger(__name__)
__all__ = ['Entropy', 'KLDivergence', 'JSDivergence']
# TODO: docstrings
class Entropy(EntropyEstimator):
"""Entropy estimator class.
Default: use the NSB estimator function.
Parameters
----------
alpha : float, optional
If not None: Wolpert-Wolf estimator (fixed alpha).
A single Dirichlet prior with concentration parameter alpha.
alpha > 0.0.
plugin : boolean, optional
If True: 'plugin' estimator.
The discrete distribution is estimated from the empirical frequencies
over bins and inserted into the entropy definition (plugin estimator).
If alpha is passed in combination with plugin=True, add
alpha pseudocounts to each frequency count (pseudocount estimator).
Attributes
----------
estimator : estimator function
The four possible entropy estimator functions are: plugin, plugin with
pseudocounts, Wolpert-Wolf (WW) and Nemenman-Shafee-Bialek (NSB).
"""
def fit(self, pk, k=None):
"""
Compute an entropy estimate from pk.
Parameters
----------
pk : array_like, shape (n_bins,)
The number of occurrences of a set of bins.
k : int, optional
Number of bins. k >= len(pk).
Float values are valid input for whole numbers (e.g. k=1.e3).
Defaults to len(pk).
Returns
-------
self : object
Returns the instance itself.
"""
if k == 1: # single bin
self.estimate_ = self.err_ = 0.0
else:
self.estimate_, self.err_ = self.estimator(pk, k)
return self
class KLDivergence(EntropyEstimator):
"""Kullback-Leibler divergence estimator class.
Default: use the NSB estimator function.
Parameters
----------
alpha : float, optional
If alpha is not None: Wolpert-Wolf estimator (fixed alpha).
A single Dirichlet prior with concentration parameter alpha.
alpha > 0.0.
plugin : boolean, optional
If True: 'plugin' estimator.
The discrete distribution is estimated from the empirical frequencies
over bins and inserted into the entropy definition (plugin estimator).
If alpha is passed in combination with plugin=True, add
alpha pseudocounts to each frequency count (pseudocount estimator).
Attributes
----------
estimator : estimator function
The four possible entropy estimator functions are: plugin, plugin with
pseudocounts, Wolpert-Wolf (WW) and Nemenman-Shafee-Bialek (NSB).
"""
"""Kullback-Leibler divergence estimator class."""
def fit(self, pk, qk, k=None):
"""
pk : array_like
The number of occurrences of a set of bins.
qk : array_like
Reference PMF in sum(pk log(pk/qk).
Must be a valid PMF (non-negative, normalized).
k : int, optional
Number of bins. k >= len(pk).
Float values are valid input for whole numbers (e.g. k=1.e3).
Defaults to len(pk).
"""
if is_pmf(qk):
log_qk = numpy.log(qk)
else:
raise ValueError('qk must be a valid PMF')
if len(log_qk) != len(pk):
raise ValueError('qk and pk must have the same length.')
if k == 1: # single bin
self.estimate_ = self.err_ = 0.0
else:
self.estimate_, self.err_ = self.estimator(pk, k)
self.estimate_ += numpy.sum(pk * log_qk) / float(sum(pk))
self.estimate_ = - self.estimate_
return self
class JSDivergence(EntropyEstimator):
"""Jensen-Shannon divergence estimator class.
Default: use the NSB estimator function.
Parameters
----------
alpha : float, optional
If alpha is not None: Wolpert-Wolf estimator (fixed alpha).
A single Dirichlet prior with concentration parameter alpha.
alpha > 0.0.
plugin : boolean, optional
If True: 'plugin' estimator.
The discrete distribution is estimated from the empirical frequencies
over bins and inserted into the entropy definition (plugin estimator).
If alpha is passed in combination with plugin=True, add
alpha pseudocounts to each frequency count (pseudocount estimator).
Attributes
----------
estimator : estimator function
The four possible entropy estimator functions are: plugin, plugin with
pseudocounts, Wolpert-Wolf (WW) and Nemenman-Shafee-Bialek (NSB).
"""
def fit(self, pk, k=None):
"""
pk : array_like
n-by-p array. Different rows correspond to counts from different
distributions with the same discrete sample space.
k : int, optional
Number of bins. k >= p if pk is n-by-p.
Float values are valid input for whole numbers (e.g. k=1.e3).
Defaults to pk.shape[1].
"""
pk = numpy.int32(pk)
if pk.ndim != 2:
raise ValueError('counts must be 2D.')
ws = numpy.float64(pk.sum(axis=1))
ws /= ws.sum()
if k == 1: # single bin
self.estimate_ = 0.0
else:
self.estimate_ = self.estimator(pk.sum(axis=0), k)[0] - sum(
ws[i] * self.estimator(x, k)[0] for i, x in enumerate(pk))
return self
def is_pmf(a):
a = numpy.float64(a)
not_negative = numpy.all(a >= 0)
normalized = numpy.isclose(sum(a), 1.0)
return not_negative and normalized
|
Python
| 0 |
@@ -511,16 +511,65 @@
stimator
+%0Afrom ndd.exceptions import PMFError, CountsError
%0A%0Alogger
@@ -3702,32 +3702,118 @@
ts to len(pk).%0A%0A
+ Raises%0A ------%0A PMFError%0A If qk is not a valid PMF.%0A%0A
%22%22%22%0A
@@ -3800,32 +3800,32 @@
F.%0A%0A %22%22%22%0A
-
if is_pm
@@ -3890,37 +3890,35 @@
raise
-Value
+PMF
Error('qk must b
@@ -3983,29 +3983,27 @@
raise
-Value
+PMF
Error('qk an
@@ -5664,16 +5664,104 @@
pe%5B1%5D.%0A%0A
+ Raises%0A ------%0A CountsError%0A If pk is not a 2D array.%0A%0A
@@ -5840,13 +5840,14 @@
ise
-Value
+Counts
Erro
@@ -5856,16 +5856,22 @@
'counts
+array
must be
|
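The diff only shows the import `from ndd.exceptions import PMFError, CountsError`; the exceptions module itself is not part of this record. A plausible shape for it, as a sketch (the shared base class is an assumption, only the two subclass names appear in the commit):
class NddError(Exception):
    """Assumed base class for ndd-specific errors."""

class PMFError(NddError):
    """Raised when an array is not a valid probability mass function."""

class CountsError(NddError):
    """Raised when a counts array has the wrong shape or content."""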
6c8a9edb6d733ac680ea2cbcb1c8d12511aa72be
|
Update webserver.py
|
webserver.py
|
webserver.py
|
#!/usr/bin/env python
# author: [email protected]
import ConfigParser
from bottle import route, install, run, template, static_file, PasteServer
from bottle_sqlite import SQLitePlugin
import json
import urllib
import urllib2
import datetime
config = ConfigParser.RawConfigParser()
config.read('config.ini')
install(SQLitePlugin(dbfile=(config.get("pool", "database"))))
@route('/')
def default():
output = template('default')
return output
@route('/static/:path#.+#', name='static')
def static(path):
return static_file(path, root='static')
@route('/accounts')
def accounts():
poolAccount = json.loads(urllib2.urlopen(config.get("pool", "nhzhost")+"/nhz?requestType=getAccount&account="+config.get("pool", "poolaccount")).read())
clean = poolAccount["lessors"]
output = template('accounts', leased=clean)
return output
@route('/blocks')
def blocks(db):
c = db.execute("SELECT timestamp, block, totalfee FROM blocks")
result = c.fetchall()
c.close()
payload = {
'requestType': 'getForging',
'secretPhrase': config.get("pool", "poolphrase")
}
opener = urllib2.build_opener(urllib2.HTTPHandler())
data = urllib.urlencode(payload)
forging = json.loads(opener.open(config.get("pool", "nhzhost")+'/nhz', data=data).read())
getdl = forging["deadline"]
deadline = str(datetime.timedelta(seconds=getdl))
output = template('blocks', rows=result, fg=deadline)
return output
@route('/payouts')
def payouts(db):
c = db.execute("SELECT account, percentage, amount, paid, blocktime FROM accounts")
result = c.fetchall()
output = template('payouts', rows=result)
return output
run(server=PasteServer, port=8888, host='0.0.0.0')
|
Python
| 0 |
@@ -964,16 +964,35 @@
M blocks
+ WHERE totalfee %3E 0
%22)%0A r
@@ -1764,8 +1764,9 @@
.0.0.0')
+%0A
|
5f839240a4223d599ad57393097bbc19502ae213
|
add condition not is_retracted
|
website/discovery/views.py
|
website/discovery/views.py
|
import datetime
from website import settings
from website.project import Node
from website.project.utils import recent_public_registrations
from modularodm.query.querydialect import DefaultQueryDialect as Q
from framework.analytics.piwik import PiwikClient
def activity():
popular_public_projects = []
popular_public_registrations = []
hits = {}
# get the date for exactly one week ago
target_date = datetime.date.today() - datetime.timedelta(weeks=1)
if settings.PIWIK_HOST:
client = PiwikClient(
url=settings.PIWIK_HOST,
auth_token=settings.PIWIK_ADMIN_TOKEN,
site_id=settings.PIWIK_SITE_ID,
period='week',
date=target_date.strftime('%Y-%m-%d'),
)
popular_project_ids = [
x for x in client.custom_variables if x.label == 'Project ID'
][0].values
for nid in popular_project_ids:
node = Node.load(nid.value)
if node is None:
continue
if node.is_public and not node.is_registration and not node.is_deleted:
if len(popular_public_projects) < 10:
popular_public_projects.append(node)
elif node.is_public and node.is_registration and not node.is_deleted:
if len(popular_public_registrations) < 10:
popular_public_registrations.append(node)
if len(popular_public_projects) >= 10 and len(popular_public_registrations) >= 10:
break
hits = {
x.value: {
'hits': x.actions,
'visits': x.visits
} for x in popular_project_ids
}
# Projects
recent_query = (
Q('category', 'eq', 'project') &
Q('is_public', 'eq', True) &
Q('is_deleted', 'eq', False)
)
recent_public_projects = Node.find(
recent_query &
Q('is_registration', 'eq', False)
).sort(
'-date_created'
).limit(10)
return {
'recent_public_projects': recent_public_projects,
'recent_public_registrations': recent_public_registrations(),
'popular_public_projects': popular_public_projects,
'popular_public_registrations': popular_public_registrations,
'hits': hits,
}
|
Python
| 0.001134 |
@@ -1279,32 +1279,58 @@
node.is_deleted
+ and not node.is_retracted
:%0A
|
5820a92c7945657a38eb5b54eef5e47b2ff1ec39
|
Fix url coding
|
src/checker/plugin/links_finder_plugin.py
|
src/checker/plugin/links_finder_plugin.py
|
from bs4 import BeautifulSoup
from yapsy.IPlugin import IPlugin
from requests.exceptions import InvalidSchema
from requests.exceptions import ConnectionError
from requests.exceptions import MissingSchema
import requests
import urlparse
import urllib
import marisa_trie
class LinksFinder(IPlugin):
def __init__(self):
self.database = None
self.types = None
self.trie = None
def setDb(self, DB):
self.database = DB
def setTypes(self, types):
self.types = types
self.trie = marisa_trie.Trie(types)
def check(self, transactionId, content):
""" Najde tagy <a>, <link>, vybere atribut href, ulozi jako odkazy,
stahne obsah jako dalsi transakci.
"""
soup = BeautifulSoup(content, 'html.parser')
uri = self.database.getUri(transactionId)
self.make_links_absolute(soup, uri,'a')
links = soup.find_all('a')
self.check_links(links, "Link to ", transactionId, 'href')
self.make_links_absolute(soup, uri, 'link')
links2 = soup.find_all('link')
self.check_links(links2, "Linked resource: ", transactionId, 'href')
self.make_sources_absolute(soup, uri, 'img')
images = soup.find_all('img')
self.check_links(images, "Image: ", transactionId, 'src')
return
def getId(self):
return "linksFinder"
def getLink(self, url, reqId, srcId):
try:
print "Inspecting "+url
r = requests.head(url)
if r.status_code != 200:
self.database.setDefect(srcId, "badlink", 0, url)
if 'content-type' in r.headers.keys():
ct = r.headers['content-type']
else:
ct = ''
if self.getMaxPrefix(ct) in self.types:
print "Downloading "+url
r = requests.get(url)
self.database.setResponse(reqId, r.status_code, ct, r.text.encode("utf-8").strip()[:65535])
else: print "Content type not accepted: "+ct
except InvalidSchema:
print "Invalid schema"
except ConnectionError:
print "Connection error"
except MissingSchema:
print "Missing schema"
def make_links_absolute(self, soup, url, tag):
print "Make links absolute: "+url
for tag in soup.findAll(tag, href=True):
if 'href' in tag.attrs:
tag['href'] = urlparse.urljoin(url, tag['href'])
def make_sources_absolute(self, soup, url, tag):
for tag in soup.findAll(tag):
tag['src'] = urlparse.urljoin(url, tag['src'])
def check_links(self, links, logMsg, transactionId, tag):
for link in links:
url = link.get(tag)
if url is not None:
urlNoAnchor = url.split('#')[0]
reqId = self.database.setLink(transactionId, urllib.quote(urlNoAnchor))
print logMsg+str(url)
if reqId != -1:
self.getLink(url, reqId, transactionId)
def getMaxPrefix(self, ctype):
prefList = self.trie.prefixes(unicode(ctype, encoding="utf-8"))
if len(prefList) > 0:
return prefList[-1]
else: return ctype
|
Python
| 0.999981 |
@@ -2889,16 +2889,32 @@
NoAnchor
+.encode('utf-8'(
))%0A
|
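Why the added encode matters: on Python 2, urllib.quote works on byte strings, and passing a unicode string containing non-ASCII characters typically fails with KeyError. The recorded diff text is unbalanced as stored; a balanced sketch of the call it appears to intend (the example URL is hypothetical):
# -*- coding: utf-8 -*-
import urllib

url_no_anchor = u'http://example.com/stránka'         # hypothetical URL
quoted = urllib.quote(url_no_anchor.encode('utf-8'))  # bytes in, safe ASCII out
print(quoted)  # http%3A//example.com/str%C3%A1nka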
954f89757235ae390b9c246f420d5adf909aa7de
|
in product listing, don't truncate description
|
website/helpers/product.py
|
website/helpers/product.py
|
# Copyright (c) 2012 Web Notes Technologies Pvt Ltd.
# License: GNU General Public License (v3). For more information see license.txt
from __future__ import unicode_literals
import webnotes
from webnotes.utils import cstr
from website.utils import build_html, url_for_website, delete_page_cache
@webnotes.whitelist(allow_guest=True)
def get_product_info(item_code):
"""get product price / stock info"""
price_list = webnotes.conn.get_value("Item", item_code, "website_price_list")
warehouse = webnotes.conn.get_value("Item", item_code, "website_warehouse")
if warehouse:
in_stock = webnotes.conn.sql("""select actual_qty from tabBin where
item_code=%s and warehouse=%s""", (item_code, warehouse))
if in_stock:
in_stock = in_stock[0][0] > 0 and 1 or 0
else:
in_stock = -1
return {
"price": price_list and webnotes.conn.sql("""select ref_rate, ref_currency from
`tabItem Price` where parent=%s and price_list_name=%s""",
(item_code, price_list), as_dict=1) or [],
"stock": in_stock
}
@webnotes.whitelist(allow_guest=True)
def get_product_list(search=None, product_group=None, start=0, limit=10):
# DOUBT: why is product_group param passed?
# base query
query = """select name, item_name, page_name, website_image, item_group,
web_long_description as website_description
from `tabItem` where docstatus = 0 and show_in_website = 1 """
# search term condition
if search:
query += """and (web_long_description like %(search)s or
item_name like %(search)s or name like %(search)s)"""
search = "%" + cstr(search) + "%"
# order by
query += """order by weightage desc, modified desc limit %s, %s""" % (start, limit)
data = webnotes.conn.sql(query, {
"search": search,
"product_group": product_group
}, as_dict=1)
return [get_item_for_list_in_html(r) for r in data]
def get_product_list_for_group(product_group=None, start=0, limit=10):
child_groups = ", ".join(['"' + i[0] + '"' for i in get_child_groups(product_group)])
# base query
query = """select name, item_name, page_name, website_image, item_group,
web_long_description as website_description
from `tabItem` where docstatus = 0 and show_in_website = 1
and (item_group in (%s)
or name in (select parent from `tabWebsite Item Group` where item_group in (%s))) """ % (child_groups, child_groups)
query += """order by weightage desc, modified desc limit %s, %s""" % (start, limit)
data = webnotes.conn.sql(query, {"product_group": product_group}, as_dict=1)
return [get_item_for_list_in_html(r) for r in data]
def get_child_groups(item_group_name):
item_group = webnotes.doc("Item Group", item_group_name)
return webnotes.conn.sql("""select name
from `tabItem Group` where lft>=%(lft)s and rgt<=%(rgt)s
and show_in_website = 1""", item_group.fields)
def get_group_item_count(item_group):
child_groups = ", ".join(['"' + i[0] + '"' for i in get_child_groups(item_group)])
return webnotes.conn.sql("""select count(*) from `tabItem`
where docstatus = 0 and show_in_website = 1
and (item_group in (%s)
or name in (select parent from `tabWebsite Item Group`
where item_group in (%s))) """ % (child_groups, child_groups))[0][0]
def get_item_for_list_in_html(r):
scrub_item_for_list(r)
r.template = "html/product_in_list.html"
return build_html(r)
def scrub_item_for_list(r):
if not r.website_description:
r.website_description = "No description given"
if len(r.website_description.split(" ")) > 24:
r.website_description = " ".join(r.website_description.split(" ")[:24]) + "..."
r.website_image = url_for_website(r.website_image)
def get_parent_item_groups(item_group_name):
item_group = webnotes.doc("Item Group", item_group_name)
return webnotes.conn.sql("""select name, page_name from `tabItem Group`
where lft <= %s and rgt >= %s
and ifnull(show_in_website,0)=1
order by lft asc""", (item_group.lft, item_group.rgt), as_dict=True)
def invalidate_cache_for(item_group):
for i in get_parent_item_groups(item_group):
if i.page_name:
delete_page_cache(i.page_name)
|
Python
| 0.99999 |
@@ -3417,138 +3417,8 @@
en%22%0A
-%09if len(r.website_description.split(%22 %22)) %3E 24:%0A%09%09r.website_description = %22 %22.join(r.website_description.split(%22 %22)%5B:24%5D) + %22...%22%0A
%09r.w
|
90981eef7d737d03b9a086b34b687f60d29d9e71
|
Add missing Exception
|
feature_ms2_clone.py
|
feature_ms2_clone.py
|
import sys
import os
import csv
import numpy
import pymzml
import gzip  # used below when mzml_fn ends with '.gz'
import xml.etree.ElementTree
from pyteomics import mzid
MS1_Precision = 1e-5
def load_feature_table(fn):
table = []
with open(fn, 'r') as fh:
rd = csv.reader(fh, delimiter=',')
for row in rd:
if row[0] == 'FEATURE':
_, rt, mz, _, chg, _, _, _, _, rtl, rtr = row
table.append([float(mz), int(chg), float(rtl), float(rtr), float(rt)])
table.sort(key=lambda x: x[3])
return table
def load_mzid(fn, qval=0.001):
from pprint import pprint
psms = []
specids = [0]
psmReader = mzid.read(fn)
for psm in psmReader:
if psm.has_key('SpectrumIdentificationItem'):
try:
specids.append( int(psm['scan number(s)']))
except KeyError:
specids.append( int( psm['spectrumID'].split('=')[-1] ))
else:
pass
for match in psm['SpectrumIdentificationItem']:
if match['MS-GF:QValue'] < qval and match['rank'] == 1 and match['IsotopeError'] == 0 and 2 <= match['chargeState'] <= 4:
dm = match['experimentalMassToCharge'] - match['calculatedMassToCharge']
dm = dm * 1e6 / match['calculatedMassToCharge']
psms.append(dm)
return numpy.array(psms), max(specids)
def spectra_clone(feature_fn, mzml_fn, dm_offset, max_scan=0, full_iso_width=4.0):
features = load_feature_table(feature_fn)
iso_width = full_iso_width / 2.0
sys.stderr.write("Auto correct precursor m/z offset: %.2f ppm \n" % dm_offset)
if mzml_fn.endswith('.gz'):
fh = gzip.open(mzml_fn)
else:
fh = open(mzml_fn)
outpath = "%s.demix.mgf" % mzml_fn
sys.stdout = open(outpath, 'wb')
speciter = pymzml.run.Reader(mzml_fn)
timescale = 0
try:
for spec in speciter:
element = spec.xmlTree.next()
title = element.get('id')
idx = int(title.split('scan=')[-1])
if idx % 1000 == 0 and max_scan > 0:
sys.stderr.write("DeMix %d MS/MS (~%.1f%%)\n" % (idx, idx * 100.0 / max_scan))
if not timescale:
xmltext = xml.etree.ElementTree.tostring(element)
if xmltext.count(r'unitName="second"'):
timescale = 1
else:
timescale = 60
if spec['ms level'] == 2.0:
try:
rt = float(spec['scan time']) * timescale
except:
continue
for p in spec['precursors']:
pmz = float(p['mz'])
try:
pz = int(p['charge'])
except:
pz = 0
featured = False
peaks = sorted(filter(lambda x: x[1], spec.centroidedPeaks), key=lambda i: i[0])
for f in features:
fmz, fz, frt_left, frt_right, frt = f
if frt_left < rt < frt_right and abs(pmz - fmz) < iso_width:
if abs(pmz - fmz) / pmz <= MS1_Precision:
featured = True
print 'BEGIN IONS'
print 'TITLE=%d[%d:%f:%f]' % (idx, features.index(f), fmz, frt)
print 'RTINSECONDS=%f' % rt
print 'PEPMASS=%f' % (fmz - fmz * dm_offset * 1e-6)
print 'CHARGE=%d+' % fz
print 'RAWFILE=%s [%f:%d] diff:%f' % (title, pmz, pz, (fmz - pmz))
for a, b in peaks:
print a, b
print 'END IONS\n'
if featured == False and pz > 1:
print 'BEGIN IONS'
print 'TITLE=%d[-:%f:%f]' % (idx, pmz, rt)
print 'RTINSECONDS=%f' % rt
print 'PEPMASS=%f' % (pmz - pmz * dm_offset * 1e-6)
print 'CHARGE=%d+' % pz
print 'RAWFILE=%s' % (title)
for a, b in peaks:
print a, b
print 'END IONS\n'
except KeyError:
pass
return outpath
if __name__ == '__main__':
feature_fn = sys.argv[1] # feature csv table exported from FeatureXML by TOPP.
mzml_fn = sys.argv[2] # centroided MS/MS spectra in mzML, the same file which has been used in the first-pass database search.
rawpsm_fn = sys.argv[3] # first-pass database search result: Morpheus .PSMs.tsv file.
full_iso_width = float(sys.argv[4]) # the total width of precursor isolation window.
macc, max_scan = load_mzid(rawpsm_fn)
# sys.stderr.write("Mean Mass Error (ppm): %.3f SD: %.3f\n" % (macc.mean(), macc.std()))
# spectra_clone(feature_fn, mzml_fn, macc.mean(), max_scan, full_iso_width)
|
Python
| 0.998627 |
@@ -4330,32 +4330,33 @@
%0A except
+(
KeyError
:%0A pa
@@ -4335,32 +4335,45 @@
except (KeyError
+, ValueError)
:%0A pass
|
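The fix widens a single handler to a tuple of exception types, the standard way to treat several failure modes identically. A self-contained sketch of the same pattern (the dict shape mirrors the mzid records above; the values are made up):
def safe_scan_id(record):
    try:
        return int(record['spectrumID'].split('=')[-1])
    except (KeyError, ValueError):
        # missing key or unparsable number are both "no scan id"
        return None

print(safe_scan_id({'spectrumID': 'scan=42'}))   # 42
print(safe_scan_id({'spectrumID': 'scan=n/a'}))  # None (ValueError)
print(safe_scan_id({}))                          # None (KeyError)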
6902b88472826f6042dda6acda6f8a22d2fef64f
|
Change food color.
|
enactiveagents/model/structure.py
|
enactiveagents/model/structure.py
|
"""
Module that holds classes that represent structures.
"""
import world
class Structure(world.Entity):
"""
Class representing structures in the world (i.e., static but potentially
interactable with by agents).
"""
def collidable(self):
return True
class Wall(Structure):
"""
Class representing a wall structure.
"""
def __init__(self):
super(Wall, self).__init__()
self.height = 1
self.width = 1
class Block(Structure):
"""
Class representing a block structure.
"""
color = (122, 179, 62, 255)
def collidable(self):
return False
class Food(Structure):
"""
Class representing food.
"""
color = (179, 122, 62, 255)
def collidable(self):
return False
|
Python
| 0.000019 |
@@ -709,16 +709,20 @@
olor = (
+62,
179, 122
@@ -722,20 +722,16 @@
79, 122,
- 62,
255)%0A%0A
|
738ec72f78847bb31c89305247fcbe2d994117f0
|
Optimize case ObjectMixin.setUp
|
feder/cases/tests.py
|
feder/cases/tests.py
|
from django.core.urlresolvers import reverse
from django.test import RequestFactory, TestCase
from feder.monitorings.factories import MonitoringFactory
from feder.cases.models import Case
from feder.users.factories import UserFactory
from feder.institutions.factories import InstitutionFactory
from feder.main.mixins import PermissionStatusMixin
class ObjectMixin(object):
def setUp(self):
self.factory = RequestFactory()
self.user = UserFactory(username="john")
self.monitoring = self.permission_object = MonitoringFactory(user=self.user)
self.institution = InstitutionFactory()
self.case = Case.objects.create(name="blabla",
monitoring=self.monitoring,
institution=self.institution,
user=self.user)
class CaseListViewTestCase(ObjectMixin, PermissionStatusMixin, TestCase):
status_anonymous = 200
status_no_permission = 200
def get_url(self):
return reverse('cases:list')
class CaseDetailViewTestCase(ObjectMixin, PermissionStatusMixin, TestCase):
status_anonymous = 200
status_no_permission = 200
def get_url(self):
return reverse('cases:details', kwargs={'slug': self.case.slug})
class CaseCreateViewTestCase(ObjectMixin, PermissionStatusMixin, TestCase):
permission = ['monitorings.add_case', ]
def get_url(self):
return reverse('cases:create', kwargs={'monitoring': str(self.monitoring.pk)})
class CaseUpdateViewTestCase(ObjectMixin, PermissionStatusMixin, TestCase):
permission = ['monitorings.change_case', ]
def get_url(self):
return reverse('cases:update', kwargs={'slug': self.case.slug})
class CaseDeleteViewTestCase(ObjectMixin, PermissionStatusMixin, TestCase):
permission = ['monitorings.delete_case', ]
def get_url(self):
return reverse('cases:delete', kwargs={'slug': self.case.slug})
|
Python
| 0.001096 |
@@ -65,24 +65,8 @@
port
- RequestFactory,
Tes
@@ -86,189 +86,35 @@
der.
-monitorings.factories import MonitoringFactory%0Afrom feder.cases.models import Case%0Afrom feder.users.factories import UserFactory%0Afrom feder.institutions.factories import Institution
+users.factories import User
Fact
@@ -169,16 +169,51 @@
usMixin%0A
+from .factories import CaseFactory%0A
%0A%0Aclass
@@ -258,48 +258,8 @@
f):%0A
- self.factory = RequestFactory()%0A
@@ -320,376 +320,82 @@
elf.
-monitoring = self.permission_object = MonitoringFactory(user=self.user)%0A self.institution = InstitutionFactory()%0A self.case = Case.objects.create(name=%22blabla%22,%0A monitoring=self.monitoring,%0A institution=self.institution,%0A user=self.user)
+case = CaseFactory()%0A self.permission_object = self.case.monitoring
%0A%0A%0Ac
@@ -1035,16 +1035,21 @@
tr(self.
+case.
monitori
|
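The optimization replaces hand-built model objects with a single CaseFactory() call. The factory's definition is not part of this commit; a sketch of what such a factory_boy declaration can look like, demonstrated on a plain class instead of Django models (assumes the factory_boy package is installed):
import factory

class Case(object):
    def __init__(self, name, monitoring):
        self.name = name
        self.monitoring = monitoring

class CaseFactory(factory.Factory):
    class Meta:
        model = Case

    name = factory.Sequence(lambda n: 'case-%d' % n)
    monitoring = 'monitoring-stub'  # the real factory would build a Monitoring

case = CaseFactory()
print(case.name, case.monitoring)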
8f280cece4d59e36ebfeb5486f25c7ac92718c13
|
Clean it up a bit
|
third_problem.py
|
third_problem.py
|
letters = 'bcdfghjklmnpqrtvwxyzBCDFGHJKLMNPQRTVWXYZ'
phrase = input()
output = ''
vowels = ''
phrase = phrase.replace(' ', '')
for char in phrase:
if char in letters:
output += char
else:
vowels += char
print(output)
print(vowels)
|
Python
| 0.000027 |
@@ -1,15 +1,17 @@
-letters
+not_vowel
= 'bcdf
@@ -66,16 +66,17 @@
input()%0A
+%0A
output =
@@ -92,16 +92,32 @@
s = ''%0A%0A
+# Remove sapces%0A
phrase =
@@ -181,15 +181,17 @@
in
-letters
+not_vowel
:%0A
@@ -210,16 +210,43 @@
+= char
+ # Add non vowel to output
%0A els
@@ -270,16 +270,40 @@
+= char
+ # Add vowels to vowels
%0A%0Aprint(
@@ -323,8 +323,9 @@
(vowels)
+%0A
|
ac0d1036e56e8c24945abedbc372c717b5d7064a
|
improve import style.
|
zcode/constants.py
|
zcode/constants.py
|
"""Common Numerical and Physical Constants.
"""
import numpy as np
import astropy as ap
import astropy.constants
import astropy.cosmology
from astropy.cosmology import WMAP9 as cosmo
# Fundamental Constants
# ---------------------
NWTG = ap.constants.G.cgs.value
SPLC = ap.constants.c.cgs.value
MSOL = ap.constants.M_sun.cgs.value
LSOL = ap.constants.L_sun.cgs.value
RSOL = ap.constants.R_sun.cgs.value
PC = ap.constants.pc.cgs.value
AU = ap.constants.au.cgs.value
YR = ap.units.year.to(ap.units.s)
MELC = ap.constants.m_e.cgs.value
MPRT = ap.constants.m_p.cgs.value
H0 = cosmo.H0.cgs.value # Hubble Constants at z=0.0
HPAR = cosmo.H0.value/100.0
OMEGA_M = cosmo.Om0
OMEGA_B = cosmo.Ob0
OMEGA_DM = cosmo.Odm0
RHO_CRIT = cosmo.critical_density0.cgs.value
# Higher order constants
# ----------------------
# Thomson-Scattering (Electron-Scattering) cross-section
try:
SIGMA_T = ap.constants.sigma_T.cgs.value
except:
SIGMA_T = 6.652458734e-25 # cm^2 (i.e. per electron)
# Electron-Scattering Opacity ($\kappa_{es} = n_e \sigma_T / \rho = \mu_e \sigma_T / m_p$)
# Where $\mu_e$ is the mean-mass per electron, for a total mass-density $\rho$.
KAPPA_ES = SIGMA_T/MPRT
# Derived Constants
# -----------------
PIFT = 4.0*np.pi/3.0 # (4.0/3.0)*Pi
SCHW = 2*NWTG/(SPLC*SPLC) # Schwarzschild Constant (2*G/c^2)
HTAU = 1.0/H0 # Hubble Time - 1/H0 [sec]
MYR = 1.0e6*YR
GYR = 1.0e9*YR
KPC = 1.0e3*PC
MPC = 1.0e6*PC
GPC = 1.0e9*PC
|
Python
| 0 |
@@ -132,16 +132,18 @@
smology%0A
+#
from ast
@@ -178,16 +178,48 @@
as cosmo
+%0Acosmo = astropy.cosmology.WMAP9
%0A%0A# Fund
|
4e6370bf4a76c0d510346124ea5aa49de0667372
|
Add motor configurations for new brushless motors
|
pyfrc/physics/motor_cfgs.py
|
pyfrc/physics/motor_cfgs.py
|
from collections import namedtuple
MotorModelConfig = namedtuple(
"MotorModelConfig",
[
"name",
"nominalVoltage",
"freeSpeed",
"freeCurrent",
"stallTorque",
"stallCurrent",
],
)
MotorModelConfig.__doc__ = """
Configuration parameters useful for simulating a motor. Typically these
parameters can be obtained from the manufacturer via a data sheet or other
specification.
RobotPy contains MotorModelConfig objects for many motors that are commonly
used in FRC. If you find that we're missing a motor you care about, please
file a bug report and let us know!
.. note:: The motor configurations that come with pyfrc are defined using the
pint units library. See :ref:`units`
"""
MotorModelConfig.name.__doc__ = "Descriptive name of motor"
MotorModelConfig.nominalVoltage.__doc__ = "Nominal voltage for the motor"
MotorModelConfig.freeSpeed.__doc__ = "No-load motor speed (``1 / [time]``)"
MotorModelConfig.freeCurrent.__doc__ = "No-load motor current"
MotorModelConfig.stallTorque.__doc__ = (
"Stall torque (``[length]**2 * [mass] / [time]**2``)"
)
MotorModelConfig.stallCurrent.__doc__ = "Stall current"
from .units import units
NOMINAL_VOLTAGE = 12 * units.volts
#: Motor configuration for CIM
MOTOR_CFG_CIM = MotorModelConfig(
"CIM",
NOMINAL_VOLTAGE,
5310 * units.cpm,
2.7 * units.amps,
2.42 * units.N_m,
133 * units.amps,
)
#: Motor configuration for Mini CIM
MOTOR_CFG_MINI_CIM = MotorModelConfig(
"MiniCIM",
NOMINAL_VOLTAGE,
5840 * units.cpm,
3.0 * units.amps,
1.41 * units.N_m,
89.0 * units.amps,
)
#: Motor configuration for Bag Motor
MOTOR_CFG_BAG = MotorModelConfig(
"Bag",
NOMINAL_VOLTAGE,
13180 * units.cpm,
1.8 * units.amps,
0.43 * units.N_m,
53.0 * units.amps,
)
#: Motor configuration for 775 Pro
MOTOR_CFG_775PRO = MotorModelConfig(
"775Pro",
NOMINAL_VOLTAGE,
18730 * units.cpm,
0.7 * units.amps,
0.71 * units.N_m,
134 * units.amps,
)
#: Motor configuration for Andymark RS 775-125
MOTOR_CFG_775_125 = MotorModelConfig(
"RS775-125",
NOMINAL_VOLTAGE,
5800 * units.cpm,
1.6 * units.amps,
0.28 * units.N_m,
18.0 * units.amps,
)
#: Motor configuration for Banebots RS 775
MOTOR_CFG_BB_RS775 = MotorModelConfig(
"RS775",
NOMINAL_VOLTAGE,
13050 * units.cpm,
2.7 * units.amps,
0.72 * units.N_m,
97.0 * units.amps,
)
#: Motor configuration for Andymark 9015
MOTOR_CFG_AM_9015 = MotorModelConfig(
"AM-9015",
NOMINAL_VOLTAGE,
14270 * units.cpm,
3.7 * units.amps,
0.36 * units.N_m,
71.0 * units.amps,
)
#: Motor configuration for Banebots RS 550
MOTOR_CFG_BB_RS550 = MotorModelConfig(
"RS550",
NOMINAL_VOLTAGE,
19000 * units.cpm,
0.4 * units.amps,
0.38 * units.N_m,
84.0 * units.amps,
)
del units
|
Python
| 0 |
@@ -2907,16 +2907,458 @@
mps,%0A)%0A%0A
+#: Motor configuration for NEO 550 Brushless Motor%0AMOTOR_CFG_NEO_550 = MotorModelConfig(%0A %22NEO 550%22,%0A NOMINAL_VOLTAGE,%0A 11000 * units.cpm,%0A 1.4 * units.amps,%0A 0.97 * units.N_m,%0A 100 * units.amps,%0A)%0A%0A#: Motor configuration for Falcon 500 Brushless Motor%0AMOTOR_CFG_FALCON_500 = MotorModelConfig(%0A %22Falcon 500%22,%0A NOMINAL_VOLTAGE,%0A 6380 * units.cpm,%0A 1.5 * units.amps,%0A 4.69 * units.N_m,%0A 257 * units.amps,%0A)%0A%0A
del unit
|
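The config fields are plain data-sheet numbers; downstream code derives the usual DC-motor constants from them. A back-of-envelope sketch using plain floats in place of the pint quantities (values are the CIM row above; the formulas are the standard first-order motor model, not pyfrc's internals):
from collections import namedtuple
import math

MotorModelConfig = namedtuple('MotorModelConfig', [
    'name', 'nominalVoltage', 'freeSpeed', 'freeCurrent',
    'stallTorque', 'stallCurrent'])

cim = MotorModelConfig('CIM', 12.0, 5310.0, 2.7, 2.42, 133.0)

resistance = cim.nominalVoltage / cim.stallCurrent      # ohms
free_speed_rad = cim.freeSpeed / 60.0 * 2.0 * math.pi   # rpm -> rad/s
kv = free_speed_rad / (cim.nominalVoltage
                       - cim.freeCurrent * resistance)  # rad/s per volt
kt = cim.stallTorque / cim.stallCurrent                 # N*m per amp
print(resistance, kv, kt)  # ~0.090 ohm, ~47.3 rad/s/V, ~0.018 N*m/A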
68206c7f6b396d03470b0499716181f978996364
|
implement url_fix()
|
feedservice/utils.py
|
feedservice/utils.py
|
#
# This file is part of my.gpodder.org.
#
# my.gpodder.org is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# my.gpodder.org is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public
# License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with my.gpodder.org. If not, see <http://www.gnu.org/licenses/>.
#
import time
from itertools import chain
def parse_time(value):
"""
>>> parse_time(10)
10
>>> parse_time('05:10') #5*60+10
310
>>> parse_time('1:05:10') #60*60+5*60+10
3910
"""
if value is None:
raise ValueError('None value in parse_time')
if isinstance(value, int):
# Don't need to parse already-converted time value
return value
if value == '':
raise ValueError('Empty value in parse_time')
for format in ('%H:%M:%S', '%M:%S'):
try:
t = time.strptime(value, format)
return t.tm_hour * 60*60 + t.tm_min * 60 + t.tm_sec
except ValueError, e:
continue
return int(value)
# from http://stackoverflow.com/questions/2892931/longest-common-substring-from-more-than-two-strings-python
# this does not increase asymptotical complexity
# but can still waste more time than it saves.
def shortest_of(strings):
return min(strings, key=len)
def longest_substr(strings):
"""
Returns the longest common substring of the given strings
"""
substr = ""
if not strings:
return substr
reference = shortest_of(strings) #strings[0]
length = len(reference)
#find a suitable slice i:j
for i in xrange(length):
#only consider strings long at least len(substr) + 1
for j in xrange(i + len(substr) + 1, length):
candidate = reference[i:j]
if all(candidate in text for text in strings):
substr = candidate
return substr
def flatten(l):
return chain.from_iterable(l)
|
Python
| 0.000018 |
@@ -1,10 +1,34 @@
+# -*- coding: utf-8 -*-%0A
#%0A
-
# This f
@@ -762,16 +762,47 @@
t chain%0A
+import urllib%0Aimport urlparse%0A%0A
%0A%0Adef pa
@@ -2322,16 +2322,16 @@
ten(l):%0A
-
retu
@@ -2356,8 +2356,897 @@
able(l)%0A
+%0A%0A# http://stackoverflow.com/questions/120951/how-can-i-normalize-a-url-in-python%0Adef url_fix(s, charset='utf-8'):%0A %22%22%22Sometimes you get an URL by a user that just isn't a real%0A URL because it contains unsafe characters like ' ' and so on. This%0A function can fix some of the problems in a similar way browsers%0A handle data entered by the user:%0A%0A %3E%3E%3E url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffskl%C3%A4rung)')%0A 'http://de.wikipedia.org/wiki/Elf%2520%2528Begriffskl%25C3%25A4rung%2529'%0A%0A :param charset: The target charset for the URL if the url was%0A given as unicode string.%0A %22%22%22%0A if isinstance(s, unicode):%0A s = s.encode(charset, 'ignore')%0A scheme, netloc, path, qs, anchor = urlparse.urlsplit(s)%0A path = urllib.quote(path, '/%25')%0A qs = urllib.quote_plus(qs, ':&=')%0A return urlparse.urlunsplit((scheme, netloc, path, qs, anchor))%0A
|
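The added url_fix() splits the URL, quotes the path and query separately, and reassembles it. The same pipeline spelled out step by step (Python 2, matching the module; the expected output is the decoded form of the doctest above):
# -*- coding: utf-8 -*-
import urllib
import urlparse

s = u'http://de.wikipedia.org/wiki/Elf (Begriffsklärung)'.encode('utf-8')
scheme, netloc, path, qs, anchor = urlparse.urlsplit(s)
path = urllib.quote(path, '/%')    # keep '/' and existing escapes
qs = urllib.quote_plus(qs, ':&=')  # keep query-string structure
print(urlparse.urlunsplit((scheme, netloc, path, qs, anchor)))
# http://de.wikipedia.org/wiki/Elf%20%28Begriffskl%C3%A4rung%29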
50e1edf150a715367e46d28f15ac8958bcc18644
|
Remove unused import
|
tinymce/views.py
|
tinymce/views.py
|
# coding: utf-8
# License: MIT, see LICENSE.txt
"""
django-tinymce4-lite views
"""
from __future__ import absolute_import
import json
import logging
from django import VERSION
from django.core.urlresolvers import reverse
from django.http import JsonResponse, HttpResponse
from django.shortcuts import render
from django.template.loader import render_to_string
from django.views.decorators.csrf import csrf_exempt
from django.utils.html import strip_tags
from django.conf import settings
from jsmin import jsmin
__all__ = ['spell_check', 'css', 'filebrowser']
logging.basicConfig(format='[%(asctime)s] %(module)s: %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
@csrf_exempt
def spell_check(request):
"""
Implements the TinyMCE 4 spellchecker protocol
:param request: Django http request with JSON-RPC payload from TinyMCE 4
containing a language code and a text to check for errors.
:type request: django.http.request.HttpRequest
:return: Django http response containing JSON-RPC payload
with spellcheck results for TinyMCE 4
:rtype: django.http.JsonResponse
"""
data = json.loads(request.body.decode('utf-8'))
output = {'id': data['id']}
error = None
try:
import enchant
from enchant.checker import SpellChecker
if data['params']['lang'] not in enchant.list_languages():
error = 'Missing {0} dictionary!'.format(data['params']['lang'])
raise RuntimeError(error)
checker = SpellChecker(data['params']['lang'])
checker.set_text(strip_tags(data['params']['text']))
output['result'] = {checker.word: checker.suggest() for err in checker}
except ImportError:
error = 'The pyenchant package is not installed!'
logger.exception(error)
except RuntimeError:
logger.exception(error)
except Exception:
error = 'Unknown error!'
logger.exception(error)
if error is not None:
output['error'] = error
return JsonResponse(output)
def css(request):
"""
Custom CSS for TinyMCE 4 widget
By default it fixes widget's position in Django Admin
:param request: Django http request
:type request: django.http.request.HttpRequest
:return: Django http response with CSS file for TinyMCE 4
:rtype: django.http.HttpResponse
"""
if 'grappelli' in settings.INSTALLED_APPS:
margin_left = 0
elif VERSION[0] == 1 and VERSION[1] <= 8:
margin_left = 110 # For old style admin
else:
margin_left = 170 # For Django >= 1.9 style admin
content = render_to_string('tinymce/tinymce4.css',
context={'margin_left': margin_left},
request=request)
response = HttpResponse(content, content_type='text/css')
response['Cache-Control'] = 'no-store'
return response
def filebrowser(request):
"""
JavaScript callback function for `django-filebrowser`_
:param request: Django http request
:type request: django.http.request.HttpRequest
:return: Django http response with filebrowser JavaScript code for for TinyMCE 4
:rtype: django.http.HttpResponse
.. _django-filebrowser: https://github.com/sehmaschine/django-filebrowser
"""
try:
fb_url = request.build_absolute_uri(reverse('fb_browse'))
except:
fb_url = request.build_absolute_uri(reverse('filebrowser:fb_browse'))
content = jsmin(render_to_string('tinymce/filebrowser.js',
context={'fb_url': fb_url},
request=request))
return HttpResponse(content, content_type='application/javascript')
|
Python
| 0.000001 |
@@ -270,44 +270,8 @@
nse%0A
-from django.shortcuts import render%0A
from
|
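The spell_check view speaks a small JSON-RPC-style protocol; the shapes below are reconstructed from the code (the words and suggestions are illustrative):
import json

request_body = json.dumps({
    'id': 1,
    'params': {'lang': 'en_US', 'text': '<p>Helo wrold</p>'},
})
# Success: suggestions keyed by the misspelled word.
response_ok = {'id': 1,
               'result': {'Helo': ['Hello', 'Help'],
                          'wrold': ['world', 'would']}}
# Failure: 'result' is replaced by an 'error' string, e.g.
# {'id': 1, 'error': 'Missing en_US dictionary!'}
print(request_body, json.dumps(response_ok))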
b64e7714e581cfc0c0a0d0f055b22c5edca27e24
|
Raise KeyboardInterrupt to allow the run to handle logout
|
susumutakuan.py
|
susumutakuan.py
|
import discord
import asyncio
import os
import signal
import sys
#Set up Client State
CLIENT_TOKEN=os.environ['TOKEN']
#Create Discord client
client = discord.Client()
#Handle shutdown gracefully
def sigterm_handler(signum, frame):
print("Logging out...")
client.logout()
print('Shutting down...')
sys.exit(1)
#Register SIGTERM Handler
signal.signal(signal.SIGTERM, sigterm_handler)
@client.event
async def on_ready():
print('Logged in as')
print(client.user.name)
print(client.user.id)
print('------')
@client.event
async def on_message(message):
if message.content.startswith('!test'):
counter = 0
tmp = await client.send_message(message.channel, 'Calculating messages...')
async for log in client.logs_from(message.channel, limit=100):
if log.author == message.author:
counter += 1
await client.edit_message(tmp, 'You have {} messages.'.format(counter))
elif message.content.startswith('!sleep'):
await asyncio.sleep(5)
await client.send_message(message.channel, 'Done sleeping')
client.run(CLIENT_TOKEN)
|
Python
| 0 |
@@ -192,16 +192,22 @@
cefully%0A
+async
def sigt
@@ -271,23 +271,31 @@
-client.logout()
+raise KeyboardInterrupt
%0A
|
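The point of the change: cleanup should live in one place, the exception handler around the run loop, and the SIGTERM handler merely funnels into it (discord.py's run() reacts to KeyboardInterrupt by logging out). The bare pattern, assuming a POSIX platform:
import signal
import time

def sigterm_handler(signum, frame):
    raise KeyboardInterrupt          # reuse the Ctrl-C exit path

signal.signal(signal.SIGTERM, sigterm_handler)

try:
    while True:
        time.sleep(1)                # stand-in for client.run(CLIENT_TOKEN)
except KeyboardInterrupt:
    print('Logging out...')          # cleanup happens exactly once, here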
03e72a0ea5f303e7ef7dd18266ecac4fc0090cc8
|
Add section name in Introduction and Conclusion
|
zested/tutorial.py
|
zested/tutorial.py
|
import json
import os
from PySide import QtGui
class TutorialPart:
'''
A Tutorial is a recursive structure of TutorialPart.
'''
def __init__(self, title, path, have_introduction=False, have_conclusion=False):
self.title = title
self.path = path
self.have_introduction = have_introduction
self.have_conclusion = have_conclusion
self.children = []
def __str__(self):
return self.title
def append(self, other):
self.children.append(other)
@property
def conclusion(self):
if self.have_conclusion:
return os.path.join(self.path, "conclusion.md")
else:
return None
@property
def introduction(self):
if self.have_introduction:
return os.path.join(self.path, "introduction.md")
else:
return None
def tutorial_from_manifest(path):
with open(path, encoding="utf8") as fd:
manifest = json.load(fd)
base_path = os.path.dirname(path)
tutorial = TutorialPart(manifest["title"], base_path, True, True)
if manifest["type"] == "BIG":
tutorial = load_big_tuto(tutorial, manifest)
elif manifest["type"] == "MINI":
tutorial = load_mini_tuto(tutorial, manifest)
return tutorial
def load_big_tuto(tutorial, manifest):
for part in manifest["parts"]:
tuto_part = TutorialPart(part["title"],
os.path.join(tutorial.path, os.path.dirname(part["introduction"])),
True, True)
for chapter in part["chapters"]:
tuto_chapter = TutorialPart(chapter["title"],
os.path.join(tutorial.path, os.path.dirname(chapter["introduction"])),
True, True)
for extract in chapter["extracts"]:
tuto_chapter.append(TutorialPart(extract["title"],
os.path.join(tutorial.path, extract["text"])
))
tuto_part.append(tuto_chapter)
tutorial.append(tuto_part)
return tutorial
def load_mini_tuto(tutorial, manifest):
for extract in manifest["chapter"]["extracts"]:
tutorial.append(TutorialPart(extract["title"],
os.path.join(tutorial.path, extract["text"])
))
return tutorial
def render_tutorial(tutorial, widget, callback):
'''
Render a tutorial class to the tree view widget
The callback function is called when an item is double clicked
'''
content = widget.findChild(QtGui.QWidget, "tutorial_content")
content.clear()
create_tutorial_tree_view(content.invisibleRootItem(), tutorial, root=True)
title = widget.findChild(QtGui.QWidget, "tutorial_title")
MAX_TUTO_TITLE_LENGHT = 32
content.itemDoubleClicked.connect(callback)
if len(tutorial.title) > MAX_TUTO_TITLE_LENGHT:
title.setText(tutorial.title[:MAX_TUTO_TITLE_LENGHT] + "…")
else:
title.setText(tutorial.title)
class TutorialItem(QtGui.QTreeWidgetItem):
'''
A TutorialItem hold the tutorial item file path and title
'''
def __init__(self, path, title):
super().__init__()
self.path = path
self.title = title
def create_tutorial_tree_view(widget, section, root=False):
'''
Recursive function to render the tutorial class
'''
if not root:
child = TutorialItem(section.path, section.title)
widget.addChild(child)
child.setText(0, str(section))
root_widget = child if not root else widget
if section.introduction is not None:
child = TutorialItem(section.introduction, "Introduction")
root_widget.addChild(child)
child.setText(0, "Introduction")
for child_section in section.children:
create_tutorial_tree_view(root_widget, child_section)
if section.conclusion is not None:
child = TutorialItem(section.conclusion, "Conclusion")
root_widget.addChild(child)
child.setText(0, "Conclusion")
|
Python
| 0 |
@@ -182,23 +182,16 @@
ve_intro
-duction
=False,
@@ -201,20 +201,16 @@
e_conclu
-sion
=False):
@@ -305,23 +305,16 @@
ve_intro
-duction
%0A
@@ -348,20 +348,16 @@
e_conclu
-sion
%0A
@@ -1427,38 +1427,111 @@
s.path.join(
-tutorial.path,
+%0A tutorial.path,%0A
os.path.dir
@@ -1730,32 +1730,24 @@
-
os.path.join
@@ -1747,30 +1747,103 @@
th.join(
-tutorial.path,
+%0A tutorial.path,%0A
os.path
@@ -1874,32 +1874,24 @@
uction%22%5D)),%0A
-
@@ -2094,38 +2094,143 @@
s.path.join(
-tutorial.path,
+%0A tutorial.path,%0A
extract%5B%22te
@@ -3991,31 +3991,66 @@
duction,
- %22Introduction%22
+%0A %22Introduction %E2%80%94 %22 + section.title
)%0A
@@ -4314,29 +4314,60 @@
clusion,
- %22Conclusion%22
+%0A %22Conclusion %E2%80%94 %22 + section.title
)%0A
|
13e30fe6af93bbb48a4795ee22f4f3ba760adc14
|
add get_session_names
|
tmuxback/tmux.py
|
tmuxback/tmux.py
|
# -*- coding:utf-8 -*-
def get_session_names():
"""get session names"""
pass
|
Python
| 0.000002 |
@@ -16,16 +16,124 @@
f-8 -*-%0A
+import subprocess%0Aimport re%0A%0A%0A#tmux commands%0A%0A#list sessions%0ACMD_LIST_SESSIONS='tmux list-sessions -F#S'%0A%0A%0A%0A
def get_
@@ -156,19 +156,38 @@
%0A %22%22%22
-get
+ return a list of tmux
session
@@ -196,16 +196,17 @@
ames
+
%22%22%22%0A
pass
@@ -205,9 +205,174 @@
-pass
+s = subprocess.check_output(CMD_LIST_SESSIONS.split(' '))%0A s = re.sub('%5Cn$','',s)%0A return s.split('%5Cn')%0A%0A#if __name__ == '__main__':%0A# print get_session_names()
%0A
|
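Decoded, the diff shells out to `tmux list-sessions -F#S` and splits the output on newlines. An equivalent with minimal error handling (assumes tmux is on PATH; a non-zero exit, e.g. no server running, yields an empty list):
import subprocess

def get_session_names():
    """Return a list of tmux session names."""
    try:
        out = subprocess.check_output(['tmux', 'list-sessions', '-F#S'])
    except subprocess.CalledProcessError:
        return []
    return out.decode().splitlines()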
2e382c8bff2d0c3733b9b525168254971ca1175e
|
Update atexit function to avoid issues with late binding
|
python/pyspark/shell.py
|
python/pyspark/shell.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
An interactive shell.
This file is designed to be launched as a PYTHONSTARTUP script.
"""
import atexit
import os
import platform
import warnings
from pyspark.context import SparkContext
from pyspark.sql import SparkSession
if os.environ.get("SPARK_EXECUTOR_URI"):
SparkContext.setSystemProperty("spark.executor.uri", os.environ["SPARK_EXECUTOR_URI"])
SparkContext._ensure_initialized() # type: ignore
try:
spark = SparkSession._create_shell_session() # type: ignore
except Exception:
import sys
import traceback
warnings.warn("Failed to initialize Spark session.")
traceback.print_exc(file=sys.stderr)
sys.exit(1)
sc = spark.sparkContext
sql = spark.sql
atexit.register(lambda: sc.stop())
# for compatibility
sqlContext = spark._wrapped
sqlCtx = sqlContext
print(r"""Welcome to
____ __
/ __/__ ___ _____/ /__
_\ \/ _ \/ _ `/ __/ '_/
/__ / .__/\_,_/_/ /_/\_\ version %s
/_/
""" % sc.version)
print("Using Python version %s (%s, %s)" % (
platform.python_version(),
platform.python_build()[0],
platform.python_build()[1]))
print("Spark context Web UI available at %s" % (sc.uiWebUrl))
print("Spark context available as 'sc' (master = %s, app id = %s)." % (sc.master, sc.applicationId))
print("SparkSession available as 'spark'.")
# The ./bin/pyspark script stores the old PYTHONSTARTUP value in OLD_PYTHONSTARTUP,
# which allows us to execute the user's PYTHONSTARTUP file:
_pythonstartup = os.environ.get('OLD_PYTHONSTARTUP')
if _pythonstartup and os.path.isfile(_pythonstartup):
with open(_pythonstartup) as f:
code = compile(f.read(), _pythonstartup, 'exec')
exec(code)
|
Python
| 0.99902 |
@@ -1490,16 +1490,28 @@
egister(
+(lambda sc:
lambda:
@@ -1520,16 +1520,21 @@
.stop())
+(sc))
%0A%0A# for
|
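The late-binding pitfall this commit works around, in isolation: a plain lambda looks up `sc` when it finally runs, so rebinding `sc` later changes which context gets stopped. Applying an outer lambda (as the diff does) captures the current value:
callbacks = []
sc = 'first-context'
callbacks.append(lambda: sc)                   # late binding
callbacks.append((lambda sc: lambda: sc)(sc))  # value frozen now
sc = 'second-context'
print(callbacks[0]())  # second-context -- the surprise
print(callbacks[1]())  # first-context  -- what atexit should stop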
a635663dbfd164382803e201c79ce7647d0a4a1e
|
Mark bursting commands as not requiring an answer
|
txircd/server.py
|
txircd/server.py
|
from twisted.internet.protocol import Factory
from twisted.protocols.amp import AMP, Command, Integer, String, AmpBox, ListOf, IncompatibleVersions
from txircd.utils import CaseInsensitiveDictionary, now
current_version = 200 # Protocol version 0.2.0
# The protocol version should be incremented with changes of the protocol
# Breaking changes should be avoided except for major version upgrades or when it's otherwise unavoidable
# Keep a list of versions the current protocol is compatible with
# This list must include the current protocol version
compatible_versions = [ 200 ]
class RemoteUser(object):
class RemoteSocket(object):
class RemoteTransport(object):
def loseConnection(self):
pass
def __init__(self, secure):
self.transport = self.RemoteTransport()
self.secure = secure
def __init__(self, ircd, nick, ident, host, gecos, ip, server, secure):
self.ircd = ircd
self.socket = self.RemoteSocket(secure)
self.password = None
self.nickname = nick
self.username = ident
self.realname = gecos
self.hostname = host
self.ip = ip
self.server = server
self.signon = now()
self.lastactivity = now()
self.mode = {}
self.channels = CaseInsensitiveDictionary()
self.registered = 0
self.metadata = { # split into metadata key namespaces, see http://ircv3.atheme.org/specification/metadata-3.2
"server": {},
"user": {},
"client": {},
"ext": {},
"private": {}
}
self.cache = {}
self.cmd_extra = False # used by the command handler to determine whether the extras hook was called during processing
def register(self):
pass
def send_isupport(self):
pass # TODO?
def disconnect(self, reason):
pass # TODO
def connectionLost(self, reason):
pass # TODO
def handleCommand(self, command, prefix, params):
pass # TODO
def commandExtraHook(self, command, data):
pass # TODO
def sendMessage(self, command, *parameter_list, **kw):
pass # TODO
def setMetadata(self, namespace, key, value):
pass # TODO
def delMetadata(self, namespace, key):
pass # TODO
def prefix(self):
return "{}!{}@{}".format(self.nickname, self.username, self.hostname)
def hasAccess(self, channel, level):
if channel not in self.channels or level not in self.ircd.prefixes:
return None
status = self.status(channel)
if not status:
return False
return self.ircd.prefixes[status[0]][1] >= self.ircd.prefixes[level][1]
def status(self, channel):
if channel not in self.channels:
return ""
return self.channels[channel]["status"]
def certFP(self):
pass # TODO
def modeString(self, user):
modes = [] # Since we're appending characters to this string, it's more efficient to store the array of characters and join it rather than keep making new strings
params = []
for mode, param in self.mode.iteritems():
modetype = self.ircd.user_mode_type[mode]
if modetype > 0:
modes.append(mode)
if param:
params.append(self.ircd.user_modes[modetype][mode].showParam(user, self, param))
return ("+{} {}".format("".join(modes), " ".join(params)) if params else "+{}".format("".join(modes)))
def send_motd(self):
pass # TODO?
def send_lusers(self):
pass # TODO?
def report_names(self, channel):
pass # TODO?
def join(self, channel):
pass # TODO
def leave(self, channel):
pass # TODO
def nick(self, newNick):
pass # TODO
# ERRORS
class ServerAlreadyConnected(Exception):
pass
class ServerMismatchedIP(Exception):
pass
class ServerPasswordIncorrect(Exception):
pass
class ServerNoLink(Exception):
pass
class ModuleMismatch(Exception):
pass
# COMMANDS
class IntroduceServer(Command):
arguments = [
("name", String()), # server name
("password", String()), # server password specified in configuration
("description", String()), # server description
("version", Integer()), # protocol version
("commonmodules", ListOf(String()))
]
errors = {
ServerAlreadyConnected: "SERVER_ALREADY_CONNECTED",
ServerMismatchedIP: "SERVER_MISMATCHED_IP",
ServerPasswordIncorrect: "SERVER_PASS_INCORRECT",
ServerNoLink: "SERVER_NO_LINK",
ModuleMismatch: "MODULE_MISMATCH"
}
class BurstUsers(Command):
arguments = [
("users", ListOf(AmpBox()))
]
class BurstChannels(Command):
arguments = [
("channels", ListOf(AmpBox()))
]
class ServerProtocol(AMP):
def __init__(self):
self.ircd = self.factory # Let's stick to convention here.
self.burstComplete = []
def newServer(self, name, password, description, version, commonmodules):
if version not in compatible_versions:
raise IncompatibleVersions ("Protocol version {} is not compatible with this version".format(version))
commonModDiff = commonmodules ^ self.ircd.common_modules
if commonModDiff:
raise ModuleMismatch ("Common modules are not matched between servers: {}".format(", ".join(commonModDiff)))
if name not in self.ircd.servconfig["serverlinks"]:
raise ServerNoLink ("There is no link data in the configuration file for the server trying to link.")
linkData = self.ircd.servconfig["serverlinks"][name]
ip = self.transport.getPeer().host
if "ip" not in linkData or ip != linkData["ip"]:
raise ServerMismatchedIP ("The IP address for this server does not match the one in the configuration.")
if "incoming_password" not in linkData or password != linkData["incoming_password"]:
raise ServerPasswordIncorrect ("The password provided by the server does not match the one in the configuration.")
# TODO
IntroduceServer.responder(newServer)
def burstUsers(self, users):
pass # TODO
BurstUsers.responder(burstUsers)
def burstChannels(self, channels):
pass # TODO
BurstChannels.responder(burstChannels)
class ServerFactory(Factory):
protocol = ServerProtocol
def __init__(self, ircd):
self.ircd = ircd
# TODO: extend Factory to form ServerFactory as the base for all this; see app.py
|
Python
| 0.000015 |
@@ -4807,16 +4807,43 @@
H%22%0A %7D
+%0A requiresAnswer = False
%0A%0Aclass
@@ -4922,16 +4922,43 @@
))%0A %5D
+%0A requiresAnswer = False
%0A%0Aclass
@@ -5038,22 +5038,49 @@
Box()))%0A
-
%5D%0A
+ requiresAnswer = False%0A
%0A%0Aclass
|
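In Twisted AMP, requiresAnswer = False turns a Command into fire-and-forget: callRemote sends the box without an _ask tag and returns None instead of a Deferred, which suits one-way burst traffic. Minimal form (imports as already used in the module above):
from twisted.protocols.amp import AmpBox, Command, ListOf

class BurstUsers(Command):
    arguments = [('users', ListOf(AmpBox()))]
    requiresAnswer = False  # sender does not wait for a reply box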
10e6c53a39d3ee57d855ada1aa6e9d620f094465
|
add 'save' command
|
track-cli.py
|
track-cli.py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import sys
import zmq
import logging
log = logging.getLogger('track_cli')
def print_info():
log.info("zeromq version: %s" % zmq.zmq_version())
log.info("pyzmq version: %s" % zmq.pyzmq_version())
def send_request(request):
context = zmq.Context()
req_socket = context.socket(zmq.REQ)
req_socket.connect('tcp://127.0.0.1:3456')
req_socket.send_json(request)
return req_socket.recv_json()
def handle_result(result):
if 'type' in result and result['type'] == 'error':
raise Exception('server replied with error: "%s"' % result['what'])
print(result)
def main():
args = sys.argv[1:]
if args == []:
print('no command provided')
return
elif args == ['quit']:
request = {'type': 'quit'}
elif args == ['version']:
request = {'type': 'version'}
elif args == ['apps']:
request = {'type': 'apps'}
elif args == ['current']:
request = {'type': 'current'}
elif args == ['rules']:
request = {'type': 'rules'}
elif args == ['help']:
print(['quit', 'version', 'apps', 'current', 'rules'])
sys.exit()
else:
raise Exception('command not handled: %s' % args)
try:
result = send_request(request)
handle_result(result)
except zmq.ZMQError as e:
log.error(e)
return
except KeyboardInterrupt:
log.info("got keyboard interrupt - exit")
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
main()
|
Python
| 0.000009 |
@@ -1072,24 +1072,87 @@
: 'rules'%7D%0A%0A
+ elif args == %5B'save'%5D:%0A request = %7B'type': 'save'%7D%0A%0A
elif arg
|
0dea5f2b6a2e6d702167c3415d10a47275e30601
|
update the version to 0.6.0
|
zignal/__init__.py
|
zignal/__init__.py
|
"""
This is the zignal library
@author: Ronny Andersson ([email protected])
@copyright: (c) 2013 Ronny Andersson
@license: MIT
"""
__version__ = "0.5.0"
from .audio import *
from . import filters
from . import measure
from . import music
from . import sndcard
__all__ = [
'filters',
'measure',
'music',
'sndcard',
]
__all__.extend(audio.__all__) #@UndefinedVariable
|
Python
| 0.000005 |
@@ -148,9 +148,9 @@
%220.
-5
+6
.0%22%0A
|
ba5de58c2646f79c8a0fb5ec9e84b9b1d4cd0dd1
|
add GZipMiddleware.
|
pykeg/src/pykeg/settings.py
|
pykeg/src/pykeg/settings.py
|
# Pykeg main settings file.
# Note: YOU SHOULD NOT NEED TO EDIT THIS FILE. Instead, see the instructions in
# common_settings.py.example.
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.humanize',
'django.contrib.markup',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.sites',
'django_extensions',
'imagekit',
'pykeg.beerdb',
'pykeg.billing',
'pykeg.contrib.soundserver',
'pykeg.contrib.twitter',
'pykeg.core',
'pykeg.web',
'pykeg.web.api',
'pykeg.web.account',
'pykeg.web.kegweb',
'registration',
'socialregistration',
'south',
'django_nose', # must be after south
)
AUTH_PROFILE_MODULE = "core.UserProfile"
LOGIN_REDIRECT_URL = "/account/"
### Kegweb specific stuff
ROOT_URLCONF = 'pykeg.web.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates".
# Always use forward slashes, even on Windows.
"web/templates",
)
SITE_ID = 1
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT.
# Example: "http://media.lawrence.com"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/admin_media/'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.request',
'django.contrib.messages.context_processors.messages',
'pykeg.web.context_processors.enabled_features',
'pykeg.web.context_processors.kbsite',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'pykeg.web.middleware.KegbotSiteMiddleware',
'django.middleware.doc.XViewMiddleware',
)
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
)
### django.contrib.messages
MESSAGE_STORAGE = 'django.contrib.messages.storage.fallback.FallbackStorage'
### django-registration
ACCOUNT_ACTIVATION_DAYS = 3
# Bogus default values (to prevent djangofb from choking if unavailable);
# replace with site-specific values in common_settings.py, if desired.
FACEBOOK_API_KEY = ''
FACEBOOK_SECRET_KEY = ''
### Kegbot
# Number of last drinks to show on the main page.
KEGWEB_LAST_DRINKS_COUNT = 10
### Twitter
TWITTER_CONSUMER_KEY = ''
TWITTER_CONSUMER_SECRET_KEY =''
TWITTER_REQUEST_TOKEN_URL = 'https://api.twitter.com/oauth/request_token'
TWITTER_ACCESS_TOKEN_URL = 'https://api.twitter.com/oauth/access_token'
TWITTER_AUTHORIZATION_URL = 'https://api.twitter.com/oauth/authorize'
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
try:
import common_settings
from common_settings import *
except ImportError:
print 'Error: Could not find common_settings.py'
print 'Most likely, this means kegbot has not been configured properly.'
print 'Consult setup documentation. Exiting...'
import sys
sys.exit(1)
### Optional stuff
if FACEBOOK_API_KEY and FACEBOOK_SECRET_KEY:
INSTALLED_APPS += ('pykeg.contrib.facebook',)
MIDDLEWARE_CLASSES += (
'socialregistration.middleware.FacebookMiddleware',
)
AUTHENTICATION_BACKENDS += (
'socialregistration.auth.FacebookAuth',
)
if TWITTER_CONSUMER_KEY and TWITTER_CONSUMER_SECRET_KEY:
AUTHENTICATION_BACKENDS += (
'socialregistration.auth.TwitterAuth',
)
|
Python
| 0 |
@@ -2109,24 +2109,69 @@
CLASSES = (%0A
+ 'django.middleware.gzip.GZipMiddleware',%0A
'django.
|
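Ordering note on the added middleware: Django applies response middleware bottom-up, so listing GZipMiddleware first means it compresses last, after every other middleware has seen the uncompressed body:
MIDDLEWARE_CLASSES = (
    'django.middleware.gzip.GZipMiddleware',  # first: compresses on the way out
    'django.middleware.common.CommonMiddleware',
    # ... remaining middleware unchanged ...
)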
079109b06865dfd03dc563d9606ba51767449bff
|
Use argparse instead of hard-coded values.
|
testbuild.py
|
testbuild.py
|
import os
import platform
import subprocess
# Group these here for transparency and easy editing.
USED_REPOSITORY = 'CRYENGINE'
USED_TARGET = 'win_x86'
USED_CONFIG = 'Profile'
USED_BRANCH = 'release'
USED_VS_VERSION = '14.0'
TARGET_TO_SLN_TAG = {
'win_x86': 'Win32',
'win_x64': 'Win64'
}
def get_installed_vs_versions():
"""
Query the registry to find installed VS versions. Assumes that C++ support has been installed.
Throws an exception if the expected version of VS is not present.
:return: None
"""
import winreg
# Open the Visual Studio registry key.
reg = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
vskey = winreg.OpenKey(reg, r'SOFTWARE\Microsoft\VisualStudio')
subkeys = []
# Read all the subkeys
try:
i = 0
while True:
subkeys.append(winreg.EnumKey(vskey, i))
i += 1
except OSError:
pass
# If a subkey includes '.0' it's almost certainly a version number. I've yet to see one without that.
available_versions = [version for version in subkeys if '.0' in version]
if USED_VS_VERSION not in available_versions:
raise OSError('Visual Studio version {} is not installed (available: {}).'.format(USED_VS_VERSION,
available_versions))
def main():
"""
Get code from GitHub and perform an incremental build.
Assumes that the required SDKs directory is called 'SDKs' and is directly adjacent to the repo checkout directory.
"""
repository = USED_REPOSITORY
branch = USED_BRANCH
target = USED_TARGET
config = USED_CONFIG
build_dir = '_'.join([target, config.lower()])
steps = {
'clone': ['git', 'clone', 'https://github.com/CRYTEK-CRYENGINE/{repo}.git'.format(repo=repository)],
'pull': ['git', '-C', repository, 'pull'],
'checkout': ['git', 'checkout', branch],
# Quietly remove files that aren't tracked by git but leave the build folder in place (for incremental builds).
'clean': ['git', 'clean', '-dfq', '-e', 'Code/SDKs', '-e', build_dir],
# For now, assume Windows for convenience.
'configure': ['cmake', r'-DCMAKE_TOOLCHAIN_FILE=Tools\CMake\toolchain\windows\WindowsPC-MSVC.cmake', '..'],
'build': [os.path.normpath(r'C:\Program Files (x86)\MSBuild\{}\Bin\MSBuild.exe'.format(USED_VS_VERSION)),
'/property:Configuration={}'.format(config),
'CryEngine_CMake_{}.sln'.format(TARGET_TO_SLN_TAG.get(target))]
}
if os.path.exists(repository):
runstep(steps, 'pull')
else:
runstep(steps, 'clone')
os.chdir(repository)
runstep(steps, 'checkout')
runstep(steps, 'clean')
if os.path.exists(os.path.join('Code', 'SDKs')):
if platform.system() == 'Windows':
subprocess.check_call(['rmdir', r'Code\SDKs'], shell=True)
if not os.path.exists(os.path.join('Code', 'SDKs')):
if platform.system() == 'Windows':
subprocess.check_call(['mklink', '/J', r'Code\SDKs', r'..\SDKs'], shell=True)
print('Changing to build directory: {}'.format(build_dir))
if not os.path.exists(build_dir):
os.mkdir(build_dir)
os.chdir(build_dir)
runstep(steps, 'configure')
runstep(steps, 'build')
os.chdir('..')
if platform.system() == 'Windows':
subprocess.check_call(['rmdir', r'Code\SDKs'], shell=True)
runstep(steps, 'clean')
def runstep(steps, name):
"""
Run the command from *steps* corresponding to *name*.
:param steps: Dictionary of steps that can be run.
:param name: Name of the step to run.
"""
print('Running {} step with command "{}".'.format(name, ' '.join(steps[name])))
subprocess.check_call(steps[name])
if __name__ == '__main__':
main()
|
Python
| 0 |
@@ -3,16 +3,32 @@
port os%0A
+import argparse%0A
import p
@@ -58,191 +58,8 @@
ss%0A%0A
-%0A# Group these here for transparency and easy editing.%0AUSED_REPOSITORY = 'CRYENGINE'%0AUSED_TARGET = 'win_x86'%0AUSED_CONFIG = 'Profile'%0AUSED_BRANCH = 'release'%0AUSED_VS_VERSION = '14.0'%0A%0A
TARG
@@ -945,31 +945,30 @@
%0A if
-USED_VS_VERSION
+args.vcversion
not in
@@ -1077,31 +1077,30 @@
.format(
-USED_VS_VERSION
+args.vcversion
,%0A
@@ -1215,16 +1215,61 @@
ef main(
+repository, branch, target, config, vcversion
):%0A %22
@@ -1461,115 +1461,8 @@
%22%22%22%0A
- repository = USED_REPOSITORY%0A branch = USED_BRANCH%0A target = USED_TARGET%0A config = USED_CONFIG
%0A
@@ -1502,16 +1502,8 @@
nfig
-.lower()
%5D)%0A%0A
@@ -2192,23 +2192,17 @@
mat(
-USED_VS_VERSION
+vcversion
)),%0A
@@ -3629,15 +3629,761 @@
_':%0A
-main(
+parser = argparse.ArgumentParser('Test compilation of a CRYENGINE git repository.')%0A parser.add_argument('--repository', default='CRYENGINE', help='Repository name.')%0A parser.add_argument('--branch', default='release', help='Branch name.')%0A parser.add_argument('--target', default='win_x86', help='Compilation target.')%0A parser.add_argument('--vcversion', default='14.0', help='VC++ Version')%0A parser.add_argument('--config', default='profile', choices=%5B'debug', 'profile', 'release'%5D,%0A help='Compilation configuration (.')%0A args = parser.parse_args()%0A%0A main(repository=args.repository,%0A branch=args.branch,%0A target=args.target,%0A config=args.config,%0A vcversion=args.vcversion
)%0A
|
3645751422fb80e45e94c6647d4ca020f5db2d4a
|
Add regression test case.
|
st2api/tests/unit/controllers/v1/test_triggertypes.py
|
st2api/tests/unit/controllers/v1/test_triggertypes.py
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
from tests import FunctionalTest
http_client = six.moves.http_client
TRIGGER_0 = {
'name': 'st2.test.triggertype0',
'pack': 'dummy_pack_1',
'description': 'test trigger',
'payload_schema': {'tp1': None, 'tp2': None, 'tp3': None},
'parameters_schema': {}
}
TRIGGER_1 = {
'name': 'st2.test.triggertype1',
'pack': 'dummy_pack_2',
'description': 'test trigger',
'payload_schema': {'tp1': None, 'tp2': None, 'tp3': None},
}
TRIGGER_2 = {
'name': 'st2.test.triggertype3',
'pack': 'dummy_pack_3',
'description': 'test trigger',
'payload_schema': {'tp1': None, 'tp2': None, 'tp3': None},
'parameters_schema': {'param1': {'type': 'object'}}
}
class TestTriggerTypeController(FunctionalTest):
@classmethod
def setUpClass(cls):
# super's setUpClass does the following:
# - create DB connections, sets up a fresh DB etc.
# - creates all the controllers by instantiating the pecan app.
# The WebHookController ends up registering a TriggerType in its __init__
# which is why when this test is run individually it simply falls apart.
# When run in a suite the pecan app creation is somehow optimized and since
# this is not the first test to run, it's all good as some other test performs
# the DB cleanup. This is the unfortunate story of why these two lines in this
# exact order are needed. There are perhaps other ways to fix the problem
# however this is the most localized solution for now.
super(TestTriggerTypeController, cls).setUpClass()
cls._establish_connection_and_re_create_db()
def test_get_all(self):
post_resp = self.__do_post(TRIGGER_0)
trigger_id_0 = self.__get_trigger_id(post_resp)
post_resp = self.__do_post(TRIGGER_1)
trigger_id_1 = self.__get_trigger_id(post_resp)
resp = self.app.get('/v1/triggertypes')
self.assertEqual(resp.status_int, http_client.OK)
self.assertEqual(len(resp.json), 2, 'Get all failure.')
self.__do_delete(trigger_id_0)
self.__do_delete(trigger_id_1)
def test_get_one(self):
post_resp = self.__do_post(TRIGGER_1)
trigger_id = self.__get_trigger_id(post_resp)
get_resp = self.__do_get_one(trigger_id)
self.assertEqual(get_resp.status_int, http_client.OK)
self.assertEqual(self.__get_trigger_id(get_resp), trigger_id)
self.__do_delete(trigger_id)
def test_get_one_fail(self):
resp = self.__do_get_one('1')
self.assertEqual(resp.status_int, http_client.NOT_FOUND)
def test_post(self):
post_resp = self.__do_post(TRIGGER_1)
self.assertEqual(post_resp.status_int, http_client.CREATED)
self.__do_delete(self.__get_trigger_id(post_resp))
def test_post_with_params(self):
post_resp = self.__do_post(TRIGGER_2)
self.assertEqual(post_resp.status_int, http_client.CREATED)
self.__do_delete(self.__get_trigger_id(post_resp))
def test_post_duplicate(self):
post_resp = self.__do_post(TRIGGER_1)
org_id = self.__get_trigger_id(post_resp)
self.assertEqual(post_resp.status_int, http_client.CREATED)
post_resp_2 = self.__do_post(TRIGGER_1)
self.assertEqual(post_resp_2.status_int, http_client.CONFLICT)
self.assertEqual(post_resp_2.json['conflict-id'], org_id)
self.__do_delete(org_id)
def test_put(self):
post_resp = self.__do_post(TRIGGER_1)
update_input = post_resp.json
update_input['description'] = 'updated description.'
put_resp = self.__do_put(self.__get_trigger_id(post_resp), update_input)
self.assertEqual(put_resp.status_int, http_client.OK)
self.__do_delete(self.__get_trigger_id(put_resp))
def test_put_fail(self):
post_resp = self.__do_post(TRIGGER_1)
update_input = post_resp.json
# If the id in the URL is incorrect the update will fail since id in the body is ignored.
put_resp = self.__do_put(1, update_input)
self.assertEqual(put_resp.status_int, http_client.NOT_FOUND)
self.__do_delete(self.__get_trigger_id(post_resp))
def test_delete(self):
post_resp = self.__do_post(TRIGGER_1)
del_resp = self.__do_delete(self.__get_trigger_id(post_resp))
self.assertEqual(del_resp.status_int, http_client.NO_CONTENT)
@staticmethod
def __get_trigger_id(resp):
return resp.json['id']
def __do_get_one(self, trigger_id):
return self.app.get('/v1/triggertypes/%s' % trigger_id, expect_errors=True)
def __do_post(self, trigger):
return self.app.post_json('/v1/triggertypes', trigger, expect_errors=True)
def __do_put(self, trigger_id, trigger):
return self.app.put_json('/v1/triggertypes/%s' % trigger_id, trigger, expect_errors=True)
def __do_delete(self, trigger_id):
return self.app.delete('/v1/triggertypes/%s' % trigger_id)
|
Python
| 0.000005 |
@@ -2832,16 +2832,470 @@
lure.')%0A
+%0A # ?pack query filter%0A resp = self.app.get('/v1/triggertypes?pack=doesnt-exist-invalid')%0A self.assertEqual(resp.status_int, http_client.OK)%0A self.assertEqual(len(resp.json), 0)%0A%0A resp = self.app.get('/v1/triggertypes?pack=%25s' %25 (TRIGGER_0%5B'pack'%5D))%0A self.assertEqual(resp.status_int, http_client.OK)%0A self.assertEqual(len(resp.json), 1)%0A self.assertEqual(resp.json%5B0%5D%5B'pack'%5D, TRIGGER_0%5B'pack'%5D)%0A%0A
|
c91240cd43c4f714a404cf5f2ce566dad290c0c5
|
Add url mapping for ProjectEntrySumsAPIView
|
trex/urls.py
|
trex/urls.py
|
# -*- coding: utf-8 -*-
#
# (c) 2014 Bjoern Ricks <[email protected]>
#
# See LICENSE coming with the source of 'trex' for details.
#
from django.conf.urls import patterns, url
from django.views.generic import TemplateView
from trex.views import project
urlpatterns = patterns(
'',
url(r"^$",
TemplateView.as_view(template_name="index.html"),
name="index",
),
url(r"^api/1/projects/?$",
project.ProjectListCreateAPIView.as_view(),
name="project-list"),
url(r"^api/1/projects/(?P<pk>[0-9]+)/$",
project.ProjectDetailAPIView.as_view(),
name="project-detail"),
url(r"^api/1/projects/(?P<pk>[0-9]+)/entries/?$",
project.ProjectEntriesListAPIView.as_view(),
name="project-entries-list"),
url(r"^api/1/projects/(?P<pk>[0-9]+)/tags/?$",
project.ProjectTagsListAPIView.as_view(),
name="project-tags-list"),
url(r"^api/1/projects/(?P<pk>[0-9]+)/users/?$",
project.ProjectUsersListAPIView.as_view(),
name="project-users-list"),
url(r"^api/1/projects/(?P<pk>[0-9]+)/zeiterfassung/?$",
project.ProjectZeiterfassungAPIView.as_view(),
name="project-zeiterfassung"),
url(r"^api/1/entries/(?P<pk>[0-9]+)/?$",
project.EntryDetailAPIView.as_view(),
name="entry-detail"),
url(r"^api/1/tags/(?P<pk>[0-9]+)/?$",
project.TagDetailAPIView.as_view(),
name="tag-detail"),
)
|
Python
| 0 |
@@ -769,32 +769,180 @@
entries-list%22),%0A
+ url(r%22%5Eapi/1/projects/(?P%3Cpk%3E%5B0-9%5D+)/entries/sums/?$%22,%0A project.ProjectEntrySumsAPIView.as_view(),%0A name=%22project-entries-sums%22),%0A
url(r%22%5Eapi/1
|
80f26e7ef3987dddd0219f5500a9cdcfd6b6c51d
|
fix bug for searching.
|
torcms/handlers/search_handler.py
|
torcms/handlers/search_handler.py
|
# -*- coding:utf-8 -*-
from config import CMS_CFG
from torcms.core.base_handler import BaseHandler
from torcms.core.tool.whoosh_tool import YunSearch
from torcms.model.category_model import MCategory
from torcms.core.tools import logger
class SearchHandler(BaseHandler):
def initialize(self):
super(SearchHandler, self).initialize()
self.ysearch = YunSearch()
def get(self, *args, **kwargs):
url_str = args[0]
url_arr = self.parse_url(url_str)
if url_str == '':
self.index()
elif len(url_arr) == 2:
self.search(url_arr[0], url_arr[1])
elif len(url_arr) == 3:
self.search_cat(url_arr[0], url_arr[1], int(url_arr[2]))
else:
kwd = {
'info': '页面未找到',
}
self.render('html/404.html',
kwd=kwd,
userinfo=self.userinfo)
def index(self):
tag_enum = MCategory.query_pcat()
self.render('index/search.html', userinfo=self.userinfo,
cat_enum=tag_enum,
tag_enum=tag_enum)
def post(self, url_str=''):
catid = self.get_argument('searchcat').strip()
keyword = self.get_argument('keyword')
logger.info('Searching ... ')
logger.info(' catid: {uid}'.format(uid=catid))
logger.info(' keywords: {kw}'.format(kw=keyword))
if catid == '':
self.redirect('/search/{0}/1'.format(keyword))
else:
self.redirect('/search/{0}/{1}/1'.format(catid, keyword))
def search(self, keyword, p_index=''):
if p_index == '' or p_index == '-1':
current_page_number = 1
else:
current_page_number = int(p_index)
res_all = self.ysearch.get_all_num(keyword)
results = self.ysearch.search_pager(
keyword,
page_index=current_page_number,
doc_per_page=CMS_CFG['list_num']
)
page_num = int(res_all / CMS_CFG['list_num'])
kwd = {'title': '查找结果',
'pager': '',
'count': res_all,
'keyword': keyword,
'current_page': current_page_number}
self.render('doc/search/search.html',
kwd=kwd,
srecs=results,
pager=self.gen_pager_bootstrap_url('/search/{0}'.format(keyword),
page_num,
current_page_number),
userinfo=self.userinfo,
cfg=CMS_CFG)
def gen_pager_bootstrap_url(self, cat_slug, page_num, current):
'''
:param cat_slug: The category
:param page_num: The total number of the pages.
:param current: current page index.
:return:
'''
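# Illustrative (hypothetical values): cat_slug='/search/foo', page_num=3,
# current=2 renders links to /search/foo/1 (home and previous), page links
# /search/foo/1 .. /search/foo/3, and /search/foo/3 (next and last).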
pager = ''
if page_num == 1 or page_num == 0:
pager = ''
elif page_num > 1:
pager_mid = ''
pager_pre = ''
pager_next = ''
pager_last = ''
pager_home = ''
pager = '<ul class="pagination">'
if current > 1:
pager_home = '''
<li class="{0}" name='fenye' onclick='change(this);'
><a href="{1}/{2}">首页</a></li>'''.format('', cat_slug, 1)
pager_pre = ''' <li class="{0}" name='fenye' onclick='change(this);'>
<a href="{1}/{2}">上一页</a></li>'''.format('', cat_slug, current - 1)
if current > 5:
cur_num = current - 4
else:
cur_num = 1
if page_num > 10 and cur_num < page_num - 10:
show_num = cur_num + 10
else:
show_num = page_num + 1
for num in range(cur_num, show_num):
if num == current:
checkstr = 'active'
else:
checkstr = ''
tmp_str_df = '''
<li class="{0}" name='fenye' onclick='change(this);'>
<a href="{1}/{2}">{2}</a></li>'''.format(checkstr, cat_slug, num)
pager_mid += tmp_str_df
if current < page_num:
pager_next = '''
<li class="{0}" name='fenye' onclick='change(this);'
><a href="{1}/{2}">下一页</a></li>'''.format('', cat_slug, current + 1)
pager_last = '''
<li class="{0}" name='fenye' onclick='change(this);'
><a href="{1}/{2}">末页</a></li>'''.format('', cat_slug, page_num)
pager += pager_home + pager_pre + pager_mid + pager_next + pager_last
pager += '</ul>'
else:
pass
return pager
def search_cat(self, catid, keyword, p_index=1):
catid = 'sid' + catid
logger.info('-' * 20)
logger.info('search cat')
logger.info('catid: {0}'.format(catid))
logger.info('keyword: {0}'.format(keyword))
# catid = ''
res_all = self.ysearch.get_all_num(keyword, catid=catid)
logger.info('all num: {0}'.format(res_all))
results = self.ysearch.search_pager(keyword,
catid=catid,
page_index=p_index,
doc_per_page=20)
page_num = int(res_all / 20)
kwd = {'title': '查找结果',
'pager': '',
'count': res_all,
'keyword': keyword,
# 'catname': '文档' if catid == '0000' else MCategory.get_by_uid(catid).name
}
self.render('doc/search/search.html',
kwd=kwd,
srecs=results,
pager=self.gen_pager_bootstrap_url('/search/{0}/{1}'.format(catid, keyword),
page_num,
p_index),
userinfo=self.userinfo,
cfg=CMS_CFG)
|
Python
| 0 |
@@ -1165,21 +1165,25 @@
-catid
+post_data
= self.
@@ -1190,37 +1190,96 @@
get_
-argument('searchcat').strip()
+post_data()%0A%0A catid = post_data%5B'searchcat'%5D if 'searchcat' in post_data else ''%0A
%0A
@@ -1297,25 +1297,17 @@
d =
-self.get_argument
+post_data
('ke
|
7cbee5e817b6d2bbf4fbcbf8cf1cf327bdbabc9c
|
rename locator_string to package_id
|
cms/djangoapps/contentstore/management/commands/migrate_to_split.py
|
cms/djangoapps/contentstore/management/commands/migrate_to_split.py
|
"""
Django management command to migrate a course from the old Mongo modulestore
to the new split-Mongo modulestore.
"""
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from xmodule.modulestore import Location
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.split_migrator import SplitMigrator
from xmodule.modulestore import InvalidLocationError
from xmodule.modulestore.django import loc_mapper
def user_from_str(identifier):
"""
Return a user identified by the given string. The string could be an email
address, or a stringified integer corresponding to the ID of the user in
the database. If no user could be found, a User.DoesNotExist exception
will be raised.
"""
try:
user_id = int(identifier)
except ValueError:
return User.objects.get(email=identifier)
else:
return User.objects.get(id=user_id)
class Command(BaseCommand):
"Migrate a course from old-Mongo to split-Mongo"
help = "Migrate a course from old-Mongo to split-Mongo"
args = "location email <locator>"
def parse_args(self, *args):
"""
Return a three-tuple of (location, user, locator_string).
If the user didn't specify a locator string, the third return value
will be None.
"""
if len(args) < 2:
raise CommandError(
"migrate_to_split requires at least two arguments: "
"a location and a user identifier (email or ID)"
)
try:
location = Location(args[0])
except InvalidLocationError:
raise CommandError("Invalid location string {}".format(args[0]))
try:
user = user_from_str(args[1])
except User.DoesNotExist:
raise CommandError("No user found identified by {}".format(args[1]))
try:
locator_string = args[2]
except IndexError:
locator_string = None
return location, user, locator_string
def handle(self, *args, **options):
location, user, locator_string = self.parse_args(*args)
migrator = SplitMigrator(
draft_modulestore=modulestore('default'),
direct_modulestore=modulestore('direct'),
split_modulestore=modulestore('split'),
loc_mapper=loc_mapper(),
)
migrator.migrate_mongo_course(location, user, locator_string)
|
Python
| 0.000355 |
@@ -1926,30 +1926,26 @@
-locator_string
+package_id
= args%5B
@@ -1986,30 +1986,26 @@
-locator_string
+package_id
= None%0A
@@ -2036,30 +2036,26 @@
, user,
-locator_string
+package_id
%0A%0A de
@@ -2112,30 +2112,26 @@
, user,
-locator_string
+package_id
= self.
@@ -2445,24 +2445,20 @@
, user,
-locator_string
+package_id
)%0A
|
c61187382c968c3018f88637806874ddd0b63b71
|
add log for debug
|
web/views.py
|
web/views.py
|
import requests
from django.http import Http404
from django.shortcuts import render, render_to_response, redirect
# Create your views here.
from django.template import RequestContext
from web.fetch import Fetcher
from settings import LEAN_CLOUD_ID, LEAN_CLOUD_SECRET
import leancloud
# @api_view(('GET',))
# def api_root(request, format=None):
# return Response({
# 'chairmans': reverse('chairman-list', request=request, format=format),
# })
def get_index(request):
# response = requests.get('http://127.0.0.1:8000/api/chairmans/')
# chairmans = response.json()
leancloud.init(LEAN_CLOUD_ID, LEAN_CLOUD_SECRET)
Chairman = leancloud.Object.extend('Chairman')
query = Chairman.query
query.select('type', 'href', 'id', 'title', 'img', 'name', 'num')
query.add_descending('num')
query_list = query.find()
chairmans = []
for chairman in query_list:
chairman_view = {}
# plain dicts do not support attribute assignment; use key access instead
chairman_view['type'] = chairman.get('type')
chairman_view['href'] = chairman.get('href')
chairman_view['id'] = chairman.get('id')
chairman_view['title'] = chairman.get('title')
chairman_view['img'] = chairman.get('img')
chairman_view['name'] = chairman.get('name')
chairman_view['num'] = chairman.get('num')
chairmans.append(chairman_view)
return render_to_response('index.html', locals())
def fetch(request):
leancloud.init(LEAN_CLOUD_ID, LEAN_CLOUD_SECRET)
query = leancloud.Query('Chairman')
allDataCompleted = False
batch = 0
limit = 1000
while not allDataCompleted:
query.limit(limit)
query.skip(batch * limit)
query.add_ascending('createdAt')
resultList = query.find()
if len(resultList) < limit:
allDataCompleted = True
leancloud.Object.destroy_all(resultList)
batch += 1
fetcher = Fetcher()
fetcher.fetch_cc()
fetcher.fetch_douyu()
fetcher.fetch_longzhu()
fetcher.fetch_quanmin()
fetcher.fetch_xiongmao()
fetcher.fetch_zhanqi()
fetcher.fetch_huya()
for chairman in fetcher.chairmans:
try:
chairman.save()
except Exception, e:
print e
return redirect("/")
|
Python
| 0.000001 |
@@ -933,24 +933,48 @@
uery_list:%0D%0A
+ print chairman%0D%0A
chai
|
547c8c6a1aef80ce43451479e0b79b51db5b717a
|
Fix header types
|
pyshop/views/credentials.py
|
pyshop/views/credentials.py
|
# -*- coding: utf-8 -*-
"""
PyShop Credentials Views.
"""
from __future__ import absolute_import, print_function, unicode_literals
import logging
import base64
from pyramid.httpexceptions import HTTPFound
from pyramid.url import resource_url, route_url
from pyramid.security import remember, forget
from pyramid.response import Response
from pyshop.helpers.i18n import trans as _
from pyshop.models import DBSession, User
from pyshop.compat import unicode
from .base import View
log = logging.getLogger(__name__)
class Login(View):
def render(self):
login_url = resource_url(self.request.context, self.request, 'login')
referrer = self.request.url
# never use the login form itself as came_from
if referrer == login_url:
referrer = '/'
came_from = self.request.params.get('came_from', referrer)
login = self.request.params.get('user.login', '')
if 'form.submitted' in self.request.params:
password = self.request.params.get('user.password', u'')
if password:
if User.by_ldap_credentials(self.session, login, password,
self.request.registry.settings) is not None:
log.info('login %r succeed' % login)
headers = remember(self.request, login)
return HTTPFound(location=came_from,
headers=headers)
if User.by_credentials(self.session, login, password) is not None:
log.info('login %r succeed' % login)
headers = remember(self.request, login)
return HTTPFound(location=came_from,
headers=headers)
return {'came_from': came_from,
'user': User(login=login),
}
class Logout(View):
def render(self):
return HTTPFound(location=route_url('index', self.request),
headers=forget(self.request))
def authbasic(request):
"""
Basic HTTP authentication for pyshop repository upload access
"""
if len(request.environ.get('HTTP_AUTHORIZATION','')) > 0:
auth = request.environ.get('HTTP_AUTHORIZATION')
scheme, data = auth.split(None, 1)
assert scheme.lower() == 'basic'
data = base64.b64decode(data)
if not isinstance(data, unicode):
data = data.decode('utf-8')
username, password = data.split(':', 1)
if User.by_ldap_credentials(DBSession(), username, password, request.registry.settings):
return HTTPFound(location=request.url)
if User.by_credentials(DBSession(), username, password):
return HTTPFound(location=request.url)
return Response(status=401,
headerlist=[('WWW-Authenticate',
'Basic realm="pyshop repository access"'
)],
)
|
Python
| 0.000002 |
@@ -2834,16 +2834,17 @@
rlist=%5B(
+b
'WWW-Aut
@@ -2888,16 +2888,17 @@
+b
'Basic r
|
42dfb6850be83ba17b9e649a499926d31f1afa95
|
Fixing command.
|
windmill/browser/chrome.py
|
windmill/browser/chrome.py
|
import commands
import tempfile
import logging
import signal
import subprocess
import sys, os
import urlparse
import windmill
logger = logging.getLogger(__name__)
import safari
class Chrome(safari.Safari):
def __init__(self):
self.chrome_binary = windmill.settings['CHROME_BINARY']
self.test_url = windmill.settings['TEST_URL']
# def create_redirect(self):
# self.redirection_page = tempfile.mktemp(suffix='.html')
# f = open(self.redirection_page, 'w')
# test_url = windmill.get_test_url(windmill.settings['TEST_URL'])
# f.write( html_redirection.replace('{replace}', test_url) )
# f.flush() ; f.close()
# def set_proxy_mac(self):
# """Set local Proxy"""
# self.netsetup_binary = windmill.settings['NETWORKSETUP_BINARY']
# interface_name = find_default_interface_name()
# uri = urlparse.urlparse(self.test_url)
# set_proxy_command = ' '.join([ self.netsetup_binary,
# '-setwebproxy',
# '"'+interface_name+'"',
# 'localhost',
# str(windmill.settings['SERVER_HTTP_PORT'])
# ])
# commands.getoutput(set_proxy_command)
# enable_proxy_command = ' '.join([ self.netsetup_binary,
# '-setwebproxystate',
# '"'+interface_name+'"',
# 'on'
# ])
# commands.getoutput(enable_proxy_command)
# self.create_redirect()
# self.interface_name = interface_name
#
# def unset_proxy_mac(self):
# commands.getoutput(' '.join([self.netsetup_binary, '-setwebproxystate', '"'+self.interface_name+'"', 'off']))
def set_proxy_windows(self):
import ie
self.ie_obj = ie.InternetExplorer()
self.ie_obj.set_proxy()
#
# def unset_proxy_windows(self):
# self.ie_obj.unset_proxy()
def start(self):
"""Start Chrome"""
# if sys.platform == 'darwin':
# self.set_proxy_mac()
if os.name == 'nt' or sys.platform == 'cygwin':
self.set_proxy_windows()
kwargs = {'stdout':sys.stdout ,'stderr':sys.stderr, 'stdin':sys.stdin}
self.p_handle = subprocess.Popen([self.chrome_binary, '--homepage', self.test_url], **kwargs)
logger.info([self.chrome_binary, self.redirection_page])
def kill(self, kill_signal=None):
"""Stop Chrome"""
# if sys.platform == 'darwin':
# self.unset_proxy_mac()
if os.name == 'nt' or sys.platform == 'cygwin':
self.unset_proxy_windows()
try:
self.p_handle.kill(group=True)
except:
logger.error('Cannot kill Chrome')
# def stop(self):
# self.kill(signal.SIGTERM)
#
# def is_alive(self):
# if self.p_handle.poll() is None:
# return False
# return True
|
Python
| 0.999753 |
@@ -2417,41 +2417,18 @@
-self.p_h
+comm
and
-le
=
-subprocess.Popen(
%5Bsel
@@ -2475,81 +2475,119 @@
_url
-%5D, **kwargs)%0A%09 logger.info(%5Bself.chrome_binary, self.redirection_page%5D
++'/windmill-serv/start.html'%5D%0A%09 self.p_handle = subprocess.Popen(command, **kwargs)%0A%09 logger.info(command
)%0A%0A%09
|
0921f78660b7b0784ebe2fa586dd54551704699e
|
Fix fix_gir.py to work with ginterfaces and to support delegates.
|
tools/fix_gir.py
|
tools/fix_gir.py
|
#!/usr/bin/python
from xml.dom import minidom
def purge_white_space_and_fix_namespace(node, indent=0):
if getattr(node, "tagName", None) == "namespace":
name = node.getAttribute("name")
node.setAttribute("name", name.lstrip('_'))
for child in [c for c in node.childNodes]:
if child.nodeType == node.TEXT_NODE or \
getattr(child, "tagName", None) == "annotation":
node.removeChild(child)
continue
purge_white_space_and_fix_namespace(child, indent+1)
def find_ancestor(node, name):
if getattr(node, "tagName") == name:
return node
parent = getattr(node, "parentNode", None)
if not parent:
return None
return find_ancestor(parent, name)
def fix_vfuncs(dom):
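# Illustrative (hypothetical record): for a record named 'FooClass' with
# c:type 'FooClass', each contained callback gains a leading 'self' parameter
# typed as Foo (name[:-5]), matching the vfunc ABI.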
for f in dom.getElementsByTagName("callback"):
record = find_ancestor(f, "record")
if not record:
continue
name = record.getAttribute("name")
cname = record.getAttribute("c:type")
assert(name.endswith("Class"))
assert(cname.endswith("Class"))
params = (f.getElementsByTagName("parameters") or [None])[0]
if not params:
params = dom.createElement("parameters")
f.insertBefore(params, f.firstChild)
param = dom.createElement("parameter")
param.setAttribute("name", "self")
param.setAttribute("transfer-ownership", "none")
ptype = dom.createElement("type")
ptype.setAttribute("name", name[:-5])
ptype.setAttribute("c:type", cname[:-5])
param.appendChild(ptype)
params.insertBefore(param, params.firstChild)
if __name__ == "__main__":
import sys
if len(sys.argv) != 2:
print "supply a gir file"
sys.exit(1)
dom = minidom.parse(sys.argv[-1])
purge_white_space_and_fix_namespace(dom)
fix_vfuncs(dom)
print dom.toprettyxml(indent=" ", newl="\n")
|
Python
| 0 |
@@ -586,16 +586,22 @@
tagName%22
+, None
) == nam
@@ -1037,16 +1037,42 @@
%22Class%22)
+ or name.endswith(%22Iface%22)
)%0A
@@ -1103,16 +1103,42 @@
%22Class%22)
+ or name.endswith(%22Iface%22)
)%0A%0A
|
2c4cf38b7251ddffaba954f71bbca9632123777c
|
Add start_wizbit_server function that registers and publishes a wizbit server.
|
wizd/wizd.py
|
wizd/wizd.py
|
#! /usr/bin/env python
import sys
import socket
import os
import SimpleXMLRPCServer
import gobject
from wizbit import ServicePublisher, ServiceBrowser
WIZBIT_SERVER_PORT = 3492
from wizbit import Shares, Directory
from wizbit import *
class WizbitServer():
def getShares(self):
shares = Shares.getShares()
return shares
def getPath(self, uuid):
shares = Shares.getShares()
for id, shareId, directory in shares:
if uuid == id:
break
return directory
def getLastConfSeen(self, uuid):
return "Not Implemented"
def setConf(self, uuid, confstring):
return "Not Implemented"
def getConf(self, uuid):
shares = Shares.getShares()
for id, shareId, directory in shares:
if uuid == id:
break
wizpath = Paths(directory)
file = open(wizpath.getWizconf(), "r")
conf = file.read()
file.close()
return conf
def pushNotify(self, dirId, remoteShareId, host):
#For every local directory with the same shareId, pull
#from the remote directory
shares = Shares.getShares()
for id, localShareId, directory in shares:
if localShareId == remoteShareId:
Directory.pull(directory, dirId, host)
def server_socket_error():
print "RPC server socket was disconnected, exiting"
global main_loop
main_loop.quit()
def server_callback(source, cb_condition, server):
server.handle_request()
def main(args):
servinst = WizbitServer()
server = SimpleXMLRPCServer.SimpleXMLRPCServer(("", 0))
server.register_instance(servinst)
server.register_introspection_functions()
gobject.io_add_watch (server.fileno(), gobject.IO_IN, server_callback, server)
gobject.io_add_watch (server.fileno(), gobject.IO_HUP | gobject.IO_ERR, server_socket_error)
sp = ServicePublisher("Wizbit", "_wizbit._tcp", server.server_address[1])
sb = ServiceBrowser("_wizbit._tcp")
global main_loop
main_loop = gobject.MainLoop()
try:
main_loop.run()
except KeyboardInterrupt:
pass
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
Python
| 0 |
@@ -1336,21 +1336,33 @@
()%0A%0A
+%0A
def
-main(args
+start_wizbit_server(
):%0A%09
@@ -1811,33 +1811,82 @@
_tcp%22)%0A%0A
-%09global main_loop
+%0Adef main(args):%0A%09global main_loop%0A%0A start_wizbit_server()%0A
%0A%09main_l
|
c24eeba615fee01121041faac84a106d4d5dd3cf
|
Add check for equality.
|
pypeline/common/versions.py
|
pypeline/common/versions.py
|
#!/usr/bin/python
#
# Copyright (c) 2012 Mikkel Schubert <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import re
import types
import subprocess
from pypeline.common.utilities import safe_coerce_to_tuple, try_cast
# Cache used to store the output of cmd-line / function calls
_CALL_CACHE = {}
# Cache used to store Requirement object
_REQUIREMENT_CACHE = {}
class VersionRequirementError(RuntimeError):
pass
def Requirement(call, search, checks, pprint = None, name = None):
"""Returns a singleton Requirement object, based on the parameters,
which may be used to check that version requirements are met for a
given program/utility/module, etc.
Parameters:
name -- Descriptive name for the executable/module/etc. If not
specified, first value in call will be used.
call -- A string, or a tuple containing strings for a system call,
or a tuple containing a function at the first position, and
a set of positional parameters. In the case of system calls,
stdout and stderr are returned as a single string.
search -- A regular expression (string or re object), used to search
the output of the "call". Groups are assumed to represent
version numbers.
pprint -- A function that takes a tuple of version fields and returns
a string, or a format string that may be used to convert such
a tuple. If not specified, the version fields will be converted
to strings and joined by '.'s.
checks -- A callable that carries out any required version-checks. Is
called as "checks(value, pprint). Should raise an exception
(e.g. VersionRequirementError) in the case of requirements
not being met.
Implementation detail: To reduce the need for performing calls or system-
calls multiple times, caches are implemented using the call object as keys.
Thus the same calls should be passed in a manner which allows equality between
the same calls to be established.
"""
call = safe_coerce_to_tuple(call)
key = (call, search, pprint, checks, name)
try:
requirement = _REQUIREMENT_CACHE[key]
except KeyError:
requirement = RequirementObj(*key)
_REQUIREMENT_CACHE[key] = requirement
return requirement
class RequirementObj:
def __init__(self, call, search, pprint, checks, name = None):
self._done = None
self.name = name or call[0]
self._call = call
self._reqs = checks
self._rege = re.compile(search)
self._ppr = pprint
self._version = None
@property
def version(self):
if self._version is None:
output = _do_call(self._call)
match = self._rege.search(output)
if not match:
raise VersionRequirementError("Could not determine version of '%s', searching for %s: %s" \
% (self.name, repr(self._rege.pattern), repr(output)))
self._version = tuple(try_cast(value, int) for value in match.groups())
return self._version
def __call__(self, force = False):
if force or self._done is None:
def _pprint(value):
if not self._ppr:
return ".".join(map(str, value))
elif callable(self._ppr):
return self._ppr(value)
return self._ppr.format(*value)
self._reqs(self.version, _pprint)
self._done = True
class _Check:
def __init__(self, name, *version):
self._version = tuple(version)
self._desc = (str(name), self._version)
def __hash__(self):
return hash(self._desc)
def __cmp__(self, other):
if isinstance(other, _Check):
return cmp(self._desc, other._desc)
return cmp(self.__class__, other.__class__)
def __str__(self):
return "%s%s" % self._desc
def __repr__(self):
return str(self)
class GE(_Check):
def __init__(self, *version):
_Check.__init__(self, "GE", *version)
def __call__(self, value, pprint):
if not value >= self._version:
raise VersionRequirementError("Version must be at least %s, found %s" \
% (pprint(self._version), pprint(value)))
class LT(_Check):
def __init__(self, *version):
_Check.__init__(self, "LE", *version)
def __call__(self, value, pprint):
if not value < self._version:
raise VersionRequirementError("Version must be below %s, found %s" \
% (pprint(self._version), pprint(value)))
class And(_Check):
def __init__(self, *checks):
self._checks = checks
_Check.__init__(self, "Or", *checks)
def __call__(self, value, pprint):
for check in self._checks:
check(value, pprint)
def _run(call):
proc = subprocess.Popen(call, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
stdoutdata, stderrdata = proc.communicate()
return stdoutdata + "\n" + stderrdata
def _do_call(call):
try:
return _CALL_CACHE[call]
except KeyError:
if callable(call[0]):
result = call[0](*call[1:])
else:
result = _run(call)
_CALL_CACHE[call] = result
return result
|
Python
| 0 |
@@ -5129,24 +5129,357 @@
str(self)%0A%0A%0A
+class EQ(_Check):%0A def __init__(self, *version):%0A _Check.__init__(self, %22EQ%22, *version)%0A%0A def __call__(self, value, pprint):%0A if value != self._version:%0A raise VersionRequirementError(%22Version must be %25s, found %25s%22 %5C%0A %25 (pprint(self._version), pprint(value)))%0A%0A
class GE(_Ch
|
5258c7d70796a03361ad865a15fd3896bb7a95f1
|
Fix tests
|
pypeman/tests/test_nodes.py
|
pypeman/tests/test_nodes.py
|
import unittest
import asyncio
class FakeChannel():
def __init__(self):
self.uuid = 'fakeChannel'
class NodesTests(unittest.TestCase):
def setUp(self):
self.loop = asyncio.new_event_loop()
#asyncio.set_event_loop(None)
def test_log_node(self):
""" if Log() node is functionnal """
from pypeman.nodes import Log
from pypeman import message
n = Log()
n.channel = FakeChannel()
m = message.Message()
@asyncio.coroutine
def go():
ret = yield from n.handle(m)
return ret
self.loop.run_until_complete(go())
def test_json_to_python_node(self):
""" if JsonToPython() node is functionnal """
from pypeman.nodes import JsonToPython
from pypeman import message
n = JsonToPython()
n.channel = FakeChannel()
m = message.Message()
m.payload = '{"test":2}'
@asyncio.coroutine
def go():
ret = yield from n.handle(m)
return ret
self.loop.run_until_complete(go())
|
Python
| 0.000003 |
@@ -24,16 +24,31 @@
asyncio%0A
+import logging%0A
%0A%0Aclass
@@ -78,32 +78,74 @@
__init__(self):%0A
+ self.logger = logging.getLogger()%0A
self.uui
|
291bdbc3eef6548a1eebe6d92c7c347f3df7e2b6
|
version bump
|
pyramid_restful/__init__.py
|
pyramid_restful/__init__.py
|
from .settings import reload_api_settings
__version__ = '0.9.1'
VERSION = __version__
def includeme(config):
reload_api_settings(config.registry.settings)
|
Python
| 0.000001 |
@@ -57,11 +57,12 @@
'0.
-9.1
+10.0
'%0A%0AV
|
175cfe45aba554d1544be3ee71bdb8a7b499d879
|
add radius in request
|
wtm/views.py
|
wtm/views.py
|
import urllib2
from lxml import etree
from deform import Form
from pyramid.view import view_config
from wtm.schemas.home import HomeSchema
@view_config(route_name='home', renderer='templates/home.pt')
def home(request):
"""
home page
"""
homeForm = Form(HomeSchema(), buttons=('submit',), action=request.route_path('addContent'))
return {'form': homeForm.render()}
@view_config(route_name='addContent', renderer='json')
def addContent(request):
baseURL = 'http://www.overpass-api.de/api/interpreter'
data = 'node(around:250.0,%s,%s)["amenity"="cafe"];out;' % (request.POST['lat'], request.POST['lon'])
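# Illustrative Overpass QL produced above (hypothetical coordinates):
#   node(around:250.0,41.89,-87.62)["amenity"="cafe"];out;
# i.e. all cafe nodes within 250 m of the given point.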
print data
url = urllib2.Request(baseURL, data)
xmlData = urllib2.urlopen(url).read()
xml = etree.fromstring(xmlData)
for node in xml.xpath('node/tag[@k="name"]'):
print node.get('v')
return ''
|
Python
| 0 |
@@ -554,11 +554,10 @@
und:
-250
+%25s
.0,%25
@@ -607,14 +607,162 @@
ST%5B'
-lat'%5D,
+dist'%5D,%0A request.POST%5B'lat'%5D,%0A
req
|
2313a796842cbe65563a62fe12edec06c4112531
|
Add YEARS_PER_DAY.
|
pyrate/core/ifgconstants.py
|
pyrate/core/ifgconstants.py
|
# This Python module is part of the PyRate software package.
#
# Copyright 2017 Geoscience Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This Python module contains a collection of constants used in
various components of the PyRate software
"""
# lookup keys for the metadata fields in PyRate GeoTIFF files
PYRATE_NCOLS = 'NCOLS'
PYRATE_NROWS = 'NROWS'
PYRATE_X_STEP = 'X_STEP'
PYRATE_Y_STEP = 'Y_STEP'
PYRATE_LAT = 'LAT'
PYRATE_LONG = 'LONG'
MASTER_DATE = 'MASTER_DATE'
MASTER_TIME = 'MASTER_TIME'
SLAVE_DATE = 'SLAVE_DATE'
SLAVE_TIME = 'SLAVE_TIME'
EPOCH_DATE = 'EPOCH_DATE'
PYRATE_DATUM = 'DATUM'
PYRATE_TIME_SPAN = 'TIME_SPAN_YEAR'
PYRATE_WAVELENGTH_METRES = 'WAVELENGTH_METRES'
PYRATE_INCIDENCE_DEGREES = 'INCIDENCE_DEGREES'
PYRATE_INSAR_PROCESSOR = 'INSAR_PROCESSOR'
PYRATE_WEATHER_ERROR = 'WEATHER_ERROR'
PYRATE_APS_ERROR = 'APS_ERROR'
PYRATE_MAXVAR = 'CVD_MAXVAR'
PYRATE_ALPHA = 'CVD_ALPHA'
COHERENCE = 'COHERENCE_MASKED_MULTILOOKED_IFG'
MULTILOOKED = 'MULTILOOKED_IFG'
ORIG = 'ORIGINAL_IFG'
DEM = 'ORIGINAL_DEM'
MLOOKED_DEM = 'MULTILOOKED_DEM'
INCIDENCE = 'INCIDENCE_ANGLE_MAP'
MLOOKED_INC = 'MULTILOOKED_INCIDENCE_ANGLE_MAP'
INCR = 'INCREMENTAL_TIME_SLICE'
CUML = 'CUMULATIVE_TIME_SLICE'
LINRATE = 'LINEAR_RATE_MAP'
LINERROR = 'LINEAR_RATE_ERROR_MAP'
LINSAMP = 'LINEAR_RATE_SAMPLES'
PYRATE_ORBITAL_ERROR = 'ORBITAL_ERROR'
ORB_REMOVED = 'REMOVED'
APS_REMOVED = 'REMOVED'
PYRATE_REF_PHASE = 'REFERENCE_PHASE'
REF_PHASE_REMOVED = 'REMOVED'
NAN_STATUS = 'NAN_STATUS'
NAN_CONVERTED = 'CONVERTED'
DATA_TYPE = 'DATA_TYPE'
DATA_UNITS = 'DATA_UNITS'
DAYS_PER_YEAR = 365.25 # span of year, not a calendar year
SPEED_OF_LIGHT_METRES_PER_SECOND = 3e8
MM_PER_METRE = 1000
|
Python
| 0.999045 |
@@ -2142,16 +2142,49 @@
ar year%0A
+YEARS_PER_DAY = 1 / DAY_PER_YEAR%0A
SPEED_OF
|
d43cf2adeb5bc5e5546dbf58532bfc283fc94ea8
|
fix sort order of combined citation information
|
python/citation_vim/item.py
|
python/citation_vim/item.py
|
# -*- coding:utf-8 -*-
from citation_vim.utils import compat_str, is_current
class Item(object):
"""
Intermediary object between bibtex/zotero and unite source output.
"""
def combine(self):
pairs = {
'Key': self.key,
'Title': self.title,
'Author(s)': self.author,
'Date': self.date,
'Tags': self.tags,
'Collections': ', '.join(self.collections),
'Publication': self.publication,
'Issue': self.issue,
'Volume': self.volume,
'Pages': self.pages,
'Publisher': self.publisher,
'Language': self.language,
'Abstract': self.abstract,
'Notes': self.notes,
'File(s)': self.file,
'URL': self.url,
'DOI': self.doi,
'ISBN': self.isbn}
self.combined = u"Available citation information:\n"
for key, value in pairs.items():
if value:
self.combined += " " + key + " : " + compat_str(value) + "\n"
|
Python
| 0.000497 |
@@ -17,16 +17,35 @@
-8 -*-%0A%0A
+import collections%0A
from cit
@@ -243,9 +243,33 @@
s =
-%7B
+collections.OrderedDict(%5B
%0A
@@ -281,14 +281,15 @@
+(
'Key'
-:
+,
sel
@@ -293,16 +293,17 @@
self.key
+)
,%0A
@@ -312,16 +312,17 @@
+(
'Title'
-:
+,
sel
@@ -328,16 +328,17 @@
lf.title
+)
,%0A
@@ -343,16 +343,17 @@
+(
'Author(
@@ -351,25 +351,25 @@
('Author(s)'
-:
+,
self.author
@@ -368,16 +368,17 @@
f.author
+)
,%0A
@@ -387,15 +387,16 @@
+(
'Date'
-:
+,
sel
@@ -401,16 +401,17 @@
elf.date
+)
,%0A
@@ -420,15 +420,16 @@
+(
'Tags'
-:
+,
sel
@@ -434,16 +434,17 @@
elf.tags
+)
,%0A
@@ -449,16 +449,17 @@
+(
'Collect
@@ -463,17 +463,17 @@
ections'
-:
+,
', '.jo
@@ -492,16 +492,17 @@
ections)
+)
,%0A
@@ -503,24 +503,25 @@
+(
'Publication
@@ -521,17 +521,17 @@
ication'
-:
+,
self.pu
@@ -539,16 +539,17 @@
lication
+)
,%0A
@@ -558,16 +558,17 @@
+(
'Issue'
-:
+,
sel
@@ -574,16 +574,17 @@
lf.issue
+)
,%0A
@@ -593,16 +593,17 @@
+(
'Volume'
: se
@@ -598,17 +598,17 @@
'Volume'
-:
+,
self.vo
@@ -611,16 +611,17 @@
f.volume
+)
,%0A
@@ -630,16 +630,17 @@
+(
'Pages'
-:
+,
sel
@@ -646,16 +646,17 @@
lf.pages
+)
,%0A
@@ -661,16 +661,17 @@
+(
'Publish
@@ -673,17 +673,17 @@
blisher'
-:
+,
self.pu
@@ -689,16 +689,17 @@
ublisher
+)
,%0A
@@ -704,16 +704,17 @@
+(
'Languag
@@ -715,17 +715,17 @@
anguage'
-:
+,
self.la
@@ -730,16 +730,17 @@
language
+)
,%0A
@@ -745,16 +745,17 @@
+(
'Abstrac
@@ -756,17 +756,17 @@
bstract'
-:
+,
self.ab
@@ -771,16 +771,17 @@
abstract
+)
,%0A
@@ -790,16 +790,17 @@
+(
'Notes'
-:
+,
sel
@@ -806,16 +806,17 @@
lf.notes
+)
,%0A
@@ -821,16 +821,17 @@
+(
'File(s)
@@ -831,17 +831,17 @@
File(s)'
-:
+,
self.fi
@@ -842,16 +842,17 @@
elf.file
+)
,%0A
@@ -861,14 +861,15 @@
+(
'URL'
-:
+,
sel
@@ -873,16 +873,17 @@
self.url
+)
,%0A
@@ -892,14 +892,15 @@
+(
'DOI'
-:
+,
sel
@@ -904,16 +904,17 @@
self.doi
+)
,%0A
@@ -923,15 +923,16 @@
+(
'ISBN'
-:
+,
sel
@@ -941,9 +941,20 @@
isbn
-%7D
+)%0A %5D)
%0A
|
e9ecc8036661811d6103afdf074853e1a0834213
|
Update tf-plan.py
|
tools/tf-plan.py
|
tools/tf-plan.py
|
#!/usr/bin/env python3
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import json
import shutil
import requests
from python_terraform import Terraform
def main(PR):
TOKEN = os.getenv('GITHUB_TOKEN')
GITHUB_WORKSPACE = os.getenv('GITHUB_WORKSPACE')
GITHUB_REPOSITORY = os.getenv('GITHUB_REPOSITORY')
# Get Added / Modified files in PR
modified_files, modified_files_raw, removed_files = pr_files(GITHUB_REPOSITORY, PR)
# Get Working directories to run TF Plan on
working_directories = get_updated_modules(modified_files, removed_files)
# Loop through all the identified working directories
# Deleting added/modified & removed files
try:
for dir in working_directories:
print("----------> RUN FOR: " + dir)
# Copying main directory
shutil.copytree(GITHUB_WORKSPACE+'/'+dir, os.getcwd()+'/temp/'+dir)
# Deleting added/modified & removed files
for mfile in modified_files:
if os.path.exists(os.getcwd()+'/temp/'+mfile):
print("Deleting file: " + mfile)
os.remove(os.getcwd()+'/temp/'+mfile)
for rfile in removed_files:
if os.path.exists(os.getcwd()+'/temp/'+rfile):
print("Deleting file: " + rfile)
os.remove(os.getcwd()+'/temp/'+rfile)
except requests.exceptions.RequestException as e:
print('No working directory with TF configs in PR.')
raise SystemExit(e)
# Loop through all the identified working directories
# Download added/modified files
try:
for dir in working_directories:
# Download added/modified files
for file in modified_files:
if dir in file:
for raw in modified_files_raw:
if file in raw:
print("Downloading file: " + raw)
downloadprfiles(raw, file, os.getcwd()+'/temp/'+dir)
break
except requests.exceptions.RequestException as e:
print('No working directory with TF configs in PR.')
raise SystemExit(e)
# Loop through all the identified working directories
# Run Terraform Plan
try:
for dir in working_directories:
comment, status = tf(os.getcwd() + '/temp/' + dir)
# commentpr(GITHUB_REPOSITORY, PR, comment, TOKEN)
if(status == 'fail'):
sys.exit('Terraform Init or Terraform Plan FAILED for: '+ dir)
except requests.exceptions.RequestException as e:
print('No working directory with TF configs in PR.')
raise SystemExit(e)
def pr_files(GITHUB_REPOSITORY,pr):
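# Illustrative (hypothetical repo and PR): pr_files('octo/infra', 42) queries
#   https://api.github.com/repos/octo/infra/pulls/42/files
# and partitions the response into added/modified vs. removed files.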
removed_files = []
modified_files = []
modified_files_raw = []
try:
response = requests.get('https://api.github.com/repos/'+ GITHUB_REPOSITORY +'/pulls/'+ str(pr) +'/files')
for file in response.json():
if(file['status'] == 'removed'):
print("Removed File: " + file['filename'])
removed_files.append(file['filename'])
else:
print("Added/Modified File: " + file['filename'])
modified_files.append(file['filename'])
modified_files_raw.append(file['raw_url'])
return modified_files, modified_files_raw, removed_files
except requests.exceptions.RequestException as e:
raise SystemExit(e)
def downloadprfiles(raw, file, path):
# print(path)
if not os.path.exists(path):
os.makedirs(path)
# print('Beginning file download with requests')
r = requests.get(raw)
with open(path + '/' + os.path.basename(file), 'wb') as f:
f.write(r.content)
# Retrieve HTTP meta-data
# print(r.status_code)
# print(r.headers['content-type'])
# print(r.encoding)
def get_updated_modules(modified_files, removed_files):
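# Illustrative (hypothetical paths): modified_files=['module/vpc/main.tf'] and
# removed_files=['docs/old.md'] give working directories ['module/vpc', 'docs']
# and, after filtering, modules=['module/vpc'].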
modified_files_dir = []
removed_files_dir = []
for file in modified_files:
modified_files_dir.append(os.path.dirname(file))
for file in removed_files:
removed_files_dir.append(os.path.dirname(file))
working_directories = modified_files_dir + removed_files_dir
working_directories = list(set(working_directories))
print("Working Directories:")
print(working_directories)
modules = [x for x in working_directories if x.startswith('module/')]
modules = [x for x in modules if x.count('/') == 1]
print("Modules Updated:")
print(modules)
return modules
def tf(dir):
tr = Terraform(working_dir=dir)
return_code_init, stdout_init, stderr_init = tr.init_cmd(capture_output=False)
return_code_plan, stdout_plan, stderr_plan = tr.plan_cmd(capture_output=False,var={'parent':'organizations/1234567890', 'billing_account':'ABCD-EFGH-IJKL-MNOP'})
if(return_code_init == 1):
comment = 'Terraform Init FAILED!\nFor Module: ' + dir.replace(os.getenv('TERRAFORM_CLI_PATH')+'/', '')
status = 'fail'
if(return_code_plan == 1):
comment = 'Terraform Plan FAILED!\nFor Module: ' + dir.replace(os.getenv('TERRAFORM_CLI_PATH')+'/', '')
status = 'fail'
else:
comment = 'Terraform Init & Terraform Plan SUCCESSFUL!\nFor Module: ' + dir.replace(os.getenv('TERRAFORM_CLI_PATH')+'/', '')
status = 'pass'
return comment, status
def commentpr(GITHUB_REPOSITORY, pr, comment, TOKEN):
headers = {'Authorization': f'token {TOKEN}', 'Accept': 'application/vnd.github.v3+json'}
# print(comment)
data = {"body":comment}
try:
response = requests.post('https://api.github.com/repos/'+ GITHUB_REPOSITORY +'/issues/'+ str(pr) +'/comments', data=json.dumps(data), headers=headers)
# print(response.text)
except requests.exceptions.RequestException as e:
raise SystemExit(e)
if __name__ == '__main__':
if len(sys.argv) != 2:
raise SystemExit('No PR passed.')
main(sys.argv[1])
|
Python
| 0.000001 |
@@ -4690,16 +4690,17 @@
('module
+s
/')%5D%0A m
|
7c787829b9c894f5c0de6dd50d6144e423d70f5c
|
uses ancient M2Crypto
|
python/cvmfs/certificate.py
|
python/cvmfs/certificate.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created by René Meusel
This file is part of the CernVM File System auxiliary tools.
"""
from M2Crypto import X509
from _common import CompressedObject
class Certificate(CompressedObject):
""" Wraps an X.509 certificate object as stored in CVMFS repositories """
def __init__(self, certificate_file):
CompressedObject.__init__(self, certificate_file)
cert = X509.load_cert_string(self.get_uncompressed_file().read())
self.openssl_certificate = cert
def __str__(self):
return "<Certificate " + self.get_fingerprint() + ">"
def __repr__(self):
return self.__str__()
def get_openssl_certificate(self):
""" return the certificate as M2Crypto.X509 object """
return self.openssl_certificate
def get_fingerprint(self, algorithm='sha1'):
""" returns the fingerprint of the X509 certificate """
fp = self.openssl_certificate.get_fingerprint(algorithm)
return ':'.join([ x + y for x, y in zip(fp[0::2], fp[1::2]) ])
def verify(self, signature, message):
""" verify a given signature to an expected 'message' string """
pubkey = self.openssl_certificate.get_pubkey()
pubkey.reset_context(md='sha1')
pubkey.verify_init()
pubkey.verify_update(message)
return pubkey.verify_final(signature)
|
Python
| 0.999944 |
@@ -132,16 +132,32 @@
s.%0A%22%22%22%0A%0A
+import M2Crypto%0A
from M2C
@@ -169,20 +169,93 @@
import
-X509
+EVP, X509, m2, util%0Afrom distutils.version import LooseVersion, StrictVersion
%0A%0Afrom _
@@ -902,16 +902,824 @@
icate%0A%0A%0A
+ def _get_fingerprint(self, algorithm='sha1'):%0A %22%22%22 Workaround for RHEL5 with ancient version of M2Crypto %22%22%22%0A if LooseVersion(M2Crypto.version) %3C StrictVersion(%220.17%22):%0A der = self.openssl_certificate.as_der()%0A md = EVP.MessageDigest(algorithm)%0A md.update(der)%0A digest = md.final()%0A return hex(util.octx_to_num(digest))%5B2:-1%5D.upper()%0A else:%0A return self.openssl_certificate.get_fingerprint()%0A%0A def _check_signature(self, pubkey, signature):%0A %22%22%22 Workaround for RHEL5 with ancient version of M2Crypto %22%22%22%0A if LooseVersion(M2Crypto.version) %3C StrictVersion(%220.18%22):%0A return m2.verify_final(pubkey.ctx, signature, pubkey.pkey)%0A else:%0A return pubkey.verify_final(signature)%0A%0A
def
@@ -1841,36 +1841,17 @@
= self.
-openssl_certificate.
+_
get_fing
@@ -1858,25 +1858,16 @@
erprint(
-algorithm
)%0A
@@ -2225,36 +2225,46 @@
return
-pubkey.verify_final(
+self._check_signature(pubkey,
signatur
|
92762566c0e80e24d8954b9a4b2f7d148a3c89da
|
Use google-hosted jquery for admin page
|
python/ecep/portal/admin.py
|
python/ecep/portal/admin.py
|
# Copyright (c) 2012 Azavea, Inc.
# See LICENSE in the project root for copying permission
from portal.models import Location
from django.contrib.gis import admin
from django import forms
from portal.widgets import MapWidget
from django.contrib.gis.geos import Point
import re
from django.conf import settings
class LocationForm(forms.ModelForm):
"""Form subclass for location model form to use custom widget for google map
and a custom clean method to properly handle points passed in as strings
"""
geom = forms.CharField(label="Geocoded Point", widget=MapWidget())
def get_point(self, geom_string):
"""Takes a geom_string from cleaned_data and converts it to a point
object. If unable to convert, raises a validation error.
Arguments:
- `geom_string`: string returned by the 'geom' input from the LocationForm
Takes the form of 'POINT (<LNG> <LAT>)'
"""
try:
split_geom_string = re.findall(r'([-.\w]+)', geom_string)
lng = float(split_geom_string[1])
lat = float(split_geom_string[2])
return Point(lng, lat)
except (IndexError, ValueError):
raise forms.ValidationError("Invalid point specified for location")
def clean(self):
"""
Need to create a Point object from string returned by form because
of the way the map fills in the geocoded location form
"""
self.cleaned_data = super(LocationForm, self).clean()
try:
self.cleaned_data['geom'] = self.get_point(self.cleaned_data['geom'])
return self.cleaned_data
except forms.ValidationError:
# Need to pass a dummy point if invalid, or we get a 500 error
# This point does not get saved, nothing happens to it
self.cleaned_data['geom'] = Point(0, 0)
raise forms.ValidationError("Invalid point specified for location")
class Meta:
model = Location
class LocationAdmin(admin.OSMGeoAdmin):
class Media:
css = { 'all': ('css/admin-map.css',)}
js = ('http://maps.googleapis.com/maps/api/js?key=%s&sensor=false&language=%s' % (settings.GOOGLE_MAPS_KEY, settings.LANGUAGE_CODE), 'js/admin-map.js', 'js/jquery.js')
list_display = ('site_name', 'address', 'zip', 'phone', 'id', )
list_filter = ('is_hs', 'is_ehs', 'accept_ccap', 'is_cps_based', 'is_community_based',
'is_age_lt_3', 'is_age_gt_3', 'is_full_day', 'is_full_week', 'is_full_year',
'is_part_day', 'is_part_week', 'is_school_year', 'is_home_visiting')
search_fields = ['site_name', 'address', 'zip', 'language_1', 'language_2', 'language_3']
readonly_fields = ['neighborhood']
form = LocationForm
fieldsets = [
(None, {'fields': ['site_name', 'neighborhood']}),
('Address', {'fields': [('address', 'city'), ('state', 'zip'), 'geom']}),
('Contact', {'fields': ['phone', 'url']}),
('Hours/Duration', {'fields': [('is_full_day', 'is_part_day'),
('is_full_week', 'is_part_week'),
('is_school_year', 'is_full_year')]}),
('Flags', {'fields': [('is_age_lt_3', 'is_age_gt_3'),
('is_community_based', 'is_cps_based'),
('is_hs', 'is_ehs'), 'accept_ccap']}),
('Other', {'fields': [('ages', 'prg_hours', 'accred'),
('language_1', 'language_2', 'language_3'),
'q_stmt']}),
]
admin.site.register(Location, LocationAdmin)
|
Python
| 0 |
@@ -2288,22 +2288,69 @@
s',
-'js/jquery.js'
+%22//ajax.googleapis.com/ajax/libs/jquery/1.10.2/jquery.min.js%22
)%0A%0A
|
c86cac8c9d84d7cc797e7712c3fd5cdefeb2ca9d
|
Update normalize.py
|
pythainlp/util/normalize.py
|
pythainlp/util/normalize.py
|
# -*- coding: utf-8 -*-
"""
Text normalization
"""
import re
import warnings
from pythainlp import thai_tonemarks
_NORMALIZE_RULE1 = [
"ะ",
"ั",
"็",
"า",
"ิ",
"ี",
"ึ",
"่",
"ํ",
"ุ",
"ู",
"ใ",
"ไ",
"โ",
"ื",
"่",
"้",
"๋",
"๊",
"ึ",
"์",
"๋",
"ำ",
] # vowels and tone marks that cause problems when duplicated
_NORMALIZE_RULE2 = [
("เเ", "แ"), # เ เ -> แ
("ํา", "ำ"), # นิคหิต + สระอา -> สระอำ
("ํ(t)า", "\\1ำ"),
("ํา(t)", "\\1ำ"),
("([่-๋])([ัิ-ื])", "\\2\\1"),
("([่-๋])([ูุ])", "\\2\\1"),
("ำ([่-๋])", "\\1ำ"),
("(์)([ัิ-ู])", "\\2\\1"),
] # sequences typed out of order or on the wrong key that still render correctly; map them back to the correct form, e.g. เ + เ becomes แ
def normalize(text: str) -> str:
"""
This function normalizes Thai text with the following normalizing rules:
* Remove redundant symbols of tones and vowels.
* Substitute ["เ", "เ"] with "แ".
:param str text: Thai text to be normalized
:return: normalized Thai text according to the rules
:rtype: str
:Example:
::
from pythainlp.util import normalize
normalize('สระะน้ำ')
# output: สระน้ำ
normalize('เเปลก')
# output: แปลก
normalize('นานาาา')
# output: นานา
"""
for data in _NORMALIZE_RULE2:
text = re.sub(data[0].replace("t", "[่้๊๋]"), data[1], text)
for data in list(zip(_NORMALIZE_RULE1, _NORMALIZE_RULE1)):
text = re.sub(data[0].replace("t", "[่้๊๋]") + "+", data[1], text)
return text
def delete_tone(text: str) -> str:
"""
This function removes Thai tonemarks from the text.
There are 4 tonemarks indicating 4 tones as follows:
* Down tone (Thai: ไม้เอก _่ )
* Falling tone (Thai: ไม้โท _้ )
* High tone (Thai: ไม้ตรี _๊ )
* Rising tone (Thai: ไม้จัตวา _๋ )
:param str text: text in Thai language
:return: text without Thai tonemarks
:rtype: str
:Example:
::
from pythainlp.util import delete_tone
delete_tone('สองพันหนึ่งร้อยสี่สิบเจ็ดล้านสี่แสนแปดหมื่นสามพันหกร้อยสี่สิบเจ็ด')
# output: สองพันหนึงรอยสีสิบเจ็ดลานสีแสนแปดหมืนสามพันหกรอยสีสิบเจ็ด
"""
chars = [ch for ch in text if ch not in thai_tonemarks]
return "".join(chars)
def deletetone(text: str) -> str:
warnings.warn(
"deletetone is deprecated, use delete_tone instead", DeprecationWarning
)
return delete_tone(text)
|
Python
| 0.000002 |
@@ -452,16 +452,17 @@
%22, %22%E0%B8%B3%22),
+
# %E0%B8%99%E0%B8%B4%E0%B8%84%E0%B8%AB%E0%B8%B4
@@ -479,17 +479,16 @@
-%3E %E0%B8%AA%E0%B8%A3%E0%B8%B0%E0%B8%AD%E0%B8%B3
-
%0A (%22%E0%B9%8D
|
20fa992576a80dceba5cee4a50c50f620362b5a2
|
include request body into VespaResult
|
python/vespa/vespa/query.py
|
python/vespa/vespa/query.py
|
# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.

from typing import Callable, List, Optional, Dict


#
# Match phase
#
class MatchFilter(object):
    """
    Abstract class for match filters.
    """

    def create_match_filter(self, query: str) -> str:
        """
        Create part of the YQL expression related to the filter.

        :param query: Query input.
        :return: Part of the YQL expression related to the filter.
        """
        raise NotImplementedError

    def get_query_properties(self, query: Optional[str] = None) -> Dict:
        """
        Relevant request properties associated with the filter.

        :param query: Query input.
        :return: dict containing the relevant request properties associated with the filter.
        """
        raise NotImplementedError


class AND(MatchFilter):
    def __init__(self) -> None:
        """
        Filter that matches documents containing all the query terms.
        """
        super().__init__()

    def create_match_filter(self, query: str) -> str:
        return '(userInput("{}"))'.format(query)

    def get_query_properties(self, query: Optional[str] = None) -> Dict:
        return {}


class OR(MatchFilter):
    def __init__(self) -> None:
        """
        Filter that matches any document containing at least one query term.
        """
        super().__init__()

    def create_match_filter(self, query: str) -> str:
        return '([{{"grammar": "any"}}]userInput("{}"))'.format(query)

    def get_query_properties(self, query: Optional[str] = None) -> Dict:
        return {}


class WeakAnd(MatchFilter):
    def __init__(self, hits: int, field: str = "default") -> None:
        """
        Match documents according to the weakAND algorithm.

        Reference: https://docs.vespa.ai/documentation/using-wand-with-vespa.html

        :param hits: Lower bound on the number of hits to be retrieved.
        :param field: Which Vespa field to search.
        """
        super().__init__()
        self.hits = hits
        self.field = field

    def create_match_filter(self, query: str) -> str:
        query_tokens = query.split(" ")
        terms = ", ".join(
            ['{} contains "{}"'.format(self.field, token) for token in query_tokens]
        )
        return '([{{"targetNumHits": {}}}]weakAnd({}))'.format(self.hits, terms)

    def get_query_properties(self, query: Optional[str] = None) -> Dict:
        return {}


class ANN(MatchFilter):
    def __init__(
        self,
        doc_vector: str,
        query_vector: str,
        embedding_model: Callable[[str], List[float]],
        hits: int,
        label: str,
    ) -> None:
        """
        Match documents according to the nearest neighbor operator.

        Reference: https://docs.vespa.ai/documentation/reference/query-language-reference.html#nearestneighbor

        :param doc_vector: Name of the document field to be used in the distance calculation.
        :param query_vector: Name of the query field to be used in the distance calculation.
        :param embedding_model: Model that takes a query str as input and returns a list of floats as output.
        :param hits: Lower bound on the number of hits to return.
        :param label: A label to identify this specific operator instance.
        """
        super().__init__()
        self.doc_vector = doc_vector
        self.query_vector = query_vector
        self.embedding_model = embedding_model
        self.hits = hits
        self.label = label

    def create_match_filter(self, query: str) -> str:
        return '([{{"targetNumHits": {}, "label": "{}"}}]nearestNeighbor({}, {}))'.format(
            self.hits, self.label, self.doc_vector, self.query_vector
        )

    def get_query_properties(self, query: Optional[str] = None) -> Dict[str, str]:
        embedding_vector = self.embedding_model(query)
        return {
            "ranking.features.query({})".format(self.query_vector): str(
                embedding_vector
            )
        }


class Union(MatchFilter):
    def __init__(self, *args: MatchFilter) -> None:
        """
        Match documents that belong to the union of many match filters.

        :param args: Match filters to be taken the union of.
        """
        super().__init__()
        self.operators = args

    def create_match_filter(self, query: str) -> str:
        match_filters = []
        for operator in self.operators:
            match_filter = operator.create_match_filter(query=query)
            if match_filter is not None:
                match_filters.append(match_filter)
        return " or ".join(match_filters)

    def get_query_properties(self, query: Optional[str] = None) -> Dict[str, str]:
        query_properties = {}
        for operator in self.operators:
            query_properties.update(operator.get_query_properties(query=query))
        return query_properties


#
# Ranking phase
#
class RankProfile(object):
    def __init__(self, name: str = "default", list_features: bool = False) -> None:
        """
        Define a rank profile.

        :param name: Name of the rank profile as defined in a Vespa search definition.
        :param list_features: Should the ranking features be returned. Either 'true' or 'false'.
        """
        self.name = name
        self.list_features = "false"
        if list_features:
            self.list_features = "true"


class Query(object):
    def __init__(
        self,
        match_phase: MatchFilter = AND(),
        rank_profile: RankProfile = RankProfile(),
    ) -> None:
        """
        Define a query model.

        :param match_phase: Define the match criteria. One of the MatchFilter options available.
        :param rank_profile: Define the rank criteria.
        """
        self.match_phase = match_phase
        self.rank_profile = rank_profile

    def create_body(self, query: str) -> Dict[str, str]:
        """
        Create the appropriate request body to be sent to Vespa.

        :param query: Query input.
        :return: dict representing the request body.
        """
        match_filter = self.match_phase.create_match_filter(query=query)
        query_properties = self.match_phase.get_query_properties(query=query)
        body = {
            "yql": "select * from sources * where {};".format(match_filter),
            "ranking": {
                "profile": self.rank_profile.name,
                "listFeatures": self.rank_profile.list_features,
            },
        }
        body.update(query_properties)
        return body


class VespaResult(object):
    def __init__(self, vespa_result):
        self.vespa_result = vespa_result

    @property
    def json(self) -> Dict:
        return self.vespa_result

    @property
    def hits(self) -> List:
        return self.vespa_result.get("root", {}).get("children", [])
|
Python
| 0 |
@@ -6675,16 +6675,35 @@
a_result
+, request_body=None
):%0A
@@ -6706,24 +6706,25 @@
self.
+_
vespa_result
@@ -6730,32 +6730,169 @@
t = vespa_result
+%0A self._request_body = request_body%0A%0A @property%0A def request_body(self) -%3E Optional%5BDict%5D:%0A return self._request_body
%0A%0A @property%0A
@@ -6931,32 +6931,33 @@
return self.
+_
vespa_result%0A%0A
@@ -6996,16 +6996,16 @@
%3E List:%0A
-
@@ -7016,16 +7016,17 @@
rn self.
+_
vespa_re
|
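The gist of the diff above, as a standalone sketch (class and field names simplified): the result wrapper now remembers the request body that produced it:

from typing import Dict, List, Optional

class Result:
    def __init__(self, vespa_result: Dict, request_body: Optional[Dict] = None):
        self._vespa_result = vespa_result
        self._request_body = request_body  # new: keep the query that was sent

    @property
    def request_body(self) -> Optional[Dict]:
        return self._request_body

    @property
    def hits(self) -> List:
        return self._vespa_result.get("root", {}).get("children", [])

r = Result({"root": {"children": ["hit"]}}, request_body={"yql": "select ..."})
print(r.request_body, r.hits)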
7fa8db8b52f6d066dc369912b0f9b227d71fa939
|
Fix wex.value docstring
|
wex/value.py
|
wex/value.py
|
""" Extracted data values are represented with tab-separated fields.
The right-most field on each line is the value, all preceding fields
are labels that describe the value.
The labels and the value are all JSON encoded.
So for example, a value 9.99 with a labels ``product`` and ``price`` would
look like::
"product"\t"price"\t9.99\n
And we could decode this line with the following Python snippet:
.. code-block:: pycon
>>> import json
>>> line = '"product"\\t"price"\\t9.99\\n'
>>> [json.loads(s) for s in line.split('\t')]
[u'product', u'price', 9.99]
Using tab-delimiters is convenient for downstream processing using Unix
command line tools such as :command:`cut` and :command:`grep`.
"""
import sys
from types import GeneratorType
from json import JSONEncoder
from functools import partial
from operator import itemgetter
from six import PY2, text_type
from six.moves import map
import logging; logger = logging.getLogger(__name__)
TAB = '\t'
NL = '\n'
if PY2:
JSONEncoder = partial(JSONEncoder, encoding='UTF-8')
json_encode = JSONEncoder(
skipkeys=False,
check_circular=True,
allow_nan=True,
indent=None,
separators=(',', ':'),
default=None,
sort_keys=True,
# may need to make this an argument at some point,
# but for now let's assume UTF-8 is ok on the output.
ensure_ascii=False,
).encode
class Value(tuple):
exit_on_exc = False
debug_on_exc = False
value = property(itemgetter(-1))
labels = property(itemgetter(slice(0, -1)))
def __new__(cls, value=(None,)):
if not isinstance(value, tuple):
value = (value,)
return super(Value, cls).__new__(cls, value)
def text(self):
""" Returns the text this value as a labelled JSON line. """
encoded = []
for field in self:
try:
encoded.append(json_encode(field))
except TypeError:
encoded.append('#' + text_type(repr(self.value)) + '!')
return TAB.join(encoded) + NL
def label(self, *labels):
""" Adds zero or more labels to this value. """
return self.__class__(tuple(map(text_type, labels)) + self)
def yield_values(extract, *args, **kw):
""" Yields ``Value`` objects extracted using ``extract``. """
exc_info = ()
try:
res = extract(*args, **kw)
if type(res) is GeneratorType:
for val in res:
yield Value(val)
else:
yield Value(res)
except Exception as exc:
exc_info = sys.exc_info()
yield Value(exc)
if any(exc_info) and (Value.exit_on_exc or Value.debug_on_exc):
if Value.debug_on_exc:
import pdb
pdb.post_mortem(exc_info[2])
else:
raise exc_info[0], exc_info[1], exc_info[2]
|
Python
| 0.000017 |
@@ -537,16 +537,17 @@
split('%5C
+%5C
t')%5D%0A
|
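Why the diff above doubles the backslash: inside a docstring, '\t' is an actual tab character, while '\\t' displays as the two characters the pycon snippet means to show. A quick check:

print('"product"\t"price"')   # renders a real tab between the fields
print('"product"\\t"price"')  # renders a literal backslash-t, as the docstring intends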
bf044427c7df101d11d779b1935f728ba93a0e75
|
Fix default logger in command runner.
|
src/main/python/apache/aurora/client/api/command_runner.py
|
src/main/python/apache/aurora/client/api/command_runner.py
|
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from __future__ import print_function

import logging
import posixpath
import subprocess
from multiprocessing.pool import ThreadPool

from pystachio import Environment, Required, String
from twitter.common import log

from apache.aurora.client.api import AuroraClientAPI
from apache.aurora.common.cluster import Cluster
from apache.aurora.config.schema.base import MesosContext
from apache.thermos.config.schema import ThermosContext

from gen.apache.aurora.api.constants import LIVE_STATES
from gen.apache.aurora.api.ttypes import Identity, ResponseCode, TaskQuery


class CommandRunnerTrait(Cluster.Trait):
  slave_root = Required(String)           # noqa
  slave_run_directory = Required(String)  # noqa


class DistributedCommandRunner(object):
  @classmethod
  def make_executor_path(cls, cluster, executor_name):
    parameters = cls.sandbox_args(cluster)
    parameters.update(executor_name=executor_name)
    return posixpath.join(
        '%(slave_root)s',
        'slaves/*/frameworks/*/executors/%(executor_name)s/runs',
        '%(slave_run_directory)s'
    ) % parameters

  @classmethod
  def thermos_sandbox(cls, cluster, executor_sandbox=False):
    sandbox = cls.make_executor_path(cluster, 'thermos-{{thermos.task_id}}')
    return sandbox if executor_sandbox else posixpath.join(sandbox, 'sandbox')

  @classmethod
  def sandbox_args(cls, cluster):
    cluster = cluster.with_trait(CommandRunnerTrait)
    return {'slave_root': cluster.slave_root, 'slave_run_directory': cluster.slave_run_directory}

  @classmethod
  def substitute_thermos(cls, command, task, cluster, **kw):
    prefix_command = 'cd %s;' % cls.thermos_sandbox(cluster, **kw)
    thermos_namespace = ThermosContext(
        task_id=task.assignedTask.taskId,
        ports=task.assignedTask.assignedPorts)
    mesos_namespace = MesosContext(instance=task.assignedTask.instanceId)
    command = String(prefix_command + command) % Environment(
        thermos=thermos_namespace,
        mesos=mesos_namespace)
    return command.get()

  @classmethod
  def aurora_sandbox(cls, cluster, executor_sandbox=False):
    if executor_sandbox:
      return cls.make_executor_path(cluster, 'twitter')
    else:
      return '/var/run/nexus/%task_id%/sandbox'

  @classmethod
  def substitute_aurora(cls, command, task, cluster, **kw):
    command = ('cd %s;' % cls.aurora_sandbox(cluster, **kw)) + command
    command = command.replace('%shard_id%', str(task.assignedTask.instanceId))
    command = command.replace('%task_id%', task.assignedTask.taskId)
    for name, port in task.assignedTask.assignedPorts.items():
      command = command.replace('%port:' + name + '%', str(port))
    return command

  @classmethod
  def substitute(cls, command, task, cluster, **kw):
    if task.assignedTask.task.executorConfig:
      return cls.substitute_thermos(command, task, cluster, **kw)
    else:
      return cls.substitute_aurora(command, task, cluster, **kw)

  @classmethod
  def query_from(cls, role, env, job):
    return TaskQuery(statuses=LIVE_STATES, owner=Identity(role), jobName=job, environment=env)

  def __init__(self, cluster, role, env, jobs, ssh_user=None,
               log_fn=log.error):
    self._cluster = cluster
    self._api = AuroraClientAPI(cluster=cluster)
    self._role = role
    self._env = env
    self._jobs = jobs
    self._ssh_user = ssh_user if ssh_user else self._role
    self._log = log_fn

  def execute(self, args):
    hostname, role, command = args
    ssh_command = ['ssh', '-n', '-q', '%s@%s' % (role, hostname), command]
    self._log(logging.DEBUG, "Running command: %s" % ssh_command)
    po = subprocess.Popen(ssh_command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    output = po.communicate()
    return '\n'.join('%s: %s' % (hostname, line) for line in output[0].splitlines())

  def resolve(self):
    for job in self._jobs:
      resp = self._api.query(self.query_from(self._role, self._env, job))
      if resp.responseCode != ResponseCode.OK:
        self._log(logging.ERROR, 'Failed to query job: %s' % job)
        continue
      for task in resp.result.scheduleStatusResult.tasks:
        yield task

  def process_arguments(self, command, **kw):
    for task in self.resolve():
      host = task.assignedTask.slaveHost
      yield (host, self._ssh_user, self.substitute(command, task, self._cluster, **kw))

  def run(self, command, parallelism=1, **kw):
    threadpool = ThreadPool(processes=parallelism)
    for result in threadpool.imap_unordered(self.execute, self.process_arguments(command, **kw)):
      print(result)


class InstanceDistributedCommandRunner(DistributedCommandRunner):
  """A distributed command runner that only runs on specified instances of a job."""

  @classmethod
  def query_from(cls, role, env, job, instances=None):
    return TaskQuery(
        statuses=LIVE_STATES,
        owner=Identity(role),
        jobName=job,
        environment=env,
        instanceIds=instances)

  def __init__(self, cluster, role, env, job, ssh_user=None, instances=None, log_fn=logging.log):
    super(InstanceDistributedCommandRunner, self).__init__(cluster, role, env, [job], ssh_user,
                                                           log_fn)
    self._job = job
    self._ssh_user = ssh_user if ssh_user else self._role
    self.instances = instances

  def resolve(self):
    resp = self._api.query(self.query_from(self._role, self._env, self._job, self.instances))
    if resp.responseCode == ResponseCode.OK:
      for task in resp.result.scheduleStatusResult.tasks:
        yield task
    else:
      self._log(logging.ERROR,
          "Error: could not retrieve task information for run command: %s" % resp.messageDEPRECATED)
      raise ValueError("Could not retrieve task information: %s" % resp.messageDEPRECATED)
|
Python
| 0 |
@@ -3710,13 +3710,11 @@
log.
-error
+log
):%0A
|
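A sketch of the mismatch the diff above fixes: self._log is always invoked as self._log(level, message), which matches the stdlib logging.log signature but not log.error, where the level argument would be swallowed as the message. Assuming plain stdlib logging:

import logging

logging.basicConfig(level=logging.DEBUG)

log_fn = logging.log  # correct default: takes (level, msg)
log_fn(logging.DEBUG, "Running command: %s" % ["ssh", "-n", "host"])

# log_fn = logging.error  # old-style default would log the integer level 10 as the message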
23f09ac800b40d54b2bbcded0ae57c3b2368b4be
|
Improve navigate path for series detail page
|
www/views.py
|
www/views.py
|
from django.shortcuts import render
from django.template import Context
from django.http import HttpResponse, Http404
from django.core.urlresolvers import reverse
import api
from mod import dispatch_module_hook

PAGE_SIZE = 50


def render_page(request, template_name, **data):
    dispatch_module_hook("render_page_hook", context_data=data)
    return render(request, template_name, context=data)


def prepare_message(m):
    name, addr = m.get_sender()
    m.sender_full_name = "%s <%s>" % (name, addr)
    m.sender_display_name = name or addr
    m.url = "/%s/%s" % (m.project.name, m.message_id)
    m.status_tags = []
    if m.is_series_head:
        m.num_patches = len(m.get_patches())
        if m.get_num():
            m.total_patches = m.get_num()[1] or 1
        else:
            m.total_patches = 1
        if m.num_patches < m.total_patches:
            m.status_tags.append({
                "title": "Series not complete (%d patches not received)" % \
                         (m.total_patches - m.num_patches),
                "type": "warning",
                "char": "P",
            })
    m.extra_info = []
    m.extra_headers = []
    dispatch_module_hook("prepare_message_hook", message=m)
    return m


def prepare_series(s):
    r = []

    def add_msg_recurse(m, depth=0):
        a = prepare_message(m)
        a.indent_level = min(depth, 4)
        r.append(prepare_message(m))
        replies = m.get_replies()
        non_patches = [x for x in replies if not x.is_patch]
        patches = [x for x in replies if x.is_patch]
        for x in non_patches + patches:
            add_msg_recurse(x, depth + 1)
        return r

    add_msg_recurse(s)
    return r


def prepare_series_list(sl):
    return [prepare_message(s) for s in sl]


def prepare_projects():
    return api.models.Project.objects.all()


def view_project_list(request):
    return render_page(request, "project-list.html", projects=prepare_projects)


def gen_page_links(total, cur_page, pagesize):
    max_page = (total + pagesize - 1) / pagesize
    ret = []
    ddd = False
    for i in range(1, max_page + 1):
        if i == cur_page:
            ret.append({
                "title": str(i),
                "url": "?page=" + str(i),
                "class": "active",
                "url": "#"
            })
            ddd = False
        elif i < 10 or abs(i - cur_page) < 3 or max_page - i < 3:
            ret.append({
                "title": str(i),
                "url": "?page=" + str(i),
            })
            ddd = False
        else:
            if not ddd:
                ret.append({
                    "title": '...',
                    "class": "disabled",
                    "url": "#"
                })
                ddd = True
    return ret


def get_page_from_request(request):
    try:
        return int(request.GET["page"])
    except:
        return 1


def prepare_navigate_list(cur, *path):
    """ each path is (view_name, kwargs, title) """
    r = [{"url": reverse("project_list"),
          "title": "Projects"}]
    for it in path:
        r.append({"url": reverse(it[0], kwargs=it[1]),
                  "title": it[2]})
    r.append({"title": cur})
    return r


def render_series_list_page(request, query, search, project=None, keywords=[]):
    sort = request.GET.get("sort")
    if sort == "replied":
        sortfield = "-last_reply_date"
        order_by_reply = True
    else:
        sortfield = "-date"
        order_by_reply = False
    if sortfield:
        query = query.order_by(sortfield)
    cur_page = get_page_from_request(request)
    start = (cur_page - 1) * PAGE_SIZE
    series = query[start:start + PAGE_SIZE]
    page_links = gen_page_links(query.count(), cur_page, PAGE_SIZE)
    if project:
        nav_path = prepare_navigate_list("Patches",
                                         ("project_detail", {"project": project}, project))
    else:
        nav_path = prepare_navigate_list('search "%s"' % search)
    return render_page(request, 'series-list.html',
                       series=prepare_series_list(series),
                       page_links=page_links,
                       search=search,
                       keywords=keywords,
                       project_column=project == None,
                       order_by_reply=order_by_reply,
                       navigate_links=nav_path)


def view_search_help(request):
    from markdown import markdown
    nav_path = prepare_navigate_list("Search help")
    return render_page(request, 'search-help.html',
                       navigate_links=nav_path,
                       search_help_doc=markdown(api.search.SearchEngine.__doc__))


def view_project_detail(request, project):
    po = api.models.Project.objects.filter(name=project).first()
    if not po:
        raise Http404("Project not found")
    nav_path = prepare_navigate_list("Information",
                                     ("project_detail", {"project": project}, project))
    return render_page(request, "project-detail.html",
                       project=po,
                       navigate_links=nav_path,
                       search="")


def view_search(request):
    from api.search import SearchEngine
    search = request.GET.get("q", "").strip()
    terms = [x.strip() for x in search.split(" ") if x]
    se = SearchEngine()
    query = se.search_series(*terms)
    return render_series_list_page(request, query, search,
                                   keywords=se.last_keywords())


def view_series_list(request, project):
    if not api.models.Project.has_project(project):
        raise Http404("Project not found")
    search = "project:%s" % project
    query = api.models.Message.objects.series_heads(project)
    return render_series_list_page(request, query, search, project=project)


def view_series_mbox(request, project, message_id):
    s = api.models.Message.objects.find_series(message_id, project)
    if not s:
        raise Http404("Series not found")
    r = prepare_series(s)
    mbox = "\n".join([x.get_mbox() for x in r])
    return HttpResponse(mbox, content_type="text/plain")


def view_series_detail(request, project, message_id):
    s = api.models.Message.objects.find_series(message_id, project)
    if not s:
        raise Http404("Series not found")
    nav_path = prepare_navigate_list(s.message_id,
                                     ("series_list", {"project": project}, project))
    search = "id:" + message_id
    ops = []
    dispatch_module_hook("www_series_operations_hook",
                         request=request,
                         series=s,
                         operations=ops)
    return render_page(request, 'series-detail.html',
                       series=prepare_message(s),
                       project=project,
                       navigate_links=nav_path,
                       search=search,
                       series_operations=ops,
                       messages=prepare_series(s))
|
Python
| 0 |
@@ -6346,16 +6346,87 @@
age_id,%0A
+ (%22project_detail%22, %7B%22project%22: project%7D, project),%0A
@@ -6471,31 +6471,33 @@
: project%7D,
-project
+%22Patches%22
))%0A searc
|
fb786e6fa254bf9b041b58ae3ba524257892bea8
|
Make payloads larger for tests.
|
timelines.py
|
timelines.py
|
from sentry.utils.runner import configure
configure()

import contextlib
import functools
import logging
import random
import sys
import time
import uuid

from sentry.app import timelines
from sentry.timelines.redis import Record

logging.basicConfig(level=logging.DEBUG)


@contextlib.contextmanager
def timer(preamble):
    start = time.time()
    yield
    print '{0} in {1} ms.'.format(preamble, (time.time() - start) * 1000)


# Load a bunch of records.
n_timelines = int(sys.argv[1])
n_records = int(sys.argv[2])

calls = []
with timer('Generated {0} records to be loaded into {1} timelines'.format(n_records, n_timelines)):
    for i in xrange(0, n_records):
        p = random.randint(1, n_timelines)
        record = Record(uuid.uuid1().hex, 'payload', time.time())
        calls.append(functools.partial(timelines.add, 'projects/{0}'.format(p), record))

with timer('Loaded {0} records'.format(len(calls))):
    for call in calls:
        call()

# Move them into the "ready" state.
ready = set()
with timer('Scheduled timelines for digestion'):
    for chunk in timelines.schedule(time.time()):
        for timeline, timestamp in chunk:
            ready.add(timeline)

# Run them through the digestion process.
with timer('Digested {0} timelines'.format(len(ready))):
    for timeline in ready:
        with timelines.digest(timeline) as records:
            i = 0
            # Iterate through the records to ensure that all data is deserialized.
            for i, record in enumerate(records, 1):
                pass

# Run the scheduler again (using a future cutoff time to accommodate for backoff.)
ready.clear()
with timer('Scheduled timelines for digestion'):
    for chunk in timelines.schedule(time.time() + timelines.backoff(1)):
        for timeline, timestamp in chunk:
            ready.add(timeline)

# Run them through the digestion process again (this should result in all of
# the items being taken out of the schedule.)
with timer('Digested {0} timelines'.format(len(ready))):
    for timeline in ready:
        with timelines.digest(timeline) as records:
            i = 0
            for i, record in enumerate(records, 1):
                pass

# Check to make sure we're not leaking any data.
with timelines.cluster.all() as client:
    result = client.keys('*')

for host, value in result.value.iteritems():
    assert not value
|
Python
| 0.000001 |
@@ -514,16 +514,38 @@
argv%5B2%5D)
+%0Apayload = ' ' * 12000
%0A%0Acalls
@@ -774,17 +774,15 @@
ex,
-'
payload
-'
, ti
|
4e3ebcf98e2bfb2cea1f92b66e5205194744482a
|
add level 11
|
pythonchallenge/level_11.py
|
pythonchallenge/level_11.py
|
import unittest
import urllib
import requests
import logging
import re
import urllib
import os
import os.path
import Image
import ImageDraw

# Default is warning, it's to suppress requests INFO log
logging.basicConfig(format='%(message)s')


def solution():
    url = 'http://www.pythonchallenge.com/pc/return/cave.jpg'
    urllib.urlretrieve(url, 'cave.jpg')
    image_file = Image.open('cave.jpg')
    new_image = Image.new('RGB', (640, 480), 'black')
    new_image_stroke = ImageDraw.Draw(new_image)
    for y in range(480):
        for x in range(640):
            if y % 2 == 0 and x % 2 == 0 or y % 2 == 1 and x % 2 == 1:
                new_image.putpixel((x, y), image_file.getpixel(x, y))
    new_image.save('cave_edited.jpg')
    return 'evil'


class SolutionTest(unittest.TestCase):

    def setUp(self):
        self.prefix = "http://www.pythonchallenge.com/pc/return/"
        self.suffix = ".html"

    def test_solution(self):
        actual = solution()
        expected = 'evil'
        cred = ('huge', 'file')
        self.assertEquals(actual, expected)
        origin_url = ''.join([self.prefix, 'evil', self.suffix])
        try:
            r = requests.get(origin_url, auth=cred)
        except:
            raise
        self.assertTrue(r.ok)
        next_entry = [re.sub(r'(.*)URL=(.*)\.html\"\>', r'\2', line)
                      for line in r.iter_lines() if re.match(r'.*URL.*', line)]
        r.close()
        if len(next_entry) != 0:
            r = requests.get(
                ''.join([self.prefix, next_entry[0], self.suffix], auth=expected))
            logging.warn('Level 12 is %s with %s' % (r.url, cred))
        else:
            logging.warn('Level 12 is %s with %s' % (origin_url, cred))


if __name__ == "__main__":
    unittest.main(failfast=True)
|
Python
| 0.000496 |
@@ -132,16 +132,46 @@
mageDraw
+%0Afrom StringIO import StringIO
%0A%0A# Defa
@@ -351,42 +351,51 @@
-urllib.urlretrieve(url, 'cave.jpg'
+r = requests.get(url, auth=('huge', 'file')
)%0A
@@ -424,18 +424,27 @@
pen(
-'cave.jpg'
+StringIO(r.content)
)%0A
@@ -553,24 +553,25 @@
for y in
+x
range(480):%0A
@@ -587,16 +587,17 @@
or x in
+x
range(64
@@ -738,14 +738,16 @@
xel(
+(
x, y))
+)
%0A
@@ -927,16 +927,16 @@
eturn/%22%0A
-
@@ -958,16 +958,78 @@
.html%22%0A%0A
+ def tearDown(self):%0A os.remove('cave_edited.jpg')%0A%0A
def
|
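A modern-Python equivalent of the fix in the diff above (the record's code is Python 2, hence StringIO and the bare PIL imports): fetch the credential-protected image with HTTP basic auth and decode it from memory; the URL and credentials come from the record:

import io
import requests
from PIL import Image

url = 'http://www.pythonchallenge.com/pc/return/cave.jpg'
r = requests.get(url, auth=('huge', 'file'))  # urllib.urlretrieve would skip the auth
im = Image.open(io.BytesIO(r.content))        # no temporary file on disk
print(im.size)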
fb5ad293c34387b1ab7b7b7df3aed3942fdd9282
|
Add default to max_places in proposal form
|
src/webapp/activities/forms.py
|
src/webapp/activities/forms.py
|
# -*- encoding: utf-8 -*-

from django import forms


class ActivitySubscribeForm(forms.Form):

    id = forms.IntegerField(
        min_value = 0, required=True,
        widget = forms.HiddenInput,
    )
    title = forms.CharField(
        max_length=100, required=True,
        widget = forms.HiddenInput,
    )


class ProposalForm(forms.Form):

    title = forms.CharField(
        max_length=100, required=True,
    )
    subtitle = forms.CharField(
        required = False,
        widget = forms.Textarea,
    )
    duration = forms.CharField(
        max_length=50, required=True,
    )
    max_places = forms.IntegerField(
        min_value = 0, required=True,
    )
    show_owners = forms.BooleanField(
        initial = False, required = False,
    )
    requires_inscription = forms.BooleanField(
        initial = False, required = False,
    )
    owners = forms.CharField(
        required = False,
        widget = forms.Textarea,
    )
    organizers = forms.CharField(
        required = False,
        widget = forms.Textarea,
    )
    text = forms.CharField(
        required = False,
        widget = forms.Textarea,
    )
    logistics = forms.CharField(
        required = False,
        widget = forms.Textarea,
    )
    notes_organization = forms.CharField(
        required = False,
        widget = forms.Textarea,
    )
|
Python
| 0.000001 |
@@ -654,32 +654,45 @@
, required=True,
+ initial = 0,
%0A )%0A show_
|
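The whole change above in isolation: giving the field an initial value so an unbound form renders 0 instead of an empty input. A minimal sketch, assuming a standard Django form:

from django import forms

class ProposalForm(forms.Form):
    # initial=0 pre-fills the widget; min_value still validates submissions
    max_places = forms.IntegerField(min_value=0, required=True, initial=0)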
447f24720757175f5a54291d8cb7f5aa3b0fab92
|
test LesionAnalysis.valid attribute, not LesionAnalysis.valid_dataset
|
arfi_histology_analysis.py
|
arfi_histology_analysis.py
|
from lesion_analysis import LesionAnalysis

Ptotal = []
Pexact = []
Pnn = []
Patrophy = []
Pbph = []
Pmiss = []
Pclinsig = []
Pclinsigsens = []
Pfalsepositive = []
for p in range(56, 107):
    P = LesionAnalysis(p)
    if P.valid_dataset:
        Ptotal.append(p)
        if P.index_match['exact']:
            Pexact.append(p)
        if P.index_match['nn']:
            Pnn.append(p)
        else:
            Pmiss.append(p)
        if P.benign_match['atrophy']:
            Patrophy.append(p)
        if P.benign_match['bph']:
            Pbph.append(p)
        Pclinsig.append(P.clin_sig_match)
        Pclinsigsens.append(P.clin_sig_sensitivity)
        Pfalsepositive.append(P.false_positive)

PexactIOS = []
PexactGleason = []
for p in Pexact:
    P = LesionAnalysis(p)
    PexactIOS.append(P.arfi['index']['IOS'])
    PexactGleason.append(P.histology['index']['Gleason'])

print "ARFI:HISTOLOGY ANALYSIS"
print "======================"
print "Valid Patients (%i): %s" % (len(Ptotal), Ptotal)

print "\nINDEX LESIONS"
print "============="
print "ARFI Sensitivity (Exact) = %i/%i (%.2f)" % (len(Pexact),
                                                   len(Ptotal),
                                                   float(len(Pexact)) /
                                                   float(len(Ptotal)))
print "ARFI Sensitivity (NN) = %i/%i (%.2f)" % (len(Pnn),
                                                len(Ptotal),
                                                float(len(Pnn)) /
                                                float(len(Ptotal)))
print "Exact ARFI:Histology Matches:"
for i, x in enumerate(Pexact):
    print '\t%s (IOS: %s, Gleason: %s)' % (x, PexactIOS[i], PexactGleason[i])
print "NN ARFI:Histology Matches: %s" % Pnn
print "Missed Cases: %s" % Pmiss

print "\nARFI LESIONS"
print "============"
ARFIclinsig = len([j for i in Pclinsig for j in i if j[0]])
ARFIposterior = len([j for i in Pclinsig for j in i if j[1] == 'posterior'])
ARFIanterior = len([j for i in Pclinsig for j in i if j[1] == 'anterior'])
ARFItotal = len([j for i in Pclinsig for j in i])
print "%i/%i (%.2f) were clinically significant lesions" % (ARFIclinsig,
                                                            ARFItotal,
                                                            float(ARFIclinsig) /
                                                            float(ARFItotal))
print "\t%i/%i (%.2f) read lesions were posterior" % (ARFIposterior,
                                                      ARFItotal,
                                                      float(ARFIposterior) /
                                                      float(ARFItotal))
print "\t%i/%i (%.2f) read lesions were anterior" % (ARFIanterior,
                                                     ARFItotal,
                                                     float(ARFIanterior) /
                                                     float(ARFItotal))
print "False ARFI reads:"
print "\tNon-clinically-significant PCA: %s" % [x for x in Pfalsepositive if x == 'pca']
print "\tAtrophy: %s" % [x for x in Pfalsepositive if x == 'atrophy']
print "\tBPH: %s" % [x for x in Pfalsepositive if x == 'bph']

print "\nCLINICALLY-SIGNIFICANT HISTOLOGY LESIONS"
print "========================================"
histclinsig = len([j for i in Pclinsigsens for j in i if j[0]])
histposterior = len([j for i in Pclinsigsens for j in i if j[1] == 'posterior'])
histanterior = len([j for i in Pclinsigsens for j in i if j[1] == 'anterior'])
histtotal = len([j for i in Pclinsigsens for j in i])
print "%i/%i (%.2f) of clinically-significant lesions were detected" % (histclinsig,
                                                                        histtotal,
                                                                        float(histclinsig) /
                                                                        float(histtotal))
print "\t%i/%i (%.2f) of these lesions were posterior" % (histposterior,
                                                          histtotal,
                                                          float(histposterior) /
                                                          float(histtotal))
print "\t%i/%i (%.2f) of these lesions were anterior" % (histanterior,
                                                         histtotal,
                                                         float(histanterior) /
                                                         float(histtotal))

print "\nINDEX LESION BENIGN CONFOUNDERS"
print "==============================="
print "Atrophy: %s" % Patrophy
print "BPH: %s" % Pbph
Python
| 0.000003 |
@@ -227,16 +227,8 @@
alid
-_dataset
:%0A
|
3b5f322d8fe76251b322b2d81cecf6abbee5e4bd
|
rename python class method
|
python/dllib/src/bigdl/dllib/feature/image/imagePreprocessing.py
|
python/dllib/src/bigdl/dllib/feature/image/imagePreprocessing.py
|
#
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import sys
from bigdl.util.common import JavaValue
from bigdl.util.common import callBigDlFunc
from bigdl.util.common import *
from zoo.feature.common import Preprocessing

if sys.version >= '3':
    long = int
    unicode = str

class Resize(Preprocessing):
    """
    image resize
    """
    def __init__(self, resizeH, resizeW, bigdl_type="float"):
        super(Resize, self).__init__(bigdl_type, resizeH, resizeW)

class ChannelNormalizer(Preprocessing):
    """
    image norm
    """
    def __init__(self, meanR, meanG, meanB, stdR, stdG, stdB, bigdl_type="float"):
        super(ChannelNormalizer, self).__init__(bigdl_type, meanR, meanG, meanB, stdR, stdG, stdB)

class MatToTensor(Preprocessing):
    """
    MatToTensor
    """
    def __init__(self, bigdl_type="float"):
        super(MatToTensor, self).__init__(bigdl_type)

class CenterCrop(Preprocessing):
    """
    CenterCrop
    """
    def __init__(self, cropWidth, cropHeight, bigdl_type="float"):
        super(CenterCrop, self).__init__(bigdl_type, cropWidth, cropHeight)
|
Python
| 0.99888 |
@@ -1261,16 +1261,17 @@
stdB)%0A%0A
+%0A
class Ma
@@ -1427,16 +1427,17 @@
_type)%0A%0A
+%0A
class Ce
@@ -1634,16 +1634,9 @@
pHeight)
-
+%0A
|
6a84b885be67e8a9f424c2b36f50e8fe9347dbc9
|
Remove duplicate constant in ipmi.py
|
rackattack/physical/ipmi.py
|
rackattack/physical/ipmi.py
|
import subprocess
import time
import logging
import multiprocessing.pool


class IPMI:
    IPMITOOL_FILENAME = "ipmitool"
    _CONCURRENCY = 4
    IPMITOOL_FILENAME = "ipmitool"
    _pool = None

    def __init__(self, hostname, username, password):
        self._hostname = hostname
        self._username = username
        self._password = password
        if IPMI._pool is None:
            IPMI._pool = multiprocessing.pool.ThreadPool(self._CONCURRENCY)

    def off(self):
        IPMI._pool.apply_async(self._powerCommand, args=("off",))

    def powerCycle(self):
        IPMI._pool.apply_async(self._powerCycle)

    def _powerCycle(self):
        self._powerCommand("off")
        self._powerCommand("on")

    def _powerCommand(self, command):
        NUMBER_OF_RETRIES = 10
        cmdLine = [
            self.IPMITOOL_FILENAME, "power", command,
            "-H", str(self._hostname), "-U", self._username, "-P", self._password]
        for i in xrange(NUMBER_OF_RETRIES - 1):
            try:
                return subprocess.check_output(cmdLine, stderr=subprocess.STDOUT, close_fds=True)
            except:
                time.sleep(0.1)
        try:
            return subprocess.check_output(cmdLine, stderr=subprocess.STDOUT, close_fds=True)
        except subprocess.CalledProcessError as e:
            logging.error("Output: %(output)s", dict(output=e.output))
            raise
|
Python
| 0.002215 |
@@ -140,43 +140,8 @@
= 4%0A
- IPMITOOL_FILENAME = %22ipmitool%22%0A
|
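Why the duplicate constant is safe to drop, shown standalone: when a class body binds the same name twice, only the last binding survives, so the two definitions were redundant:

class C:
    X = "ipmitool"
    X = "ipmitool"  # rebinds the same name; the first line has no effect

print(C.X)  # ipmitool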
356a7c4d83a5289e7b30a07b0f76829e274b7481
|
Fix Eventlet transport on Python 3
|
raven/transport/eventlet.py
|
raven/transport/eventlet.py
|
"""
raven.transport.eventlet
~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import sys
from raven.transport.http import HTTPTransport
try:
import eventlet
from eventlet.green import urllib2 as eventlet_urllib2
has_eventlet = True
except:
has_eventlet = False
class EventletHTTPTransport(HTTPTransport):
scheme = ['eventlet+http', 'eventlet+https']
def __init__(self, parsed_url, pool_size=100, **kwargs):
if not has_eventlet:
raise ImportError('EventletHTTPTransport requires eventlet.')
super(EventletHTTPTransport, self).__init__(parsed_url, **kwargs)
# remove the eventlet+ from the protocol, as it is not a real protocol
self._url = self._url.split('+', 1)[-1]
def _send_payload(self, payload):
req = eventlet_urllib2.Request(self._url, headers=payload[1])
try:
if sys.version_info < (2, 6):
response = eventlet_urllib2.urlopen(req, payload[0]).read()
else:
response = eventlet_urllib2.urlopen(req, payload[0],
self.timeout).read()
return response
except Exception as err:
return err
def send(self, data, headers):
"""
Spawn an async request to a remote webserver.
"""
eventlet.spawn(self._send_payload, (data, headers))
|
Python
| 0.000002 |
@@ -298,16 +298,29 @@
ventlet%0A
+ try:%0A
from
@@ -370,16 +370,110 @@
urllib2%0A
+ except ImportError:%0A from eventlet.green.urllib import request as eventlet_urllib2%0A
has_
|
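The shape of the portability fix above, as a sketch (assumes eventlet is installed): Python 2 exposes the green urllib2 module, Python 3 exposes urllib.request, so the import is tried in that order:

try:
    from eventlet.green import urllib2 as green_urllib          # Python 2
except ImportError:
    from eventlet.green.urllib import request as green_urllib   # Python 3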
b30befbf39009ed566dbb7ff725de05bad2be990
|
Add link to permissions management doc for ExportTables. (#520)
|
rdr_client/export_tables.py
|
rdr_client/export_tables.py
|
# Exports the entire contents of database tables to Unicode CSV files stored in GCS.
# Used instead of Cloud SQL export because it handles newlines and null characters properly.
#
# Usage: ./run_client.sh --project <PROJECT> --account <ACCOUNT> \
#     --service_account exporter@<PROJECT>.iam.gserviceaccount.com export_tables.py \
#     --database rdr --tables code,participant --directory test_directory
#
# "directory" indicates a directory inside the GCS bucket to write the files to
#
# If "rdr" is chosen for the database, the data will be written to <ENVIRONMENT>-rdr-export;
# If "cdm" or "voc" are chosen, the data will be written to <ENVIRONMENT>-cdm.

import logging

from client import Client
from main_util import get_parser, configure_logging


def export_tables(client):
    table_names = client.args.tables.split(',')
    logging.info('Exporting %s from %s to %s' % (table_names, client.args.database,
                                                 client.args.directory))
    request_body = {'database': client.args.database,
                    'tables': table_names,
                    'directory': client.args.directory}
    response = client.request_json('ExportTables', 'POST', request_body)
    logging.info('Data is being exported to: %s' % response['destination'])


if __name__ == '__main__':
    configure_logging()
    parser = get_parser()
    parser.add_argument('--database', help='The database to export data from', required=True)
    parser.add_argument('--tables', help='A comma-separated list of tables to export',
                        required=True)
    parser.add_argument('--directory',
                        help='A directory to write CSV output to inside the GCS bucket',
                        required=True)
    export_tables(Client(parser=parser, base_path='offline'))
|
Python
| 0 |
@@ -173,16 +173,143 @@
erly.%0A#%0A
+# Documentation of permissions management:%0A# https://docs.google.com/document/d/1vKiu2zcSy97DQTIuSezr030kTyeDthome9XzNy98B6M%0A#%0A
# Usage:
|
f210ef3e6b4122c75b4df9eee6be6ee4ac81efa4
|
Remove a useless table from the db
|
www/scripts/generate_db.py
|
www/scripts/generate_db.py
|
#!/usr/bin/python

# This script has to generate the sqlite database
#
# Requirements (import from):
#   - sqlite3
#
# Syntax:
#   ./generate_db.py

import sqlite3
import sys
from os import path

SCRIPT_PATH = path.dirname(__file__)
DEFAULT_DB = path.join(SCRIPT_PATH, "../mymoney.db")


def generate_tables(db=DEFAULT_DB):
    conn = sqlite3.connect(db)
    with conn:
        c = conn.cursor()

        # Drop tables if they exist
        c.execute('''DROP TABLE IF EXISTS node''')
        c.execute('''DROP TABLE IF EXISTS node_hierarchy''')
        c.execute('''DROP TABLE IF EXISTS expense''')
        c.execute('''DROP TABLE IF EXISTS node_expense''')

        # Create tables
        c.execute('''CREATE TABLE IF NOT EXISTS node (
            id INTEGER PRIMARY KEY,
            title TEXT NOT NULL)''')
        c.execute('''CREATE TABLE IF NOT EXISTS node_hierarchy (
            id INTEGER PRIMARY KEY,
            parent_id INTEGER,
            child_id INTEGER,
            FOREIGN KEY(parent_id) REFERENCES node(id),
            FOREIGN KEY(child_id) REFERENCES node(id),
            UNIQUE(child_id))''')
        c.execute('''CREATE TABLE IF NOT EXISTS expense (
            id INTEGER PRIMARY KEY,
            title TEXT NOT NULL,
            date INTEGER NOT NULL,
            value REAL NOT NULL)''')
        c.execute('''CREATE TABLE IF NOT EXISTS node_expense (
            expense_id INTEGER,
            node_id INTEGER,
            PRIMARY KEY(expense_id, node_id),
            FOREIGN KEY(expense_id) REFERENCES expense(id),
            FOREIGN KEY(node_id) REFERENCES node(id))''')

        # Commit the changes
        conn.commit()


if __name__ == '__main__':
    generate_tables(DEFAULT_DB)
|
Python
| 0.000001 |
@@ -494,69 +494,8 @@
'')%0A
- c.execute('''DROP TABLE IF EXISTS node_hierarchy''')%0A
@@ -743,170 +743,8 @@
EY,%0A
- title TEXT NOT NULL)''')%0A c.execute('''CREATE TABLE IF NOT EXISTS node_hierarchy (%0A id INTEGER PRIMARY KEY,%0A
@@ -810,24 +810,27 @@
-child_id INTEGER
+title TEXT NOT NULL
,%0A
@@ -897,117 +897,8 @@
(id)
-,%0A FOREIGN KEY(child_id) REFERENCES node(id),%0A UNIQUE(child_id)
)'''
|
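A standalone check that the simplified schema from the diff above (node without the node_hierarchy table) still round-trips a row; an in-memory database stands in for the script's file path:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE node (id INTEGER PRIMARY KEY, title TEXT NOT NULL)")
conn.execute("INSERT INTO node (title) VALUES ('food')")
print(conn.execute("SELECT * FROM node").fetchall())  # [(1, 'food')]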
4c88b3f5daa1e8e147dedfa4a103216565469f93
|
Fix timezone-naive datetime being compared to aware datetime.
|
zerver/lib/logging_util.py
|
zerver/lib/logging_util.py
|
from __future__ import absolute_import

from django.utils import timezone

import hashlib
import logging
import traceback
from datetime import datetime, timedelta

from zerver.lib.str_utils import force_bytes

# Adapted http://djangosnippets.org/snippets/2242/ by user s29 (October 25, 2010)

class _RateLimitFilter(object):
    last_error = datetime.min

    def filter(self, record):
        # type: (logging.LogRecord) -> bool
        from django.conf import settings
        from django.core.cache import cache

        # Track duplicate errors
        duplicate = False
        rate = getattr(settings, '%s_LIMIT' % self.__class__.__name__.upper(),
                       600)  # seconds
        if rate > 0:
            # Test if the cache works
            try:
                cache.set('RLF_TEST_KEY', 1, 1)
                use_cache = cache.get('RLF_TEST_KEY') == 1
            except Exception:
                use_cache = False

            if use_cache:
                if record.exc_info is not None:
                    tb = force_bytes('\n'.join(traceback.format_exception(*record.exc_info)))
                else:
                    tb = force_bytes(str(record))
                key = self.__class__.__name__.upper() + hashlib.sha1(tb).hexdigest()
                duplicate = cache.get(key) == 1
                if not duplicate:
                    cache.set(key, 1, rate)
            else:
                min_date = timezone.now() - timedelta(seconds=rate)
                duplicate = (self.last_error >= min_date)
                if not duplicate:
                    self.last_error = timezone.now()

        return not duplicate

class ZulipLimiter(_RateLimitFilter):
    pass

class EmailLimiter(_RateLimitFilter):
    pass

class ReturnTrue(logging.Filter):
    def filter(self, record):
        # type: (logging.LogRecord) -> bool
        return True

class RequireReallyDeployed(logging.Filter):
    def filter(self, record):
        # type: (logging.LogRecord) -> bool
        from django.conf import settings
        return settings.PRODUCTION

def skip_200_and_304(record):
    # type: (logging.LogRecord) -> bool
    # Apparently, `status_code` is added by Django and is not an actual
    # attribute of LogRecord; as a result, mypy throws an error if we
    # access the `status_code` attribute directly.
    if getattr(record, 'status_code') in [200, 304]:
        return False

    return True

def skip_site_packages_logs(record):
    # type: (logging.LogRecord) -> bool
    # This skips the log records that are generated from libraries
    # installed in site packages.
    # Workaround for https://code.djangoproject.com/ticket/26886
    if 'site-packages' in record.pathname:
        return False

    return True
|
Python
| 0 |
@@ -346,16 +346,45 @@
time.min
+.replace(tzinfo=timezone.utc)
%0A%0A de
|
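The crux of the fix above, shown with the stdlib timezone instead of Django's utilities: a naive datetime.min cannot be compared with an aware now(), and pinning tzinfo makes the comparison legal:

from datetime import datetime, timezone

aware_min = datetime.min.replace(tzinfo=timezone.utc)
now = datetime.now(timezone.utc)

print(aware_min <= now)  # True
# datetime.min <= now    # would raise TypeError: can't compare naive and aware datetimes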
296fd12b86cfffa0d1c239c248d6a3efca4759c3
|
Remove debug log
|
zou/app/utils/thumbnail.py
|
zou/app/utils/thumbnail.py
|
import os
import shutil
import math

from zou.app.utils import fs

from PIL import Image

Image.MAX_IMAGE_PIXELS = 20000 * 20000

RECTANGLE_SIZE = 150, 100
SQUARE_SIZE = 100, 100
PREVIEW_SIZE = 1200, 0
BIG_SQUARE_SIZE = 400, 400


def save_file(tmp_folder, instance_id, file_to_save):
    """
    Save file in given folder. The file must only be temporarily saved via
    this function.
    """
    extension = "." + file_to_save.filename.split(".")[-1].lower()
    file_name = instance_id + extension.lower()
    file_path = os.path.join(tmp_folder, file_name)
    file_to_save.save(file_path)
    im = Image.open(file_path)
    if im.mode == "CMYK":
        im = im.convert("RGB")
    im.save(file_path, "PNG")
    return file_path


def convert_jpg_to_png(file_source_path):
    """
    Convert .jpg file located at given path into a .png file with same name.
    """
    folder_path = os.path.dirname(file_source_path)
    file_source_name = os.path.basename(file_source_path)
    file_target_name = "%s.png" % file_source_name[:-4]
    file_target_path = os.path.join(folder_path, file_target_name)

    im = Image.open(file_source_path)
    if im.mode == "CMYK":
        im = im.convert("RGB")
    im.save(file_target_path, "PNG")
    fs.rm_file(file_source_path)
    return file_target_path


def get_file_name(instance_id):
    """
    Build thumbnail file name for given id.
    """
    return "%s.png" % instance_id


def get_full_size_from_width(im, width):
    """
    Compute the full (width, height) size that preserves the image ratio for
    the given width.
    """
    im_width, im_height = im.size
    ratio = float(im_height) / float(im_width)
    height = int(math.ceil(width * ratio))
    return (width, height)


def make_im_bigger_if_needed(im, size):
    im_width, im_height = im.size
    width, height = size
    if im_width < width and im_height < height:
        im = im.resize(size, Image.ANTIALIAS)
    return im


def fit_to_target_size(im, size):
    """
    Make sure that the image is contained in the size given in parameter
    (shorten width and/or height proportionally to the expected ratio).
    """
    im_width, im_height = im.size
    width, height = size
    w = width
    h = height
    original_ratio = float(im_width) / float(im_height)
    target_ratio = float(width) / float(height)
    if target_ratio != original_ratio:
        w = height * original_ratio
        if w > width:
            w = width
            h = int(math.ceil(float(width) / original_ratio))
        im = im.resize((w, h), Image.ANTIALIAS)
    return im


def turn_into_thumbnail(file_path, size=None):
    """
    Turn given picture into a smaller version.
    """
    im = Image.open(file_path)

    if size is not None:
        (width, height) = size
        if height == 0:
            size = get_full_size_from_width(im, width)
    else:
        size = im.size

    im = make_im_bigger_if_needed(im, size)
    im = fit_to_target_size(im, size)
    print(size, im.size, "turn_into_thumbnail 2")

    im.thumbnail(size, Image.LANCZOS)
    if im.mode == "CMYK":
        im = im.convert("RGB")
    im.save(file_path, "PNG")
    return file_path


def resize(file_path, size):
    """
    Resize given picture.
    """
    im = Image.open(file_path)
    im = im.resize(size, Image.ANTIALIAS)
    if im.mode == "CMYK":
        im = im.convert("RGB")
    im.save(file_path, "PNG")
    return file_path


def prepare_image_for_thumbnail(im, size):
    """
    Crop image to avoid deformation while building the target thumbnail.
    """
    im_width, im_height = im.size
    width, height = size
    original_ratio = float(im_width) / float(im_height)
    target_ratio = float(width) / float(height)
    if target_ratio > original_ratio:
        # image is too tall: take some off the top and bottom
        scale_factor = float(target_ratio) / float(original_ratio)
        crop_width = im_width
        crop_height = math.floor(float(im_height) / scale_factor)
        top_cut_line = (im_height - crop_height) / 2
        im = im.crop(
            flat(0, top_cut_line, crop_width, top_cut_line + crop_height)
        )
    else:
        # image is too wide: take some off the sides
        scale_factor = float(original_ratio) / float(target_ratio)
        crop_width = math.ceil(float(im_width) / scale_factor)
        crop_height = im_height
        side_cut_line = int(float(im_width - crop_width) / 2)
        im = im.crop(
            flat(side_cut_line, 0, side_cut_line + crop_width, crop_height)
        )
    return im


def generate_preview_variants(original_path, instance_id):
    """
    Generate three thumbnails for given picture path.

    1. Rectangle thumbnail
    2. Square thumbnail
    3. Big rectangle thumbnail
    """
    file_name = get_file_name(instance_id)
    variants = [
        ("thumbnails", RECTANGLE_SIZE),
        ("thumbnails-square", SQUARE_SIZE),
        ("previews", PREVIEW_SIZE),
    ]
    result = []
    for picture_data in variants:
        (picture_type, size) = picture_data
        folder_path = os.path.dirname(original_path)
        picture_path = os.path.join(
            folder_path, "%s-%s" % (picture_type, file_name)
        )
        shutil.copyfile(original_path, picture_path)
        turn_into_thumbnail(picture_path, size)
        result.append((picture_type, picture_path))
    return result


def url_path(data_type, instance_id):
    """
    Build thumbnail download path for given data type and instance ID.
    """
    data_type = data_type.replace("_", "-")
    return "pictures/thumbnails/%s/%s.png" % (data_type, instance_id)


def flat(*nums):
    """
    Turn an enumerable of numbers into a tuple of ints.
    """
    return tuple(int(round(n)) for n in nums)
|
Python
| 0 |
@@ -2888,58 +2888,8 @@
ize)
-%0A print(size, im.size, %22turn_into_thumbnail 2%22)
%0A%0A
|
c02036f26bfd1eb6b1fed2dc10c73c91e97dae0b
|
Update __init__.py
|
tendrl/node_agent/objects/cluster_message/__init__.py
|
tendrl/node_agent/objects/cluster_message/__init__.py
|
from tendrl.commons import etcdobj
from tendrl.commons.message import Message as message
from tendrl.commons import objects


class ClusterMessage(message, objects.BaseObject):
    internal = True

    def __init__(self, **cluster_message):
        self._defs = {}
        super(ClusterMessage, self).__init__(**cluster_message)
        self.value = 'clusters/%s/Messages/%s'
        self._etcd_cls = _ClusterMessageEtcd


class _ClusterMessageEtcd(etcdobj.EtcdObj):
    """Cluster message object, lazily updated
    """
    __name__ = 'clusters/%s/Messages/%s'
    _tendrl_cls = ClusterMessage

    def render(self):
        self.__name__ = self.__name__ % (
            self.cluster_id, self.message_id
        )
        return super(_ClusterMessageEtcd, self).render()
|
Python
| 0.000072 |
@@ -144,17 +144,8 @@
age(
-message,
obje
@@ -158,16 +158,25 @@
seObject
+, message
):%0A i
@@ -269,35 +269,15 @@
-super(ClusterMessage, self)
+message
.__i
@@ -282,16 +282,22 @@
_init__(
+self,
**cluste
@@ -307,16 +307,66 @@
essage)%0A
+ objects.BaseObject.__init__(self)%0A
%0A
|
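A sketch of the initialization issue the diff above works around: with two base classes, a single super().__init__ call follows the MRO, so the new code invokes each base's __init__ explicitly. Simplified stand-ins for the record's classes:

class Base:                  # stand-in for objects.BaseObject
    def __init__(self):
        self.base_ready = True

class Msg:                   # stand-in for Message
    def __init__(self, **kw):
        self.payload = kw

class ClusterMessage(Base, Msg):
    def __init__(self, **kw):
        Msg.__init__(self, **kw)   # explicit, one base at a time
        Base.__init__(self)

c = ClusterMessage(text="hi")
print(c.payload, c.base_ready)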
551dddbb80d512ec49d8a422b52c24e98c97b38c
|
Add waiting for new data to parse
|
tsparser/main.py
|
tsparser/main.py
|
from tsparser import config
from tsparser.parser import BaseParser, ParseException
from tsparser.parser.gps import GPSParser
from tsparser.parser.imu import IMUParser
from tsparser.sender import Sender


def parse(input_file=None):
    """
    Parse the file specified as input.

    :param input_file: file to read input from. If None, then pipe specified
        in config is used
    :type input_file: file
    """
    Sender(daemon=True).start()
    if input_file is None:
        input_file = open(config.PIPE_NAME, 'r')

    parsers = _get_parsers()
    while True:
        line = input_file.readline()
        if not line:
            continue
        _parse_line(parsers, line)


def _get_parsers():
    return [
        IMUParser(),
        GPSParser()
    ]


def _parse_line(parsers, line):
    values = line.split(',')
    BaseParser.timestamp = values.pop().strip()
    for parser in parsers:
        if parser.parse(line, *values):
            break
    else:
        raise ParseException('Output line was not parsed by any parser: {}'
                             .format(line))
|
Python
| 0 |
@@ -1,20 +1,43 @@
+from time import sleep%0A
from tsparser import
@@ -642,24 +642,48 @@
f not line:%0A
+ sleep(0.01)%0A
|
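The pattern the diff above introduces, in isolation: back off briefly instead of busy-waiting when the input has no new line yet:

from time import sleep

def follow(input_file):
    # generator that waits politely for new lines instead of spinning
    while True:
        line = input_file.readline()
        if not line:
            sleep(0.01)  # yield the CPU while no data is available
            continue
        yield line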
787db3ccc3d63d408a9b081a376343805b887368
|
Allow using VERBOSE env var to enable debug logs
|
tsrc/cli/main.py
|
tsrc/cli/main.py
|
""" Main tsrc entry point """
import argparse
import functools
import importlib
import sys
import textwrap
import colored_traceback
import ui
import tsrc
def fix_cmd_args_for_foreach(args, foreach_parser):
""" We want to support both:
$ tsrc foreach -c 'shell command'
and
$ tsrc foreach -- some-cmd --some-opts
Due to argparse limitations, args.cmd will always be
a list, but we nee a *string* when using 'shell=True'
So transform the argparse.Namespace object to have
* args.cmd suitable to pass to subprocess later
* args.cmd_as_str suitable for display purposes
"""
def die(message):
ui.error(message)
print(foreach_parser.epilog, end="")
sys.exit(1)
if args.shell:
if len(args.cmd) != 1:
die("foreach -c must be followed by exactly one argument")
cmd = args.cmd[0]
cmd_as_str = cmd
else:
cmd = args.cmd
if not cmd:
die("needs a command to run")
cmd_as_str = " ".join(cmd)
args.cmd = cmd
args.cmd_as_str = cmd_as_str
def workspace_subparser(subparser, name):
parser = subparser.add_parser(name)
parser.add_argument("-w", "--workspace", dest="workspace_path")
return parser
def main_wrapper(main_func):
""" Wraps main() entry point to better deal with errors """
@functools.wraps(main_func)
def wrapped(args=None):
colored_traceback.add_hook()
try:
main_func(args=args)
except tsrc.Error as e:
# "expected" failure, display it and exit
if e.message:
ui.error(e.message)
sys.exit(1)
except KeyboardInterrupt:
ui.warning("Interrupted by user, quitting")
sys.exit(1)
return wrapped
@main_wrapper
def main(args=None):
parser = argparse.ArgumentParser()
parser.add_argument("--verbose", help="Show debug messages",
action="store_true")
parser.add_argument("-q", "--quiet", help="Only display warnings and errors",
action="store_true")
parser.add_argument("--color", choices=["auto", "always", "never"])
subparsers = parser.add_subparsers(title="subcommands", dest="command")
subparsers.add_parser("version")
foreach_parser = workspace_subparser(subparsers, "foreach")
foreach_parser.add_argument("cmd", nargs="*")
foreach_parser.add_argument("-c", dest="shell", action="store_true")
foreach_parser.epilog = textwrap.dedent("""\
Usage:
# Run command directly
tsrc foreach -- some-cmd --with-option
Or:
# Run command through the shell
tsrc foreach -c 'some cmd'
""")
foreach_parser.formatter_class = argparse.RawDescriptionHelpFormatter
init_parser = workspace_subparser(subparsers, "init")
init_parser.add_argument("manifest_url", nargs="?")
init_parser.add_argument("-b", "--branch")
init_parser.add_argument("-g", "--group", action="append", dest="groups")
init_parser.add_argument("-s", "--shallow", action="store_true", dest="shallow", default=False)
init_parser.set_defaults(branch="master")
log_parser = workspace_subparser(subparsers, "log")
log_parser.add_argument("--from", required=True, dest="from_", metavar="FROM")
log_parser.add_argument("--to")
log_parser.set_defaults(to="HEAD")
push_parser = workspace_subparser(subparsers, "push")
push_parser.add_argument("-f", "--force", action="store_true", default=False)
push_parser.add_argument("-t", "--target", dest="target_branch", default="master")
github_group = push_parser.add_argument_group("github options")
github_group.add_argument("--merge", help="Merge pull request", action="store_true")
github_group.add_argument("--title", help="Title of the pull request")
gitlab_group = push_parser.add_argument_group("gitlab options")
gitlab_group.add_argument("--accept", action="store_true")
message_group = gitlab_group.add_mutually_exclusive_group()
message_group.add_argument("-m", "--message", dest="mr_title")
message_group.add_argument("--wip", action="store_true", help="Mark merge request as WIP")
message_group.add_argument("--ready", action="store_true", help="Mark merge request as ready")
gitlab_group.add_argument("-a", "--assignee", dest="assignee")
workspace_subparser(subparsers, "status")
workspace_subparser(subparsers, "sync")
args = parser.parse_args(args=args)
ui.setup(verbose=args.verbose, quiet=args.quiet, color=args.color)
command = args.command
if not command:
parser.print_help()
sys.exit(1)
module = importlib.import_module("tsrc.cli.%s" % command)
if command == "foreach":
fix_cmd_args_for_foreach(args, foreach_parser)
return module.main(args)
|
Python
| 0.000003 |
@@ -74,16 +74,26 @@
portlib%0A
+import os%0A
import s
@@ -1811,16 +1811,232 @@
apped%0A%0A%0A
+def setup_ui(args):%0A verbose = None%0A if os.environ.get(%22VERBOSE%22):%0A verbose = True%0A if args.verbose:%0A verbose = args.verbose%0A ui.setup(verbose=verbose, quiet=args.quiet, color=args.color)%0A%0A%0A
@main_wr
@@ -4742,73 +4742,21 @@
-ui.
setup
-(verbose=args.verbose, quiet=args.quiet, color=args.color
+_ui(args
)%0A%0A
|
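The precedence the diff above implements, as a standalone sketch: an explicit --verbose flag wins, otherwise a non-empty VERBOSE environment variable turns debug output on:

import os

def resolve_verbose(cli_verbose):
    verbose = None
    if os.environ.get("VERBOSE"):
        verbose = True
    if cli_verbose:
        verbose = cli_verbose
    return verbose

print(resolve_verbose(False))  # None unless VERBOSE is set in the environment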
d2a0c928b9cdb693ca75731e1ae2cefb4c7ae722
|
fix Episode JSON export
|
tvd/core/json.py
|
tvd/core/json.py
|
#!/usr/bin/env python
# encoding: utf-8

#
# The MIT License (MIT)
#
# Copyright (c) 2013-2014 CNRS (Hervé BREDIN -- http://herve.niderb.fr/)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#

from __future__ import unicode_literals

import simplejson as json
import pyannote.core.json

TVD_JSON = 'tvd'


def object_hook(d):
    """
    Usage
    -----
    >>> with open('file.json', 'r') as f:
    ...     json.load(f, object_hook=object_hook)
    """
    from episode import Episode

    if TVD_JSON in d:
        if d[TVD_JSON] == 'episode':
            return Episode.from_json(d)

    d = pyannote.core.json.object_hook(d)

    return d


def load(path):
    with open(path, 'r') as f:
        data = json.load(f, encoding='utf-8', object_hook=object_hook)
    return data


def dump(data, path):
    with open(path, 'w') as f:
        json.dump(data, f, encoding='utf-8', for_json=True)
|
Python
| 0.000008 |
@@ -1538,17 +1538,17 @@
ON%5D == '
-e
+E
pisode':
@@ -1646,16 +1646,17 @@
turn d%0A%0A
+%0A
def load
|
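A standalone sketch of the object_hook dispatch used above; the fix makes the type-tag comparison use the capitalized 'Episode' that the encoder actually writes. Episode.from_json is replaced by a stand-in here:

import json

def object_hook(d):
    if d.get('tvd') == 'Episode':  # was 'episode', which never matched
        return ('Episode', d['season'])  # stand-in for Episode.from_json(d)
    return d

line = '{"tvd": "Episode", "season": 1}'
print(json.loads(line, object_hook=object_hook))  # ('Episode', 1)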
240d697f378f85271278c47640d3328996db606e
|
Fix error message reporting.
|
tweepy/binder.py
|
tweepy/binder.py
|
# Tweepy
# Copyright 2009 Joshua Roesslein
# See LICENSE

import httplib
import urllib
import time

from . parsers import parse_error
from . error import TweepError

try:
    import json  # Python >= 2.6
except ImportError:
    try:
        import simplejson as json  # Python < 2.6
    except ImportError:
        try:
            from django.utils import simplejson as json  # Google App Engine
        except ImportError:
            raise ImportError, "Can't load a json library"


def bind_api(path, parser, allowed_param=[], method='GET', require_auth=False,
             timeout=None, host=None):

    def _call(api, *args, **kargs):
        # If require auth, throw exception if credentials not provided
        if require_auth and not api.auth_handler:
            raise TweepError('Authentication required!')

        # check for post data
        post_data = kargs.pop('post_data', None)

        # check for retry request parameters
        retry_count = kargs.pop('retry_count', api.retry_count)
        retry_delay = kargs.pop('retry_delay', api.retry_delay)
        retry_errors = kargs.pop('retry_errors', api.retry_errors)

        # check for headers
        headers = kargs.pop('headers', {})

        # build parameter dict
        if allowed_param:
            parameters = {}
            for idx, arg in enumerate(args):
                try:
                    parameters[allowed_param[idx]] = arg
                except IndexError:
                    raise TweepError('Too many parameters supplied!')
            for k, arg in kargs.items():
                if arg is None:
                    continue
                if k in parameters:
                    raise TweepError('Multiple values for parameter %s supplied!' % k)
                if k not in allowed_param:
                    raise TweepError('Invalid parameter %s supplied!' % k)
                parameters[k] = arg
        else:
            if len(args) > 0 or len(kargs) > 0:
                raise TweepError('This method takes no parameters!')
            parameters = None

        # Build url with parameters
        if parameters:
            url = '%s?%s' % (api.api_root + path, urllib.urlencode(parameters))
        else:
            url = api.api_root + path

        # Check cache if caching enabled and method is GET
        if api.cache and method == 'GET':
            cache_result = api.cache.get(url, timeout)
            # if cache result found and not expired, return it
            if cache_result:
                # must restore api reference
                if isinstance(cache_result, list):
                    for result in cache_result:
                        result._api = api
                else:
                    cache_result._api = api
                return cache_result

        # get scheme and host
        if api.secure:
            scheme = 'https://'
        else:
            scheme = 'http://'
        _host = host or api.host

        # Continue attempting request until successful
        # or maximum number of retries is reached.
        retries_performed = 0
        while retries_performed < retry_count + 1:
            # Open connection
            # FIXME: add timeout
            if api.secure:
                conn = httplib.HTTPSConnection(_host)
            else:
                conn = httplib.HTTPConnection(_host)

            # Apply authentication
            if api.auth_handler:
                api.auth_handler.apply_auth(
                    scheme + _host + url,
                    method, headers, parameters
                )

            # Build request
            conn.request(method, url, headers=headers, body=post_data)

            # Get response
            resp = conn.getresponse()

            # Exit request loop if non-retry error code
            if retry_errors is None:
                if resp.status == 200: break
            else:
                if resp.status not in retry_errors: break

            # Sleep before retrying request again
            time.sleep(retry_delay)
            retries_performed += 1

        # If an error was returned, throw an exception
        api.last_response = resp
        if resp.status != 200:
            try:
                error_msg = parse_error(resp.read())
            except Exception:
                error_msg = "Twitter error response: status code = %s" % resp.status
            raise TweepError(error_msg)

        # Parse json response body
        try:
            jobject = json.loads(resp.read())
        except Exception:
            raise TweepError("Failed to parse json response text")

        # Parse cursor information
        if isinstance(jobject, dict):
            next_cursor = jobject.get('next_cursor')
            prev_cursor = jobject.get('previous_cursor')
        else:
            next_cursor = None
            prev_cursor = None

        # Pass json object into parser
        try:
            if next_cursor is not None and prev_cursor is not None:
                out = parser(jobject, api), next_cursor, prev_cursor
            else:
                out = parser(jobject, api)
        except Exception:
            raise TweepError("Failed to parse json object")

        conn.close()

        # validate result
        if api.validate:
            # list of results
            if isinstance(out, list) and len(out) > 0:
                if hasattr(out[0], 'validate'):
                    for result in out:
                        result.validate()
            # single result
            else:
                if hasattr(out, 'validate'):
                    out.validate()

        # store result in cache
        if api.cache and method == 'GET':
            api.cache.store(url, out)

        return out

    # Set pagination mode
    if 'cursor' in allowed_param:
        _call.pagination_mode = 'cursor'
    elif 'page' in allowed_param:
        _call.pagination_mode = 'page'

    return _call
|
Python
| 0 |
@@ -4259,16 +4259,27 @@
e_error(
+json.loads(
resp.rea
@@ -4274,32 +4274,33 @@
ads(resp.read())
+)
%0A exc
|
cd4da2e0fbed7bbadd4b110f45b7356795075aeb
|
add min_level to Logger
|
twiggy/Logger.py
|
twiggy/Logger.py
|
from Message import Message
import Levels

class Logger(object):

    __slots__ = ['_fields', 'emitters']

    def __init__(self, fields = None, emitters = None):
        self._fields = fields if fields is not None else {}
        self.emitters = emitters if emitters is not None else {}

    def fields(self, **kwargs):
        new_fields = self._fields.copy().update(**kwargs)
        return self.__class__(new_fields, self.emitters)

    def name(self, name):
        return self.fields(name=name)

    def struct(self, **kwargs):
        self.fields(**kwargs).info()

    def _emit(self, level, format_spec = '', *args, **kwargs):
        msg = Message(level, format_spec, self._fields.copy(), *args, **kwargs)
        for emitter in self.emitters.itervalues():
            if emitter.min_level >= msg.level:
                # XXX add appropriate error trapping & logging; watch for recursion
                emitter.emit(msg)

    def debug(self, *args, **kwargs):
        self._emit(Levels.DEBUG, *args, **kwargs)

    def info(self, *args, **kwargs):
        self._emit(Levels.INFO, *args, **kwargs)

    def warning(self, *args, **kwargs):
        self._emit(Levels.WARNING, *args, **kwargs)

    def error(self, *args, **kwargs):
        self._emit(Levels.ERROR, *args, **kwargs)

    def critical(self, *args, **kwargs):
        self._emit(Levels.CRITICAL, *args, **kwargs)
|
Python
| 0.000001 |
@@ -96,16 +96,29 @@
mitters'
+, 'min_level'
%5D%0A%0A d
@@ -165,16 +165,42 @@
s = None
+, min_level = Levels.DEBUG
):%0A
@@ -318,16 +318,51 @@
else %7B%7D
+%0A self.min_level = min_level
%0A%0A de
@@ -500,16 +500,32 @@
emitters
+, self.min_level
)%0A%0A d
@@ -710,32 +710,75 @@
rgs, **kwargs):%0A
+ if level %3C self.min_level: return%0A%0A
msg = Me
|
a50c7c32f28d6f6e0ba369fc91f67f90edda7a66
|
Add a processing function to the server to simplify end of burst
|
txircd/server.py
|
txircd/server.py
|
from twisted.internet import reactor
from twisted.internet.defer import Deferred
from twisted.internet.task import LoopingCall
from twisted.words.protocols.irc import IRC

class IRCServer(IRC):
    def __init__(self, ircd, ip, received):
        self.ircd = ircd
        self.serverID = None
        self.name = None
        self.description = None
        self.ip = ip
        self.remoteServers = {}
        self.nextClosest = self.ircd.serverID
        self.cache = {}
        self.bursted = None
        self.disconnectedDeferred = Deferred()
        self.receivedConnection = received
        self._pinger = LoopingCall(self._ping)
        self._registrationTimeoutTimer = reactor.callLater(self.ircd.config.getWithDefault("server_registration_timeout", 10), self._timeoutRegistration)

    def handleCommand(self, command, prefix, params):
        if command not in self.ircd.serverCommands:
            self.disconnect("Unknown command {}".format(command)) # If we receive a command we don't recognize, abort immediately to avoid a desync
            return
        if self.bursted is False:
            if "burst_queue" not in self.cache:
                self.cache["burst_queue"] = []
            self.cache["burst_queue"].append((command, prefix, params))
            return
        handlers = self.ircd.serverCommands[command]
        data = None
        for handler in handlers:
            data = handler[0].parseParams(self, params, prefix, {})
            if data is not None:
                break
        if data is None:
            self.disconnect("Failed to parse command {} from {} with parameters '{}'".format(command, prefix, " ".join(params))) # If we receive a command we can't parse, also abort immediately
            return
        for handler in handlers:
            if handler[0].execute(self, data):
                break
        else:
            self.disconnect("Couldn't process command {} from {} with parameters '{}'".format(command, prefix, " ".join(params))) # Also abort connection if we can't process a command
            return

    def connectionLost(self, reason):
        if self.serverID in self.ircd.servers:
            self.disconnect("Connection reset")
        self.disconnectedDeferred.callback(None)

    def disconnect(self, reason):
        self.ircd.runActionStandard("serverquit", self, reason)
        del self.ircd.servers[self.serverID]
        del self.ircd.serverNames[self.name]
        netsplitQuitMsg = "{} {}".format(self.ircd.servers[self.nextClosest].name if self.nextClosest in self.ircd.servers else self.ircd.name, self.name)
        allUsers = self.ircd.users.values()
        for user in allUsers:
            if user.uuid[:3] == self.serverID or user.uuid[:3] in self.remoteServers:
                user.disconnect(netsplitQuitMsg)
        self._endConnection()

    def _endConnection(self):
        self.transport.loseConnection()

    def _timeoutRegistration(self):
        if self.serverID and self.name:
            self._pinger.start(self.ircd.config.getWithDefault("server_ping_frequency", 60))
            return
        self.disconnect("Registration timeout")

    def _ping(self):
        self.ircd.runActionStandard("pingserver", self)

    def register():
        if not self.serverID:
            return
        if not self.name:
            return
        self.ircd.servers[self.serverID] = self
        self.ircd.serverNames[self.name] = self.serverID
        self.ircd.runActionStandard("serverconnect", self)

class RemoteServer(IRCServer):
    def __init__(self, ircd, ip):
        IRCServer.__init__(self, ircd, ip, True)
        self._registrationTimeoutTimer.cancel()

    def sendMessage(self, command, *params, **kw):
        target = self
        while target.nextClosest != self.ircd.serverID:
            target = self.ircd.servers[target.nextClosest]
        target.sendMessage(command, *params, **kw)

    def _endConnection(self):
        pass
|
Python
| 0.000001 |
@@ -2069,24 +2069,241 @@
return%0A %0A
+ def endBurst(self):%0A self.bursted = True%0A for command, prefix, params in self.cache%5B%22burst_queue%22%5D:%0A self.handleCommand(command, prefix, params)%0A del self.cache%5B%22burst_queue%22%5D%0A %0A
def conn
|
8d125eec9faa0da86ea3557cd7a618a173bc4f47
|
fix for gettext headers in help
|
external/django-ajax-selects-1.1.4/ajax_select/__init__.py
|
external/django-ajax-selects-1.1.4/ajax_select/__init__.py
|
"""JQuery-Ajax Autocomplete fields for Django Forms"""
__version__ = "1.1.4"
__author__ = "crucialfelix"
__contact__ = "[email protected]"
__homepage__ = "http://code.google.com/p/django-ajax-selects/"
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db.models.fields.related import ForeignKey, ManyToManyField
from django.forms.models import ModelForm
from django.utils.text import capfirst
from django.utils.translation import ugettext_lazy as _, ugettext
def make_ajax_form(model,fieldlist,superclass=ModelForm):
""" this will create a ModelForm subclass inserting
AutoCompleteSelectMultipleField (many to many),
AutoCompleteSelectField (foreign key)
where specified in the fieldlist:
dict(fieldname='channel',...)
usage:
class YourModelAdmin(Admin):
...
form = make_ajax_form(YourModel,dict(contacts='contact',author='contact'))
where 'contacts' is a many to many field, specifying to use the lookup channel 'contact'
and
where 'author' is a foreign key field, specifying here to also use the lookup channel 'contact'
"""
class TheForm(superclass):
class Meta:
pass
setattr(Meta, 'model', model)
for model_fieldname,channel in fieldlist.iteritems():
f = make_ajax_field(model,model_fieldname,channel)
TheForm.declared_fields[model_fieldname] = f
TheForm.base_fields[model_fieldname] = f
setattr(TheForm,model_fieldname,f)
return TheForm
def make_ajax_field(model,model_fieldname,channel,**kwargs):
""" makes an ajax select / multiple select / autocomplete field
copying the label and help text from the model's db field
optional args:
help_text - note that django's ManyToMany db field will append
'Hold down "Control", or "Command" on a Mac, to select more than one.'
to your db field's help text.
Therefore you are better off passing it in here
label - default is db field's verbose name
required - default's to db field's (not) blank
"""
from ajax_select.fields import AutoCompleteField, \
AutoCompleteSelectMultipleField, \
AutoCompleteSelectField
field = model._meta.get_field(model_fieldname)
if kwargs.has_key('label'):
label = kwargs.pop('label')
else:
label = _(capfirst(unicode(field.verbose_name)))
if kwargs.has_key('help_text'):
help_text = kwargs.pop('help_text')
else:
if isinstance(field.help_text,basestring):
help_text = _(field.help_text)
else:
help_text = field.help_text
if kwargs.has_key('required'):
required = kwargs.pop('required')
else:
required = not field.blank
if isinstance(field,ManyToManyField):
f = AutoCompleteSelectMultipleField(
channel,
required=required,
help_text=help_text,
label=label,
**kwargs
)
elif isinstance(field,ForeignKey):
f = AutoCompleteSelectField(
channel,
required=required,
help_text=help_text,
label=label,
**kwargs
)
else:
f = AutoCompleteField(
channel,
required=required,
help_text=help_text,
label=label,
**kwargs
)
return f
def get_lookup(channel):
""" find the lookup class for the named channel. this is used internally """
try:
lookup_label = settings.AJAX_LOOKUP_CHANNELS[channel]
except (KeyError, AttributeError):
raise ImproperlyConfigured("settings.AJAX_LOOKUP_CHANNELS not configured correctly for %r" % channel)
if isinstance(lookup_label,dict):
# 'channel' : dict(model='app.model', search_field='title' )
# generate a simple channel dynamically
return make_channel( lookup_label['model'], lookup_label['search_field'] )
else:
# 'channel' : ('app.module','LookupClass')
# from app.module load LookupClass and instantiate
lookup_module = __import__( lookup_label[0],{},{},[''])
lookup_class = getattr(lookup_module,lookup_label[1] )
return lookup_class()
def make_channel(app_model,search_field):
""" used in get_lookup
app_model : app_name.model_name
search_field : the field to search against and to display in search results """
from django.db import models
app_label, model_name = app_model.split(".")
model = models.get_model(app_label, model_name)
class AjaxChannel(object):
def get_query(self,q,request):
""" return a query set searching for the query string q """
kwargs = { "%s__icontains" % search_field : q }
return model.objects.filter(**kwargs).order_by(search_field)
def format_item(self,obj):
""" format item for simple list of currently selected items """
return unicode(obj)
def format_result(self,obj):
""" format search result for the drop down of search results. may include html """
return unicode(obj)
def get_objects(self,ids):
""" get the currently selected objects """
return model.objects.filter(pk__in=ids).order_by(search_field)
return AjaxChannel()
|
Python
| 0.000012 |
@@ -2763,16 +2763,42 @@
estring)
+ and field.help_text != ''
:%0A
|
04f2c9005a04559a48ad0919b840d709c0f4eeaa
|
Update version.
|
neupy/__init__.py
|
neupy/__init__.py
|
"""
NeuPy is the Artificial Neural Network library implemented in Python.
"""
__version__ = '0.1.1a'
|
Python
| 0 |
@@ -93,11 +93,10 @@
= '0.1.1
-a
'%0A
|