commit (string, 40–40) | subject (string, 1–3.25k) | old_file (string, 4–311) | new_file (string, 4–311) | old_contents (string, 0–26.3k) | lang (string, 3 classes) | proba (float64, 0–1) | diff (string, 0–7.82k)
---|---|---|---|---|---|---|---|
33f35afe991dd1a3c0ba1868cceee32a4e45b8e4
|
version bump patch
|
filetype/__init__.py
|
filetype/__init__.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from .filetype import * # noqa
from .helpers import * # noqa
from .match import * # noqa
# Current package semver version
__version__ = version = '1.0.5'
|
Python
| 0 |
@@ -217,7 +217,7 @@
1.0.
-5
+6
'%0A
|
b2859bfde66d7d91f98e3cfb61e205c1d2f5dbfe
|
Make CommentFactory use fuzzy attrs
|
hackernews_scraper/test/factories.py
|
hackernews_scraper/test/factories.py
|
import factory


class ItemFactory(factory.Factory):
    FACTORY_FOR = dict

    objectID = 21
    created_at_i = 42
    title = "Test item"


class CommentFactory(factory.Factory):
    FACTORY_FOR = dict

    created_at = "2014-04-03T10:17:28.000Z"
    title = "Test comment"
    url = "www.google.com"
    comment_text = "Fuzzy wuzzy was a bear"
    story_id = 42
    story_title = "Bear kills man"
    story_url = "www.bing.com"
    author = "yourmom"
    points = 42
    created_at_i = 42
    objectID = 42
    parent_id = 42


class StoryFactory(factory.Factory):
    FACTORY_FOR = dict

    created_at = "2014-04-03T10:17:28.000Z"
    created_at_i = 42
    title = "Test story"
    url = "www.google.com"
    author = "yourdad"
    points = 42
    story_text = "Fuzzy wuzzy had no hair"
    story_id = 42


class ResponseFactory(factory.Factory):
    FACTORY_FOR = dict

    nbPages = 1
    hits = [ItemFactory(), ItemFactory()]
    nbHits = factory.LazyAttribute(lambda x: x.nbPages * len(x.hits))
    hitsPerPage = factory.LazyAttribute(lambda x: len(x.hits))
|
Python
| 0 |
@@ -1,18 +1,121 @@
-import factory
+from datetime import datetime, timedelta%0Aimport factory%0Afrom factory.fuzzy import FuzzyText, FuzzyInteger%0Aimport time
%0A%0A%0Ac
@@ -310,74 +310,228 @@
-created_at = %222014-04-03T10:17:28.000Z%22%0A title = %22Test comment%22
+@factory.sequence%0A def created_at(n):%0A return (datetime.now() - timedelta(minutes=n)).isoformat()%0A%0A @factory.sequence%0A def created_at_i(n):%0A return time.time() - n%0A%0A title = FuzzyText(length=20)
%0A
@@ -577,32 +577,29 @@
t =
-%22
Fuzzy
- wuzzy was a bear%22
+Text(length=300)
%0A
@@ -696,92 +696,126 @@
r =
-%22yourmom%22%0A points = 42%0A created_at_i = 42%0A objectID = 42%0A parent_id = 42
+FuzzyText(length=10)%0A points = FuzzyInteger(100)%0A objectID = FuzzyInteger(100)%0A parent_id = FuzzyInteger(100)
%0A%0A%0Ac
@@ -878,52 +878,8 @@
ct%0A%0A
- created_at = %222014-04-03T10:17:28.000Z%22%0A
@@ -1305,9 +1305,8 @@
.hits))%0A
-%0A
|
8039e38ae806bc3aecfa2cb9824ebfd1c9fdc10e
|
Revert "Potential fix for cell caching issue"
|
powershell_kernel/powershell_proxy.py
|
powershell_kernel/powershell_proxy.py
|
import threading
try:
    import queue
except ImportError:
    import Queue as queue

from threading import Timer
from time import sleep


class ReplReader(threading.Thread):
    def __init__(self, repl):
        super(ReplReader, self).__init__()
        self.repl = repl
        self.daemon = True
        self.queue = queue.Queue()
        self.start()

    def run(self):
        r = self.repl
        q = self.queue
        while True:
            result = r.read()
            q.put(result)
            if result is None:
                break


class ReplProxy(object):
    def __init__(self, repl):
        self._repl = repl
        self.expected_carets = 1
        self._repl_reader = ReplReader(repl)
        # this is a hack to detect when we stop processing this input
        self.send_input('function prompt() {"^"}')
        self.stop_flag = False
        self.output = ''
        self.timer = Timer(0.1, self.update_view_loop)
        self.timer.start()
        # get preamble and evaluation of the prompt
        self.get_output()
        self.output_prefix_stripped = True
        self.expected_output_prefix = ''
        self.expected_output_len = 0

    def get_output(self):
        while not self.stop_flag:
            sleep(0.05)
        out = self.output
        self.output = ''
        self.stop_flag = False
        return out

    def send_input(self, input):
        # TODO: we should block here until we return output for previous command, should we?
        # for multiline statements we should send 1 extra new line
        # https://stackoverflow.com/questions/13229066/how-to-end-a-multi-line-command-in-powershell
        if '\n' in input:
            input += '\n'
        self.expected_carets = input.count('\n')
        self.expected_output_prefix = input.replace('\n', '\n>> ') + '\n'
        self.expected_output_len = len(self.expected_output_prefix)
        self.output_prefix_stripped = False
        self._repl.write(input + '\n')

    def handle_repl_output(self):
        """Returns new data from Repl and bool indicating if Repl is still
        working"""
        if self.stop_flag:
            return True
        try:
            while True:
                packet = self._repl_reader.queue.get_nowait()
                if packet is None:
                    return False
                self.write(packet)
        except queue.Empty:
            return True

    def update_view_loop(self):
        is_still_working = self.handle_repl_output()
        if is_still_working:
            self.timer = Timer(0.1, self.update_view_loop)
            self.timer.start()
        else:
            self.write("\n***Repl Killed***\n")

    def write(self, packet):
        # this is a hack to detect when we stop processing this input
        if packet == '^':
            self.expected_carets -= 1
            if self.expected_carets < 1:
                self.stop_flag = True
            return
        self.output += packet
        if not self.output_prefix_stripped and len(self.output) >= self.expected_output_len:
            if self.output[:self.expected_output_len] != self.expected_output_prefix:
                print("Unexpected prefix: %r : Expected %r" % (
                    self.output[:self.expected_output_len], self.expected_output_prefix
                ))
            else:
                self.output_prefix_stripped = True
                self.output = self.output[self.expected_output_len:]
|
Python
| 0 |
@@ -627,41 +627,8 @@
epl%0A
- self.expected_carets = 1%0A
@@ -668,16 +668,16 @@
r(repl)%0A
+
@@ -1675,61 +1675,8 @@
'%5Cn'
-%0A self.expected_carets = input.count('%5Cn')
%0A%0A
@@ -2737,91 +2737,8 @@
%5E':%0A
- self.expected_carets -= 1%0A if self.expected_carets %3C 1:%0A
|
fc03e0fdf3e7389ee668d66cf7224df2aaaaa95b
|
set write permissions for cfg_group on vyos-migrate.log
|
python/vyos/migrator.py
|
python/vyos/migrator.py
|
# Copyright 2019 VyOS maintainers and contributors <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.

import sys
import os
import subprocess

import vyos.version
import vyos.defaults
import vyos.systemversions as systemversions
import vyos.formatversions as formatversions


class MigratorError(Exception):
    pass


class Migrator(object):
    def __init__(self, config_file, force=False, set_vintage=None):
        self._config_file = config_file
        self._force = force
        self._set_vintage = set_vintage
        self._config_file_vintage = None
        self._log_file = None
        self._changed = False

    def read_config_file_versions(self):
        """
        Get component versions from config file footer and set vintage;
        return empty dictionary if config string is missing.
        """
        cfg_file = self._config_file
        component_versions = {}

        cfg_versions = formatversions.read_vyatta_versions(cfg_file)
        if cfg_versions:
            self._config_file_vintage = 'vyatta'
            component_versions = cfg_versions

        cfg_versions = formatversions.read_vyos_versions(cfg_file)
        if cfg_versions:
            self._config_file_vintage = 'vyos'
            component_versions = cfg_versions

        return component_versions

    def update_vintage(self):
        old_vintage = self._config_file_vintage

        if self._set_vintage:
            self._config_file_vintage = self._set_vintage

        if not self._config_file_vintage:
            self._config_file_vintage = vyos.defaults.cfg_vintage

        if self._config_file_vintage not in ['vyatta', 'vyos']:
            raise MigratorError("Unknown vintage.")

        if self._config_file_vintage == old_vintage:
            return False
        else:
            return True

    def open_log_file(self):
        """
        Open log file for migration, catching any error.
        Note that, on boot, migration takes place before the canonical log
        directory is created, hence write to the config file directory.
        """
        self._log_file = os.path.join(vyos.defaults.directories['config'],
                                      'vyos-migrate.log')
        try:
            log = open('{0}'.format(self._log_file), 'w')
            log.write("List of executed migration scripts:\n")
        except Exception as e:
            print("Logging error: {0}".format(e))
            return None

        return log

    def run_migration_scripts(self, config_file_versions, system_versions):
        """
        Run migration scripts iteratively, until config file version equals
        system component version.
        """
        log = self.open_log_file()

        cfg_versions = config_file_versions
        sys_versions = system_versions

        sys_keys = list(sys_versions.keys())
        sys_keys.sort()

        rev_versions = {}

        for key in sys_keys:
            sys_ver = sys_versions[key]
            if key in cfg_versions:
                cfg_ver = cfg_versions[key]
            else:
                cfg_ver = 0

            migrate_script_dir = os.path.join(
                vyos.defaults.directories['migrate'], key)

            while cfg_ver < sys_ver:
                next_ver = cfg_ver + 1

                migrate_script = os.path.join(migrate_script_dir,
                                              '{}-to-{}'.format(cfg_ver, next_ver))

                try:
                    subprocess.check_call([migrate_script,
                                           self._config_file])
                except FileNotFoundError:
                    pass
                except Exception as err:
                    print("\nMigration script error: {0}: {1}."
                          "".format(migrate_script, err))
                    sys.exit(1)

                if log:
                    try:
                        log.write('{0}\n'.format(migrate_script))
                    except Exception as e:
                        print("Error writing log: {0}".format(e))

                cfg_ver = next_ver
            rev_versions[key] = cfg_ver

        if log:
            log.close()

        return rev_versions

    def write_config_file_versions(self, cfg_versions):
        """
        Write new versions string.
        """
        versions_string = formatversions.format_versions_string(cfg_versions)

        os_version_string = vyos.version.get_version()

        if self._config_file_vintage == 'vyatta':
            formatversions.write_vyatta_versions_foot(self._config_file,
                                                      versions_string,
                                                      os_version_string)

        if self._config_file_vintage == 'vyos':
            formatversions.write_vyos_versions_foot(self._config_file,
                                                    versions_string,
                                                    os_version_string)

    def run(self):
        """
        Gather component versions from config file and system.
        Run migration scripts.
        Update vintage ('vyatta' or 'vyos'), if needed.
        If changed, remove old versions string from config file, and
        write new versions string.
        """
        cfg_file = self._config_file

        cfg_versions = self.read_config_file_versions()
        if self._force:
            # This will force calling all migration scripts:
            cfg_versions = {}

        sys_versions = systemversions.get_system_versions()

        rev_versions = self.run_migration_scripts(cfg_versions, sys_versions)

        if rev_versions != cfg_versions:
            self._changed = True

        if self.update_vintage():
            self._changed = True

        if not self._changed:
            return

        formatversions.remove_versions(cfg_file)

        self.write_config_file_versions(rev_versions)

    def config_changed(self):
        return self._changed


class VirtualMigrator(Migrator):
    def __init__(self, config_file, vintage='vyos'):
        super().__init__(config_file, set_vintage=vintage)

    def run(self):
        cfg_file = self._config_file

        cfg_versions = self.read_config_file_versions()
        if not cfg_versions:
            raise MigratorError("Config file has no version information;"
                                " virtual migration not possible.")

        if self.update_vintage():
            self._changed = True

        if not self._changed:
            return

        formatversions.remove_versions(cfg_file)

        self.write_config_file_versions(cfg_versions)
|
Python
| 0 |
@@ -2823,16 +2823,100 @@
e.log')%0A
+ # on creation, allow write permission for cfg_group%0A os.umask(0o113)%0A
|
c1d3f48ca717f627ca461c73f5be308a57fbdcf7
|
MOVE handler_name to dict
|
wood/wood.py
|
wood/wood.py
|
"""
Wood author: thislight
Copyright 2016 thislight
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Under License Apache v2, more information, see file 'LICENSE' in project root directory.
"""
import tornado.httpserver as _httpserver
import tornado.web as _web
import tornado.ioloop as _ioloop
from .timeit import timeit
import logging
import time
BASELOGTEMPLATE = '{method} {httpver} {path} {handler_name} {request_time}s/{timeit}s'
class BaseTornadoView(_web.RequestHandler):
def _get_info(self):
_r = self.request
return dict(
method=_r.method,
path=_r.path,
httpver=_r.version,
cliip=_r.remote_ip,
p=_r.protocol,
issec=True if _r.protocol.endswith('s') else False,
host=_r.host,
args=_r.arguments,
request_time=_r.request_time(),
timeit=self._time,
)
def __log__(self):
info = self._get_info()
return BASELOGTEMPLATE.format(**info,handler_name=self.__name__)
def timeit_callback(self,s):
self._time = s
class RegisterAllow(object):
pass
class OverrideObject(object):
def override(self,name,back=False):
def override(func):
setattr(self.handler,name,func)
if back: return func
return override
class _PackedView(RegisterAllow,OverrideObject):
def __init__(self,view,uri='/'):
self._view = view
self.__name__ = self._view.__name__ + '_Packed'
self._uri = uri
@property
def get(self):
return self.override('get',back=True)
@property
def post(self):
return self.override('post',back=True)
@property
def head(self):
return self.override('head',back=True)
@property
def put(self):
return self.override('put',back=True)
@property
def delete(self):
return self.override('delete',back=True)
@property
def patch(self):
return self.override('patch',back=True)
@property
def options(self):
return self.override('options',back=True)
@property
def handler(self):
return self._view
@handler.setter
def handler(self,value):
self._view = value
@property
def uri(self):
return self._uri
def _timeit(self,f):
return timeit(callback=self. handler.timeit_callback,num=2)(f)
def override(self, back=False):
def override(func):
setattr(self.handler,name,func)
if back: self._timeit(return func)
return override
def _make_empty_view(name='View',uri,*parents):
"""
a help function for make a empty view.
Return: _PackedView
"""
view = type(name,(BaseTornadoView,*parent),{})
_packed = _PackedView(view,uri=uri)
return _packed
def _make_uri_tuple(uri,handler,kargs=None):
t = [uri,handler]
if kargs: t.append(kargs)
return tuple(t)
def _print_and_log(logger=logging.getLogger(),*args):
print(*args)
logger.info(*args)
def base_log_function(o):
if hasattr(o,'__log__'):#
_print_and_log(o.__log__)
elif isinstance(o,_web.ErrorHandler):
_print_and_log(o)# TODO: More simple and useful infomation
else:
_print_and_log(o)
class Wood(object):
def __init__(self,name=__name__,**config):
self._app = _web.Application(**config)
self._server = _httpserver.HTTPServer(self._app,xheaders=True)
self._name = name
self.prepare_funcs = []
if 'log_function' not in self.application.settings:
self.application.settings['log_function'] = base_log_function
@property
def server(self):
return self._server
@property
def application(self):
return self._app
def handler(self,uri,handler,host='',**kargs):
self.application.add_handlers(host,[_make_uri_tuple(uri,handler,kargs)])
def empty(self,uri,name,*parents):
"""
Return: _PackedView
"""
v = _make_empty_view(uri=uri,name=name,*parents)
self.register(v)
return v
def route(self,uri,method='get',*parents,**kargs):
"""
Route a function to uri.
arg method: method for function
Return: function
"""
def route(f):
"""
Return: function
"""
view = self.empty(uri,f.__name__,*parents)
method = method.lower()
return view.override(method)(f,back=True)
return route
def register(self,view):
self.handler(uri=view.uri,handler=view.handler)
def register_all(self,g):
for k in g:
o = g[k]
if isinstance(o,RegisterAllow):
self.register(o)
def bind(self,port):
self.server.bind(port)
def prepare(self,func):
self.prepare_funcs.append(func)
return func
def call_prepare(self):
for f in self.prepare_funcs:
f(self)
def _start(self):
self.call_prepare()
self.ioloop.start()
def start(self,port=None,wokers=None):
if port: self.bind(port)
if not wokers:
self.server.start()
else:
self.server.start(wokers)
self._start()
@property
def ioloop(self):
return _ioloop.IOLoop.current()
# The end of the file
|
Python
| 0.000005 |
@@ -1358,16 +1358,52 @@
._time,%0A
+ handler_name=self.__name__,%0A
@@ -1512,35 +1512,8 @@
info
-,handler_name=self.__name__
)%0A
|
5e1e0ba1dca301eb597fb319c68280f7ee761037
|
Add twopeasandtheirpod and simplyrecipes to __init__
|
recipe_scrapers/__init__.py
|
recipe_scrapers/__init__.py
|
import re

from .allrecipes import AllRecipes


SCRAPERS = {
    AllRecipes.host(): AllRecipes,
}


def url_path_to_dict(path):
    pattern = (r'^'
               r'((?P<schema>.+?)://)?'
               r'((?P<user>.+?)(:(?P<password>.*?))?@)?'
               r'(?P<host>.*?)'
               r'(:(?P<port>\d+?))?'
               r'(?P<path>/.*?)?'
               r'(?P<query>[?].*?)?'
               r'$'
               )
    regex = re.compile(pattern)
    matches = regex.match(path)
    url_dict = matches.groupdict() if matches is not None else None
    return url_dict


def scrap_me(url_path):
    return SCRAPERS[url_path_to_dict(url_path)['host']](url_path)


__all__ = ['scrap_me']
|
Python
| 0.000004 |
@@ -43,56 +43,240 @@
pes%0A
-%0A%0ASCRAPERS = %7B%0A AllRecipes.host(): AllRecipes
+from .simplyrecipes import SimplyRecipes%0Afrom .twopeasandtheirpod import TwoPeasAndTheirPod%0A%0A%0ASCRAPERS = %7B%0A AllRecipes.host(): AllRecipes,%0A SimplyRecipes.host(): SimplyRecipes,%0A TwoPeasAndTheirPod.host(): TwoPeasAndTheirPod
,%0A%7D%0A
|
5fd70e01f648da6dfc994bfe0e5c666c69fa9e45
|
return None (null) in preference to empty string when recipe yield is unavailable
|
recipe_scrapers/vegolosi.py
|
recipe_scrapers/vegolosi.py
|
from ._abstract import AbstractScraper
from ._utils import get_minutes, get_yields, normalize_string


class Vegolosi(AbstractScraper):
    @classmethod
    def host(cls):
        return "vegolosi.it"

    def title(self):
        return self.soup.find("h1").get_text().strip()

    def preparation_time(self):
        possible_time_info_elements = self.soup.findAll(
            "span", {"class": "tasty-recipes-prep-time"}
        )
        return sum([get_minutes(element) for element in possible_time_info_elements])

    def cooking_time(self):
        possible_time_info_elements = self.soup.findAll(
            "span", {"class": "tasty-recipes-cook-time"}
        )
        return sum([get_minutes(element) for element in possible_time_info_elements])

    def total_time(self):
        possible_time_info_elements = self.soup.findAll(
            "span", {"class": "tasty-recipes-total-time"}
        )
        return sum([get_minutes(element) for element in possible_time_info_elements])

    def yields(self):
        possible_yields_info_elements = self.soup.findAll(
            "span", {"class": "tasty-recipes-yield"}
        )
        for element in possible_yields_info_elements:
            if "persone" in element.get_text():
                return get_yields(element)
        return ""

    def ingredients(self):
        ingredients = self.soup.select(".tasty-recipe-ingredients > ul > li")
        if not ingredients:
            ingredients = self.soup.findAll("li", {"class": "ingredient"})

        return [normalize_string(ingredient.get_text()) for ingredient in ingredients]

    def instructions(self):
        instructions = self.soup.findAll("div", {"class": "tasty-recipe-instructions"})
        return "\n".join(
            [normalize_string(instruction.get_text()) for instruction in instructions]
        )

    def ratings(self):
        return round(
            float(
                self.soup.find("div", {"class": "tasty-recipe-rating rating_panel"})
                .get("data-content-rate")
                .replace(",", ".")
            ),
            2,
        )
|
Python
| 0.000089 |
@@ -1287,26 +1287,8 @@
ent)
-%0A return %22%22
%0A%0A
|
c88b7d5fa934e25ae426d8b918d6eb8de414682d
|
Add missing _ssl constant. Close PyCQA/pylint#2629
|
astroid/brain/brain_ssl.py
|
astroid/brain/brain_ssl.py
|
# Copyright (c) 2016 Claudiu Popa <[email protected]>
# Copyright (c) 2016 Ceridwen <[email protected]>

# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER

"""Astroid hooks for the ssl library."""

from astroid import MANAGER, register_module_extender
from astroid.builder import AstroidBuilder
from astroid import nodes
from astroid import parse


def ssl_transform():
    return parse(
        """
    from _ssl import OPENSSL_VERSION_NUMBER, OPENSSL_VERSION_INFO, OPENSSL_VERSION
    from _ssl import _SSLContext, MemoryBIO
    from _ssl import (
        SSLError, SSLZeroReturnError, SSLWantReadError, SSLWantWriteError,
        SSLSyscallError, SSLEOFError,
        )
    from _ssl import CERT_NONE, CERT_OPTIONAL, CERT_REQUIRED
    from _ssl import txt2obj as _txt2obj, nid2obj as _nid2obj
    from _ssl import RAND_status, RAND_add, RAND_bytes, RAND_pseudo_bytes
    try:
        from _ssl import RAND_egd
    except ImportError:
        # LibreSSL does not provide RAND_egd
        pass
    from _ssl import (OP_ALL, OP_CIPHER_SERVER_PREFERENCE,
                      OP_NO_COMPRESSION, OP_NO_SSLv2, OP_NO_SSLv3,
                      OP_NO_TLSv1, OP_NO_TLSv1_1, OP_NO_TLSv1_2,
                      OP_SINGLE_DH_USE, OP_SINGLE_ECDH_USE)
    from _ssl import (ALERT_DESCRIPTION_ACCESS_DENIED, ALERT_DESCRIPTION_BAD_CERTIFICATE,
                      ALERT_DESCRIPTION_BAD_CERTIFICATE_HASH_VALUE,
                      ALERT_DESCRIPTION_BAD_CERTIFICATE_STATUS_RESPONSE,
                      ALERT_DESCRIPTION_BAD_RECORD_MAC,
                      ALERT_DESCRIPTION_CERTIFICATE_EXPIRED,
                      ALERT_DESCRIPTION_CERTIFICATE_REVOKED,
                      ALERT_DESCRIPTION_CERTIFICATE_UNKNOWN,
                      ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE,
                      ALERT_DESCRIPTION_CLOSE_NOTIFY, ALERT_DESCRIPTION_DECODE_ERROR,
                      ALERT_DESCRIPTION_DECOMPRESSION_FAILURE,
                      ALERT_DESCRIPTION_DECRYPT_ERROR,
                      ALERT_DESCRIPTION_HANDSHAKE_FAILURE,
                      ALERT_DESCRIPTION_ILLEGAL_PARAMETER,
                      ALERT_DESCRIPTION_INSUFFICIENT_SECURITY,
                      ALERT_DESCRIPTION_INTERNAL_ERROR,
                      ALERT_DESCRIPTION_NO_RENEGOTIATION,
                      ALERT_DESCRIPTION_PROTOCOL_VERSION,
                      ALERT_DESCRIPTION_RECORD_OVERFLOW,
                      ALERT_DESCRIPTION_UNEXPECTED_MESSAGE,
                      ALERT_DESCRIPTION_UNKNOWN_CA,
                      ALERT_DESCRIPTION_UNKNOWN_PSK_IDENTITY,
                      ALERT_DESCRIPTION_UNRECOGNIZED_NAME,
                      ALERT_DESCRIPTION_UNSUPPORTED_CERTIFICATE,
                      ALERT_DESCRIPTION_UNSUPPORTED_EXTENSION,
                      ALERT_DESCRIPTION_USER_CANCELLED)
    from _ssl import (SSL_ERROR_EOF, SSL_ERROR_INVALID_ERROR_CODE, SSL_ERROR_SSL,
                      SSL_ERROR_SYSCALL, SSL_ERROR_WANT_CONNECT, SSL_ERROR_WANT_READ,
                      SSL_ERROR_WANT_WRITE, SSL_ERROR_WANT_X509_LOOKUP, SSL_ERROR_ZERO_RETURN)
    from _ssl import VERIFY_CRL_CHECK_CHAIN, VERIFY_CRL_CHECK_LEAF, VERIFY_DEFAULT, VERIFY_X509_STRICT
    from _ssl import HAS_SNI, HAS_ECDH, HAS_NPN, HAS_ALPN
    from _ssl import _OPENSSL_API_VERSION
    from _ssl import PROTOCOL_SSLv23, PROTOCOL_TLSv1, PROTOCOL_TLSv1_1, PROTOCOL_TLSv1_2
    """
    )


register_module_extender(MANAGER, "ssl", ssl_transform)
|
Python
| 0.000006 |
@@ -3479,16 +3479,50 @@
TLSv1_2%0A
+ from _ssl import PROTOCOL_TLS%0A
%22%22%22%0A
|
631270eeafad8fd6b20973673f6d6e8b733e9029
|
enable email
|
quant/tool/email_box.py
|
quant/tool/email_box.py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from email.mime.text import MIMEText

from quant import config
import smtplib

mail_to = ["[email protected]"]
mail_host = "smtp.163.com"
mail_user = "[email protected]"

'''SMTP password generated for the 163 mailbox'''
mail_pass = config.EMAIL_PASSWORD_163

mail_subject = 'logging'


def send_mail(content):
    pass
    # me = "QuantBot" + "<" + mail_user + ">"
    # msg = MIMEText(_text=content, _subtype='plain', _charset='utf-8')
    # msg['Subject'] = mail_subject
    # msg['From'] = me
    # msg['To'] = ";".join(mail_to)
    # try:
    #     server = smtplib.SMTP()
    #     server.connect(mail_host)
    #     server.login(mail_user, mail_pass)
    #     server.sendmail(me, mail_to, msg.as_string())
    #     server.close()
    #     return True
    # except Exception as e:
    #     print (e)
    #     return False


if __name__ == '__main__':
    # for test
    send_mail('content')
|
Python
| 0.00004 |
@@ -336,16 +336,18 @@
nt):%0A
+ #
pass%0A
@@ -347,18 +347,16 @@
pass%0A
- #
me = %22Q
@@ -387,26 +387,24 @@
er + %22%3E%22%0A
- #
msg = MIMET
@@ -457,26 +457,24 @@
'utf-8')%0A
- #
msg%5B'Subjec
@@ -491,26 +491,24 @@
_subject%0A
- #
msg%5B'From'%5D
@@ -516,18 +516,16 @@
= me%0A
- #
msg%5B'To
@@ -550,18 +550,16 @@
_to)%0A
- #
try:%0A
@@ -555,26 +555,24 @@
try:%0A
- #
server
@@ -587,26 +587,24 @@
b.SMTP()%0A
- #
server.
@@ -621,26 +621,24 @@
il_host)%0A
- #
server.
@@ -664,26 +664,24 @@
il_pass)%0A
- #
server.
@@ -718,26 +718,24 @@
tring())%0A
- #
server.
@@ -741,26 +741,24 @@
.close()%0A
- #
return
@@ -765,18 +765,16 @@
True%0A
- #
except
@@ -788,26 +788,24 @@
on as e:%0A
- #
print (
@@ -810,18 +810,16 @@
(e)%0A
- #
ret
|
cca16ade9257464352917586a5beeb69a05373e8
|
Add a TODO for b/150599675 and temporarily remove the assertion of `p13n_metrics['epoch_1']`.
|
tensorflow_federated/python/research/personalization/p13n_utils_test.py
|
tensorflow_federated/python/research/personalization/p13n_utils_test.py
|
# Lint as: python3
# Copyright 2020, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import collections

import numpy as np
import tensorflow as tf
import tensorflow_federated as tff

from tensorflow_federated.python.research.personalization import p13n_utils


def _create_dataset():
  """Constructs an unbatched dataset with three datapoints."""
  ds = collections.OrderedDict([('x', [[-1.0, -1.0], [1.0, 1.0], [1.0, 1.0]]),
                                ('y', [[1.0], [1.0], [1.0]])])
  return tf.data.Dataset.from_tensor_slices(ds)


def _model_fn():
  """Constructs a linear model with weights initialized to be zeros."""
  inputs = tf.keras.Input(shape=(2,))  # feature dim = 2
  outputs = tf.keras.layers.Dense(1, kernel_initializer='zeros')(inputs)
  keras_model = tf.keras.Model(inputs=inputs, outputs=outputs)
  dummy_batch = collections.OrderedDict([('x', np.zeros([1, 2],
                                                        dtype=np.float32)),
                                         ('y', np.zeros([1, 1],
                                                        dtype=np.float32))])
  return tff.learning.from_keras_model(
      keras_model=keras_model,
      dummy_batch=dummy_batch,
      loss=tf.keras.losses.MeanSquaredError(),
      metrics=[tf.keras.metrics.MeanAbsoluteError()])


class P13NUtilsTest(tf.test.TestCase):

  def test_evaluate_fn_succeeds_with_valid_args(self):
    model = _model_fn()
    dataset = _create_dataset()
    metrics = p13n_utils.evaluate_fn(model=model, dataset=dataset, batch_size=1)
    # Since the weights are all zeros, both MSE and MAE equal 1.0.
    self.assertDictContainsSubset({
        'loss': 1.0,
        'mean_absolute_error': 1.0
    }, metrics)

  def test_build_personalize_fn_succeeds_with_valid_args(self):
    model = _model_fn()
    dataset = _create_dataset()
    p13n_fn = p13n_utils.build_personalize_fn(
        optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=0.5),
        train_batch_size=2,
        max_num_epochs=1,
        num_epochs_per_eval=1,
        test_batch_size=1,
        shuffle=False)
    p13n_metrics = p13n_fn(model=model, train_data=dataset, test_data=dataset)
    # The model weights become [0, 0, 1] after training one epoch, which gives
    # an MSE 0.0 and MAE 0.0.
    self.assertDictContainsSubset({
        'loss': 0.0,
        'mean_absolute_error': 0.0
    }, p13n_metrics['epoch_1'])
    # The model is trained for one epoch, so the final model has the same
    # metrics as those in `epoch_1`.
    self.assertDictContainsSubset({
        'loss': 0.0,
        'mean_absolute_error': 0.0
    }, p13n_metrics['final_model'])
    # The total number of training examples is 3.
    self.assertEqual(p13n_metrics['num_examples'], 3)
    # The batch size is set to 2 in `p13n_fn`, so training data has 2 batches.
    self.assertEqual(p13n_metrics['num_batches'], 2)


if __name__ == '__main__':
  tf.compat.v1.enable_v2_behavior()
  tf.test.main()
|
Python
| 0.000009 |
@@ -881,44 +881,21 @@
%22%0A
-ds = collections.OrderedDict(%5B('x',
+x = np.array(
%5B%5B-1
@@ -933,71 +933,124 @@
0%5D%5D)
-,
%0A
- ('y', %5B%5B1.0%5D, %5B1.0%5D, %5B1.0%5D%5D)%5D
+y = np.array(%5B%5B1.0%5D, %5B1.0%5D, %5B1.0%5D%5D)%0A ds = collections.OrderedDict(x=x.astype(np.float32), y=y.astype(np.float32)
)%0A
@@ -1381,27 +1381,26 @@
puts)%0A
-dummy_batch
+input_spec
= colle
@@ -1423,95 +1423,54 @@
ct(%5B
-('x', np.zeros(%5B1, 2%5D,%0A
+%0A ('x', tf.TensorSpec(%5BNone, 2%5D,
dtype=
-np
+tf
.flo
@@ -1487,130 +1487,47 @@
- ('y', np.zeros(%5B1, 1%5D,%0A
+('y', tf.TensorSpec(%5BNone, 1%5D,
dtype=
-np
+tf
.flo
@@ -1532,16 +1532,19 @@
loat32))
+%0A
%5D)%0A ret
@@ -1619,31 +1619,29 @@
-dummy_batch=dummy_batch
+input_spec=input_spec
,%0A
@@ -2713,24 +2713,102 @@
MAE 0.0.%0A
+ # TODO(b/150599675): restore the following check once the bug is fixed.%0A #
self.assert
@@ -2824,36 +2824,38 @@
insSubset(%7B%0A
+#
+
'loss': 0.0,%0A
@@ -2846,32 +2846,34 @@
'loss': 0.0,%0A
+ #
'mean_absol
@@ -2883,32 +2883,34 @@
_error': 0.0%0A
+ #
%7D, p13n_metrics
|
5ae8b38eba7e04effa530d8419df05840bc2478e
|
Fix flake8 warning
|
unyt/__init__.py
|
unyt/__init__.py
|
"""
The unyt package.
Note that the symbols defined in :mod:`unyt.physical_constants` and
:mod:`unyt.unit_symbols` are importable from this module. For example::
>>> from unyt import km, clight
>>> print((km/clight).to('ns'))
3335.64095198152 ns
In addition, the following functions and classes are importable from the
top-level ``unyt`` namespace:
* :func:`unyt.array.loadtxt`
* :func:`unyt.array.savetxt`
* :func:`unyt.test`
* :func:`unyt.array.uconcatenate`
* :func:`unyt.array.ucross`
* :func:`unyt.array.udot`
* :func:`unyt.array.uhstack`
* :func:`unyt.array.uintersect1d`
* :func:`unyt.array.unorm`
* :func:`unyt.array.ustack`
* :func:`unyt.array.uunion1d`
* :func:`unyt.array.uvstack`
* :class:`unyt.array.unyt_array`
* :class:`unyt.array.unyt_quantity`
* :func:`unyt.unit_object.define_unit`
* :class:`unyt.unit_object.Unit`
* :class:`unyt.unit_registry.UnitRegistry`
* :class:`unyt.unit_systems.UnitSystem`
* :func:`unyt.testing.assert_allclose_units`
"""
# -----------------------------------------------------------------------------
# Copyright (c) 2018, yt Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the LICENSE file, distributed with this software.
# -----------------------------------------------------------------------------
try:
import numpy as np
try:
from pkg_resources import parse_version
npv = np.__version__
if parse_version(npv) < parse_version("1.13.0"): # pragma: no cover
raise RuntimeError(
"The unyt package requires NumPy 1.13 or newer but NumPy %s "
"is installed" % npv
)
del parse_version, npv
except ImportError: # pragma: no cover
# setuptools isn't installed so we don't try to check version numbers
pass
del np
except ImportError: # pragma: no cover
raise RuntimeError("The unyt package requires numpy but numpy is not installed.")
try:
import sympy
del sympy
except ImportError: # pragma: no cover
raise RuntimeError("The unyt package requires sympy but sympy is not installed.")
from ._version import get_versions
from unyt import unit_symbols
from unyt import physical_constants
from unyt.array import ( # NOQA: F401
loadtxt,
savetxt,
uconcatenate,
ucross,
udot,
uhstack,
uintersect1d,
unorm,
ustack,
uunion1d,
uvstack,
unyt_array,
unyt_quantity,
)
from unyt.unit_object import Unit, define_unit # NOQA: F401
from unyt.unit_registry import UnitRegistry # NOQA: F401
from unyt.unit_systems import UnitSystem # NOQA: F401
from unyt.testing import assert_allclose_units
# function to only import quantities into this namespace
# we go through the trouble of doing this instead of "import *"
# to avoid including extraneous variables (e.g. floating point
# constants used to *construct* a physical constant) in this namespace
def import_units(module, namespace):
"""Import Unit objects from a module into a namespace"""
for key, value in module.__dict__.items():
if isinstance(value, (unyt_quantity, Unit)):
namespace[key] = value
import_units(unit_symbols, globals())
import_units(physical_constants, globals())
del import_units
__version__ = get_versions()["version"]
del get_versions
def test(): # pragma: no cover
"""Execute the unit tests on an installed copy of unyt.
Note that this function requires pytest to run. If pytest is not
installed this function will raise ImportError.
"""
import pytest
import os
pytest.main([os.path.dirname(os.path.abspath(__file__))])
|
Python
| 0 |
@@ -2685,16 +2685,30 @@
se_units
+ # NOQA: F401
%0A%0A%0A# fun
|
0caec903579e4cf3f22ea3e5ea1df3ecd8ad0fe3
|
remove nigthly test hgemm_asm
|
test/nightly.py
|
test/nightly.py
|
#
# These nightly tests are slow but have good coverage. Fast tests with less coverage are in pre_checkin.py.
#
# To execute this test file, apt-get install python-pytest, then
# PYTHONPATH=. py.test -v test/nightly.py
#
# To run test directly, with complete output:
# mkdir build && cd build
# python ../Tensile/Tensile.py ../Tensile/Configs/test_hgemm_defaults.yaml ./
#

import Tensile.Tensile as Tensile

# defaults
def test_hgemm_defaults(tmpdir):
    Tensile.Tensile([Tensile.TensileConfigPath("test_hgemm_defaults.yaml"), tmpdir.strpath])
def test_sgemm_defaults(tmpdir):
    Tensile.Tensile([Tensile.TensileConfigPath("test_sgemm_defaults.yaml"), tmpdir.strpath])
def test_dgemm_defaults(tmpdir):
    Tensile.Tensile([Tensile.TensileConfigPath("test_dgemm_defaults.yaml"), tmpdir.strpath])

# thorough tests
def test_hgemm(tmpdir):
    Tensile.Tensile([Tensile.TensileConfigPath("test_hgemm.yaml"), tmpdir.strpath])
def test_sgemm(tmpdir):
    Tensile.Tensile([Tensile.TensileConfigPath("test_sgemm.yaml"), tmpdir.strpath])

# vectors
def test_hgemm_vectors(tmpdir):
    Tensile.Tensile([Tensile.TensileConfigPath("test_hgemm_vectors.yaml"), tmpdir.strpath])
def test_sgemm_vectors(tmpdir):
    Tensile.Tensile([Tensile.TensileConfigPath("test_sgemm_vectors.yaml"), tmpdir.strpath])

# tensor convolution
def test_tensor_convolution(tmpdir):
    Tensile.Tensile([Tensile.TensileConfigPath("test_convolution.yaml"), tmpdir.strpath])

# tensor contractions
def test_tensor_contraction(tmpdir):
    Tensile.Tensile([Tensile.TensileConfigPath("test_tensor_contraction.yaml"), tmpdir.strpath])

# assembly
def test_hgemm_asm(tmpdir):
    Tensile.Tensile([Tensile.TensileConfigPath("test_hgemm_asm.yaml"), tmpdir.strpath])
def test_sgemm_asm(tmpdir):
    Tensile.Tensile([Tensile.TensileConfigPath("test_sgemm_asm.yaml"), tmpdir.strpath])
def test_dgemm_asm(tmpdir):
    Tensile.Tensile([Tensile.TensileConfigPath("test_dgemm_asm.yaml"), tmpdir.strpath])
|
Python
| 0.016036 |
@@ -1612,124 +1612,8 @@
bly%0A
-def test_hgemm_asm(tmpdir):%0A Tensile.Tensile(%5BTensile.TensileConfigPath(%22test_hgemm_asm.yaml%22), tmpdir.strpath%5D)%0A
def
|
018da9b9a268900344a8d72230b6315d4aaeb1ae
|
Fix MusicBrainz sample.
|
test/samples.py
|
test/samples.py
|
#!/usr/bin/env python3
# Copyright (c) 2009, Karoly Lorentey <[email protected]>

import unittest
import os
import os.path
import warnings

import stagger
from stagger.id3 import *


def list_id3(path):
    for root, dirs, files in os.walk(path):
        dirs.sort()
        for file in sorted(files):
            if file.endswith(".id3"):
                yield os.path.join(root, file)


def generate_test(file):
    def test(self):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", stagger.Warning)
            tag = stagger.read_tag(file)

            prefix_to_class = {
                "22.": stagger.Tag22,
                "23.": stagger.Tag23,
                "24.": stagger.Tag24
            }

            # Check tag version based on filename prefix
            basename = os.path.basename(file)
            self.assertTrue(any(basename.startswith(prefix) for prefix in prefix_to_class))
            for prefix in prefix_to_class:
                if basename.startswith(prefix):
                    self.assertEqual(type(tag), prefix_to_class[prefix])
                    self.assertEqual(tag.version, int(prefix[1]))

            if basename.endswith(".lossy.id3"):
                # Don't try to match generated tag to original when stagger is
                # explicitly expected to modify the tag.
                return

            # Scrub iTunes-produced invalid frames with frameids ending with space.
            # Stagger won't save these, so they would result in a tag mismatch below.
            for key in list(tag.keys()):
                if key.endswith(" "):
                    del tag[key]

            tag.padding_max = 0
            data = tag.encode()
            tag2 = stagger.decode_tag(data)

            tag.padding_max = 0
            data2 = tag.encode()
            self.assertEqual(data, data2, "data mismatch in file {0}".format(file))
            self.assertEqual(tag, tag2, "tag mismatch in file {0}".format(file))
    return test


class SamplesTestCase(unittest.TestCase):
    def tearDown(self):
        # Clean warning registries, allowing warnings to be recorded again.
        for module in stagger.tags, stagger.frames, stagger.id3, stagger.specs:
            if hasattr(module, "__warningregistry__"):
                del module.__warningregistry__

    def testID3v2ExtendedHeader(self):
        # First sample simply includes an empty extended header.
        tag1 = stagger.read_tag(os.path.join(sample_dir,
                                             "23.synthetic.empty-extended-header.lossy.id3"))
        self.assertEqual(tag1.title, "The Millionaire's Holiday")
        self.assertEqual(tag1.album, "Best Of Combustible Edison")
        self.assertEqual(tag1.date, "1997")
        self.assertEqual(tag1.track, 1)
        self.assertEqual(tag1.genre, "Foobar")
        self.assertEqual(tag1.artist, "Combustible Edison")
        self.assertEqual(tag1.comment, " 0000132D 0000132D 00002FF0")
        self.assertEqual(tag1.flags, { "extended_header" })

        # Second sample file has an (invalid) CRC32 number in its extended header.
        tag2 = stagger.read_tag(os.path.join(sample_dir,
                                             "23.synthetic.extended-header-bad-crc.lossy.id3"))
        self.assertEqual(tag2.title, "The Millionaire's Holiday")
        self.assertEqual(tag2.album, "Best Of Combustible Edison")
        self.assertEqual(tag2.date, "1997")
        self.assertEqual(tag2.track, 1)
        self.assertEqual(tag2.genre, "Foobar")
        self.assertEqual(tag2.artist, "Combustible Edison")
        self.assertEqual(tag2.comment, " 0000132D 0000132D 00002FF0")
        self.assertEqual(tag2.flags, { "ext:crc_present", "extended_header" })
        self.assertEqual(tag2.crc32, 0x20202020)

    def testIssue37(self):
        # Check that duplicate frames are handled OK.
        # The sample file contains two TALB frames ("quux" and "Foo").
        # This is invalid according to the spec.
        tag = stagger.read_tag(os.path.join(sample_dir,
                                            "24.issue37.stagger.duplicate-talb.id3"))

        # The friendly API should just concatenate the frames, as if they were
        # a single multivalued text frame.
        self.assertEqual(tag.album, "quux / Foo")

        # Ditto for the magical dictionary API.
        self.assertEqual(tag[TALB], TALB(encoding=0, text=["quux", "Foo"]))

        # However, both getframes() and frames() should show two separate frames.
        self.assertEqual(tag.frames(TALB), [TALB(encoding=0, text="quux"),
                                            TALB(encoding=0, text="Foo")])
        self.assertEqual(tag.frames(orig_order=True),
                         [TIT2(encoding=0, text="Foobar"),
                          TALB(encoding=0, text="quux"),
                          TALB(encoding=0, text="Foo")])


sample_dir = os.path.join(os.path.dirname(__file__), "samples")

for file in list_id3(sample_dir):
    method = "test_" + os.path.basename(file).replace(".", "_")
    setattr(SamplesTestCase, method, generate_test(file))

suite = unittest.TestLoader().loadTestsFromTestCase(SamplesTestCase)

if __name__ == "__main__":
    warnings.simplefilter("always", stagger.Warning)
    unittest.main(defaultTest="suite")
|
Python
| 0 |
@@ -1369,16 +1369,17 @@
return%0A%0A
+%0A
@@ -1394,70 +1394,28 @@
rub
-iTunes-produced invalid frames with frameids ending with space
+known invalid frames
.%0A
@@ -1576,16 +1576,127 @@
th(%22 %22):
+ # iTunes%0A del tag%5Bkey%5D%0A if tag.version == 4 and key == %22XSOP%22: # MusicBrainz
%0A
|
931cfb8025f45535b3bb839ebfa5191074a76b15
|
Fix log capture on py3
|
test/service.py
|
test/service.py
|
import logging
import re
import select
import subprocess
import threading
import time

__all__ = [
    'ExternalService',
    'SpawnedService',
]


class ExternalService(object):
    def __init__(self, host, port):
        logging.info("Using already running service at %s:%d", host, port)
        self.host = host
        self.port = port

    def open(self):
        pass

    def close(self):
        pass


class SpawnedService(threading.Thread):
    def __init__(self, args=None, env=None):
        threading.Thread.__init__(self)

        if args is None:
            raise TypeError("args parameter is required")
        self.args = args
        self.env = env
        self.captured_stdout = []
        self.captured_stderr = []

        self.should_die = threading.Event()

    def run(self):
        self.run_with_handles()

    def run_with_handles(self):
        self.child = subprocess.Popen(
            self.args,
            env=self.env,
            bufsize=1,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        alive = True

        while True:
            (rds, _, _) = select.select([self.child.stdout, self.child.stderr], [], [], 1)

            if self.child.stdout in rds:
                line = self.child.stdout.readline()
                self.captured_stdout.append(line)

            if self.child.stderr in rds:
                line = self.child.stderr.readline()
                self.captured_stderr.append(line)

            if self.should_die.is_set():
                self.child.terminate()
                alive = False

            poll_results = self.child.poll()
            if poll_results is not None:
                if not alive:
                    break
                else:
                    self.dump_logs()
                    raise RuntimeError("Subprocess has died. Aborting. (args=%s)" % ' '.join(str(x) for x in self.args))

    def dump_logs(self):
        logging.critical('stderr')
        for line in self.captured_stderr:
            logging.critical(line.rstrip())

        logging.critical('stdout')
        for line in self.captured_stdout:
            logging.critical(line.rstrip())

    def wait_for(self, pattern, timeout=30):
        t1 = time.time()
        while True:
            t2 = time.time()
            if t2 - t1 >= timeout:
                try:
                    self.child.kill()
                except:
                    logging.exception("Received exception when killing child process")
                self.dump_logs()

                raise RuntimeError("Waiting for %r timed out after %d seconds" % (pattern, timeout))

            if re.search(pattern, '\n'.join(self.captured_stdout), re.IGNORECASE) is not None:
                logging.info("Found pattern %r in %d seconds via stdout", pattern, (t2 - t1))
                return
            if re.search(pattern, '\n'.join(self.captured_stderr), re.IGNORECASE) is not None:
                logging.info("Found pattern %r in %d seconds via stderr", pattern, (t2 - t1))
                return
            time.sleep(0.1)

    def start(self):
        threading.Thread.start(self)

    def stop(self):
        self.should_die.set()
        self.join()
|
Python
| 0 |
@@ -1309,32 +1309,48 @@
dout.append(line
+.decode('utf-8')
)%0A%0A i
|
94fc7881052fea4e7d83f35e41fab4f5ed108f34
|
fix styling
|
spectate/utils.py
|
spectate/utils.py
|
from collections.abc import Mapping


class Sentinel:
    __slots__ = "_name"

    def __init__(self, name):
        self._name = name

    def __repr__(self):
        return self._name  # pragma: no cover
|
Python
| 0.000001 |
@@ -1,42 +1,4 @@
-from collections.abc import Mapping%0A%0A%0A
clas
@@ -33,16 +33,17 @@
%22_name%22%0A
+%0A
def
|
243a3e185a8984e3d0daa0bb19a4e9a3ae11d7c7
|
Fix student ranking detail page
|
app/soc/modules/gci/views/models/student_ranking.py
|
app/soc/modules/gci/views/models/student_ranking.py
|
#!/usr/bin/env python2.5
#
# Copyright 2010 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Views for Student Ranking.
"""

__authors__ = [
    '"Daniel Hans" <[email protected]>',
]


from soc.views.models import base

from soc.logic import dicts
from soc.views import helper
from soc.views.helper import decorators
from soc.views.helper import lists

from soc.modules.gci.logic.models.task import logic as gci_task_logic
from soc.modules.gci.views.helper import access as gci_access
from soc.modules.gci.views.models import program as gci_program_view

import soc.modules.gci.logic.models.student_ranking


class View(base.View):
  """View methods for the Tasks.
  """

  DETAILS_MSG_FMT = 'Ranking details for %s.'

  def __init__(self, params=None):
    """Defines the fields and methods required for the task View class
    to provide the user with the necessary views.

    Params:
      params: a dict with params for this View
    """

    rights = gci_access.GCIChecker(params)
    rights['any_access'] = ['allow']

    new_params = {}
    new_params['logic'] = soc.modules.gci.logic.models.student_ranking.logic
    new_params['rights'] = rights

    new_params['name'] = "Student Ranking"
    new_params['module_name'] = "student_ranking"
    new_params['sidebar_grouping'] = 'Student Rankings'

    new_params['module_package'] = 'soc.modules.gci.views.models'
    new_params['url_name'] = 'gci/student_ranking'

    new_params['scope_view'] = gci_program_view

    patterns = []
    patterns += [
        (r'^%(url_name)s/(?P<access_type>show_details)/%(key_fields)s$',
         '%(module_package)s.%(module_name)s.show_details',
         'Show ranking details.'),
    ]

    new_params['extra_django_patterns'] = patterns

    params = dicts.merge(params, new_params, sub_merge=True)

    super(View, self).__init__(params=params)

  @decorators.merge_params
  def showDetails(self, request, access_type,
                  page_name=None, params=None, **kwargs):
    """Shows ranking details for the entity specified by **kwargs.

    Args:
      request: the standard Django HTTP request object
      access_type : the name of the access type which should be checked
      page_name: the page name displayed in templates as page and header title
      params: a dict with params for this View
      kwargs: the Key Fields for the specified entity
    """

    logic = params['logic']
    ranking = logic.getFromKeyFields(kwargs)
    student = ranking.student

    list_params = params.copy()
    list_params['list_description'] = self.DETAILS_MSG_FMT % student.user.name
    list_params['public_field_extra'] = lambda entity: {
        'task': entity.title,
        'org': entity.scope.name,
        'points_difficulty': entity.difficulty.value
    }
    list_params['public_field_keys'] = [
        'task', 'org', 'points_difficulty', 'closed_on']
    list_params['public_field_names'] = [
        'Task', 'Organization', 'Points (Difficulty)', 'Completed on']

    if lists.isDataRequest(request):
      return self.getListRankingDetailsData(request, list_params, student)

    contents = []
    order = ['closed_on']
    list = lists.getListGenerator(request, list_params, order=order, idx=0)
    contents.append(list)

    return self._list(request, list_params, contents, page_name)

  def getListRankingDetailsData(self, request, params, student):
    """Returns the list data for Ranking Details list.

    Args:
      request: HTTPRequest object
      params_collection: List of list Params indexed with the idx of the list
      org_entity: GCIOrganization entity for which the lists are generated
    """

    filter = {
        'student': student,
        'status': 'Closed',
    }

    visibility = 'public'
    args = []

    params['logic'] = gci_task_logic
    contents = lists.getListData(request, params, filter,
                                 visibility=visibility, args=args)

    return lists.getResponse(request, contents)


view = View()

show_details = decorators.view(view.showDetails)
|
Python
| 0.000001 |
@@ -3269,17 +3269,21 @@
entity.
-d
+taskD
ifficult
@@ -3283,16 +3283,18 @@
fficulty
+()
.value%0A
|
800ef1d1305f125695073732f4b6155d6f0cb445
|
Update rasa/cli/interactive.py
|
rasa/cli/interactive.py
|
rasa/cli/interactive.py
|
import argparse
import os
from typing import List, Text

import rasa.cli.train as train
from rasa.cli.arguments import interactive as arguments
from rasa import data, model

# noinspection PyProtectedMember
from rasa.cli.utils import get_validated_path, print_error
from rasa.constants import (
    DEFAULT_DATA_PATH,
    DEFAULT_MODELS_PATH,
    DEFAULT_ENDPOINTS_PATH,
)
from rasa.model import get_latest_model


def add_subparser(
    subparsers: argparse._SubParsersAction, parents: List[argparse.ArgumentParser]
):
    interactive_parser = subparsers.add_parser(
        "interactive",
        conflict_handler="resolve",
        parents=parents,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        help="Starts an interactive learning session to create new training data for a "
        "Rasa model by chatting.",
    )
    interactive_parser.set_defaults(func=interactive)
    interactive_parser.add_argument(
        "--e2e", action="store_true", help="save file in e2e format"
    )

    interactive_subparsers = interactive_parser.add_subparsers()
    interactive_core_parser = interactive_subparsers.add_parser(
        "core",
        conflict_handler="resolve",
        parents=parents,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        help="Starts an interactive learning session model to create new training data "
        "for a Rasa Core model by chatting. Uses the 'RegexInterpreter', i.e. "
        "`/<intent>` input format.",
    )
    interactive_core_parser.set_defaults(func=interactive_core)

    arguments.set_interactive_arguments(interactive_parser)
    arguments.set_interactive_core_arguments(interactive_core_parser)


def interactive(args: argparse.Namespace):
    args.fixed_model_name = None
    args.store_uncompressed = False

    if args.model is None:
        check_training_data(args)
        zipped_model = train.train(args)
    else:
        zipped_model = get_provided_model(args.model)

    perform_interactive_learning(args, zipped_model)


def interactive_core(args: argparse.Namespace):
    args.fixed_model_name = None
    args.store_uncompressed = False

    if args.model is None:
        zipped_model = train.train_core(args)
    else:
        zipped_model = get_provided_model(args.model)

    perform_interactive_learning(args, zipped_model)


def perform_interactive_learning(args, zipped_model):
    from rasa.core.train import do_interactive_learning

    if zipped_model and os.path.exists(zipped_model):
        args.model = zipped_model
        with model.unpack_model(zipped_model) as model_path:
            args.core, args.nlu = model.get_model_subdirectories(model_path)
            stories_directory = data.get_core_directory(args.data)
            args.endpoints = get_validated_path(
                args.endpoints, "endpoints", DEFAULT_ENDPOINTS_PATH, True
            )
            do_interactive_learning(args, stories_directory)
    else:
        print_error(
            "Interactive learning process cannot be started as no initial model was "
            "found. Use 'rasa train' to train a model."
        )


def get_provided_model(arg_model: Text):
    model_path = get_validated_path(arg_model, "model", DEFAULT_MODELS_PATH)

    if os.path.isdir(model_path):
        model_path = get_latest_model(model_path)

    return model_path


def check_training_data(args):
    training_files = [
        get_validated_path(f, "data", DEFAULT_DATA_PATH, none_is_valid=True)
        for f in args.data
    ]
    story_files, nlu_files = data.get_core_nlu_files(training_files)
    if not story_files or not nlu_files:
        print_error(
            "Cannot train initial Rasa model. Please provide NLU and Core data "
            "using the '--data' argument."
        )
        exit(1)
|
Python
| 0 |
@@ -979,17 +979,24 @@
lp=%22
-s
+S
ave
+story
file
+s
in
@@ -1005,16 +1005,79 @@
e format
+. In this format user messages will be included in the stories.
%22%0A )%0A
|
7329757e1ad30e327c1ae823a8302c79482d6b9c
|
Update BUILD_OSS to 4632
|
src/data/version/mozc_version_template.bzl
|
src/data/version/mozc_version_template.bzl
|
# Copyright 2010-2021, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
MAJOR = 2
MINOR = 26
# BUILD number used for the OSS version.
BUILD_OSS = 4624
# Number to be increased. This value may be replaced by other tools.
BUILD = BUILD_OSS
# Represent the platform and release channel.
REVISION = 100
REVISION_MACOS = REVISION + 1
# This version represents the version of Mozc IME engine (converter, predictor,
# etc.). This version info is included both in the Mozc server and in the Mozc
# data set file so that the Mozc server can accept only the compatible version
# of data set file. The engine version must be incremented when:
# * POS matcher definition and/or conversion models were changed,
# * New data are added to the data set file, and/or
# * Any changes that loose data compatibility are made.
ENGINE_VERSION = 24
# This version is used to manage the data version and is included only in the
# data set file. DATA_VERSION can be incremented without updating
# ENGINE_VERSION as long as it's compatible with the engine.
# This version should be reset to 0 when ENGINE_VERSION is incremented.
DATA_VERSION = 10
|
Python
| 0 |
@@ -1605,18 +1605,18 @@
OSS = 46
+3
2
-4
%0A%0A# Numb
|
e243e907e58047e18c0a16e061f7aa718e3b5854
|
Remove unavailable imports
|
statsmodels/compat/__init__.py
|
statsmodels/compat/__init__.py
|
from .python import (  # noqa:F401
    PY3, PY37,
    bytes, str, unicode, string_types,
    asunicode, asbytes, asstr, asstr2, asunicode_nested, asbytes_nested,
    range, zip, filter, map,
    lrange, lzip, lmap, lfilter,
    cStringIO, StringIO, BytesIO,
    cPickle, pickle,
    iteritems, iterkeys, itervalues,
    urlopen, urljoin, urlencode, HTTPError, URLError,
    reduce, long, unichr, zip_longest,
    strchar,
    isfileobj,
    open_latin1,
    builtins,
    getargspec,
    input,
    getexception,
    advance_iterator, next,
    callable,
    get_function_name, get_class
)
|
Python
| 0 |
@@ -124,42 +124,8 @@
tr2,
- asunicode_nested, asbytes_nested,
%0A
@@ -376,173 +376,539 @@
-strchar,%0A isfileobj,%0A open_latin1,%0A builtins,%0A getargspec,%0A input,%0A getexception,%0A advance_iterator, next,%0A callable,%0A get_function_name,
+builtins,%0A getargspec,%0A next, get_class%0A)%0A%0A__all__ = %5B'PY3', 'PY37', 'bytes', 'str', 'unicode', 'string_types',%0A 'asunicode', 'asbytes', 'asstr', 'asstr2', 'range', 'zip',%0A 'filter', 'map', 'lrange', 'lzip', 'lmap', 'lfilter', 'cStringIO',%0A 'StringIO', 'BytesIO', 'cPickle', 'pickle', 'iteritems',%0A 'iterkeys', 'itervalues', 'urlopen', 'urljoin', 'urlencode',%0A 'HTTPError', 'URLError', 'reduce', 'long', 'unichr', 'zip_longest',%0A 'builtins', 'getargspec', 'next', '
get_
@@ -904,19 +904,19 @@
ext', 'get_class
-%0A)
+'%5D
%0A
|
0629b30ade8b619697e8cc28d651904e742cd70e
|
Correct inst method names in system info, add Docker version (#36360)
|
homeassistant/helpers/system_info.py
|
homeassistant/helpers/system_info.py
|
"""Helper to gather system info."""
import os
import platform
from typing import Dict
from homeassistant.const import __version__ as current_version
from homeassistant.loader import bind_hass
from homeassistant.util.package import is_virtual_env
from .typing import HomeAssistantType
@bind_hass
async def async_get_system_info(hass: HomeAssistantType) -> Dict:
"""Return info about the system."""
info_object = {
"installation_type": "Unknown",
"version": current_version,
"dev": "dev" in current_version,
"hassio": hass.components.hassio.is_hassio(),
"virtualenv": is_virtual_env(),
"python_version": platform.python_version(),
"docker": False,
"arch": platform.machine(),
"timezone": str(hass.config.time_zone),
"os_name": platform.system(),
"os_version": platform.release(),
}
if platform.system() == "Windows":
info_object["os_version"] = platform.win32_ver()[0]
elif platform.system() == "Darwin":
info_object["os_version"] = platform.mac_ver()[0]
elif platform.system() == "Linux":
info_object["docker"] = os.path.isfile("/.dockerenv")
# Determine installation type on current data
if info_object["docker"]:
info_object["installation_type"] = "Home Assistant Core on Docker"
elif is_virtual_env():
info_object[
"installation_type"
] = "Home Assistant Core in a Python Virtual Environment"
# Enrich with Supervisor information
if hass.components.hassio.is_hassio():
info = hass.components.hassio.get_info()
host = hass.components.hassio.get_host_info()
info_object["supervisor"] = info.get("supervisor")
info_object["host_os"] = host.get("operating_system")
info_object["chassis"] = host.get("chassis")
if info.get("hassos") is not None:
info_object["installation_type"] = "Home Assistant"
else:
info_object["installation_type"] = "Home Assistant Supervised"
return info_object
|
Python
| 0 |
@@ -1322,18 +1322,13 @@
t Co
-re on Dock
+ntain
er%22%0A
@@ -1374,29 +1374,16 @@
_object%5B
-%0A
%22install
@@ -1393,25 +1393,16 @@
on_type%22
-%0A
%5D = %22Hom
@@ -1421,40 +1421,8 @@
Core
- in a Python Virtual Environment
%22%0A%0A
@@ -1781,16 +1781,75 @@
hassis%22)
+%0A info_object%5B%22docker_version%22%5D = info.get(%22docker%22)
%0A%0A
|
7db13e76adda42f458f3564884c7b32909f3ec87
|
Fix for issue #220
|
hpedockerplugin/request_validator.py
|
hpedockerplugin/request_validator.py
|
from collections import OrderedDict
from oslo_log import log as logging
import hpedockerplugin.exception as exception
LOG = logging.getLogger(__name__)
def validate_request(contents):
operations_map = OrderedDict()
operations_map['virtualCopyOf,scheduleName'] = \
_validate_snapshot_schedule_opts
operations_map['virtualCopyOf,scheduleFrequency'] = \
_validate_snapshot_schedule_opts
    operations_map['virtualCopyOf,snapshotPrefix'] = \
_validate_snapshot_schedule_opts
operations_map['virtualCopyOf'] = \
_validate_snapshot_opts
operations_map['cloneOf'] = \
_validate_clone_opts
operations_map['importVol'] = \
_validate_import_vol_opts
operations_map['replicationGroup'] = \
_validate_rcg_opts
if 'Opts' in contents:
_validate_mutually_exclusive_ops(contents)
validated = False
for op_name, validator in operations_map.items():
op_name = op_name.split(',')
found = not (set(op_name) - set(contents['Opts'].keys()))
if found:
validator(contents)
validated = True
break
# Validate regular volume options
if not validated:
validate_create_volume_opts(contents)
def _validate_mutually_exclusive_ops(contents):
mutually_exclusive_ops = ['virtualCopyOf', 'cloneOf', 'importVol',
'replicationGroup']
if 'Opts' in contents:
received_opts = contents.get('Opts').keys()
diff = set(mutually_exclusive_ops) - set(received_opts)
if len(diff) < len(mutually_exclusive_ops) - 1:
mutually_exclusive_ops.sort()
msg = "Operations %s are mutually exclusive and cannot " \
"be specified together. Please check help for usage." % \
mutually_exclusive_ops
raise exception.InvalidInput(reason=msg)
def _validate_opts(operation, contents, valid_opts, mandatory_opts=None):
if 'Opts' in contents:
received_opts = contents.get('Opts').keys()
if mandatory_opts:
diff = set(mandatory_opts) - set(received_opts)
if diff:
# Print options in sorted manner
mandatory_opts.sort()
msg = "One or more mandatory options %s are missing for " \
"operation %s" % (mandatory_opts, operation)
raise exception.InvalidInput(reason=msg)
diff = set(received_opts) - set(valid_opts)
if diff:
diff = list(diff)
diff.sort()
msg = "Invalid option(s) %s specified for operation %s. " \
"Please check help for usage." % \
(diff, operation)
raise exception.InvalidInput(reason=msg)
def validate_create_volume_opts(contents):
valid_opts = ['compression', 'size', 'provisioning',
'flash-cache', 'qos-name', 'fsOwner',
'fsMode', 'mountConflictDelay', 'cpg',
'snapcpg', 'backend']
_validate_opts("create volume", contents, valid_opts)
def _validate_clone_opts(contents):
valid_opts = ['cloneOf', 'size', 'cpg', 'snapcpg']
_validate_opts("clone volume", contents, valid_opts)
def _validate_snapshot_opts(contents):
valid_opts = ['virtualCopyOf', 'retentionHours', 'expirationHours']
_validate_opts("create snapshot", contents, valid_opts)
def _validate_snapshot_schedule_opts(contents):
valid_opts = ['virtualCopyOf', 'retentionHours', 'scheduleFrequency',
'scheduleName', 'snapshotPrefix', 'expHrs', 'retHrs']
mandatory_opts = ['scheduleName', 'snapshotPrefix', 'scheduleFrequency']
_validate_opts("create snapshot schedule", contents,
valid_opts, mandatory_opts)
def _validate_import_vol_opts(contents):
valid_opts = ['importVol', 'backend']
_validate_opts("import volume", contents, valid_opts)
def _validate_rcg_opts(contents):
valid_opts = ['replicationGroup', 'size', 'provisioning',
'backend', 'mountConflictDelay']
_validate_opts('create replicated volume', contents, valid_opts)
|
Python
| 0 |
@@ -779,16 +779,64 @@
rcg_opts
+%0A operations_map%5B'help'%5D = _validate_help_opt
%0A%0A if
@@ -4180,16 +4180,31 @@
ctDelay'
+, 'compression'
%5D%0A _v
@@ -4246,28 +4246,147 @@
ume', contents, valid_opts)%0A
+%0A%0Adef _validate_help_opt(contents):%0A valid_opts = %5B'help'%5D%0A _validate_opts('display help', contents, valid_opts)%0A
|
bc638d11be50f8480d1f103d3a25484c6ccb52b7
|
clean code in discovery_json_view.py and add comments
|
hs_core/views/discovery_json_view.py
|
hs_core/views/discovery_json_view.py
|
import simplejson as json
from django.http import HttpResponse
from haystack.query import SearchQuerySet
from django import forms
from haystack.forms import FacetedSearchForm
from haystack.generic_views import FacetedSearchView
from django.core import serializers
from hs_core.discovery_form import DiscoveryForm
class DiscoveryJsonView(FacetedSearchView):
facet_fields = ['author', 'subjects', 'resource_type', 'public', 'owners_names', 'discoverable']
form_class = DiscoveryForm
def form_valid(self, form):
coor_values = []
coordinate_dictionary = []
self.queryset = form.search()
if len(self.request.GET):
for result in self.get_queryset():
json_obj = {}
json_obj['title'] = result.object.title
json_obj['get_absolute_url'] = result.object.get_absolute_url()
for coverage in result.object.metadata.coverages.all():
if coverage.type == 'point':
json_obj['coverage_type'] = coverage.type
json_obj['east'] = coverage.value['east']
json_obj['north'] = coverage.value['north']
elif coverage.type == 'box':
json_obj['coverage_type'] = coverage.type
json_obj['northlimit'] = coverage.value['northlimit']
json_obj['eastlimit'] = coverage.value['eastlimit']
json_obj['southlimit'] = coverage.value['southlimit']
json_obj['westlimit'] = coverage.value['westlimit']
else:
continue
coor_obj = json.dumps(json_obj)
coor_values.append(coor_obj)
the_data = json.dumps(coor_values)
return HttpResponse(the_data, content_type='application/json')
|
Python
| 0 |
@@ -74,244 +74,219 @@
ack.
-query import SearchQuerySet%0Afrom django import forms%0Afrom haystack.forms import FacetedSearchForm%0A
+generic_views import FacetedSearchView%0Afrom hs_core.discovery_form import DiscoveryForm%0A%0A# View class for generating JSON data format
from
-h
+H
aystack
-.generic_views import FacetedSearchView%0Afrom django.core import serializers%0Afrom hs_core.discovery_form import DiscoveryForm%0A
+%0A# returned JSON objects array is used for building the map view
%0Acla
@@ -326,16 +326,39 @@
hView):%0A
+ # set facet fields%0A
face
@@ -458,276 +458,730 @@
-form_class = DiscoveryForm%0A%0A def form_valid(self, form):%0A coor_values = %5B%5D%0A coordinate_dictionary = %5B%5D%0A self.queryset = form.search()%0A if len(self.request.GET):%0A for result in self.get_queryset():%0A json_obj = %7B%7D
+# declare form class to use in this view%0A form_class = DiscoveryForm%0A%0A # overwrite Haystack generic_view.py form_valid() function to generate JSON response%0A def form_valid(self, form):%0A # initialize an empty array for holding the result objects with coordinate values%0A coor_values = %5B%5D%0A # get query set%0A self.queryset = form.search()%0A%0A # When we have a GET request with search query, build our JSON objects array%0A if len(self.request.GET):%0A%0A # iterate all the search results%0A for result in self.get_queryset():%0A # initialize a null JSON object%0A json_obj = %7B%7D%0A%0A # assign title and url values to the object
%0A
@@ -1312,16 +1312,67 @@
te_url()
+%0A%0A # iterate all the coverage values
%0A
@@ -1433,24 +1433,125 @@
ges.all():%0A%0A
+ # if coverage type is point, assign 'east' and 'north' coordinates to the object%0A
@@ -1787,16 +1787,154 @@
north'%5D%0A
+ # elif coverage type is box, assign 'northlimit', 'eastlimit', 'southlimit' and 'westlimit' coordinates to the object%0A
@@ -2340,32 +2340,65 @@
ue%5B'westlimit'%5D%0A
+ # else, skip%0A
@@ -2440,16 +2440,67 @@
ontinue%0A
+ # encode object to JSON format%0A
@@ -2567,36 +2567,154 @@
-coor_values.append(coor_obj)
+# add JSON object the results array%0A coor_values.append(coor_obj)%0A%0A # encode the results results array to JSON array
%0A
@@ -2757,16 +2757,51 @@
values)%0A
+ # return JSON response%0A
|
c1b19af7229d582f7bd474a05a679cf45e3c9bf8
|
add proxy + fix import modules
|
tests/basics.py
|
tests/basics.py
|
# -*- coding: utf-8 -*-
"""
@author: Nicolas Rivet
test the connection to IG API
do some basic operations
"""
from ig.ig_service import IGservice as igs
import ig.ig_tools as igt
def main():
"""Main module for testing."""
#get config for demo API
proxy_user, proxy_password, api_key, username, password, account = \
igt.getconfig('demo')
#login demo API
service=igs(username, password, api_key, account, 'demo')
log=igs.login(service)
print(log[0])
#get newest bidask
instrument='CS.D.EURUSD.CFD.IP'
bidask=igs.get_bidask(service, instrument)
print(bidask)
#get historical closes
resolution='MINUTE'
max_size=10
closes=igs.get_closes(service, instrument, resolution, max_size)
print(closes)
if __name__ == '__main__':
main()
|
Python
| 0 |
@@ -115,36 +115,133 @@
%0D%0A%0D%0A
-from ig.ig_service
+import sys%0D%0Aimport os%0D%0Asys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'ig')))%0D%0A%0D%0A
import
-IG
+ig_
serv
@@ -263,11 +263,8 @@
ort
-ig.
ig_t
@@ -506,16 +506,26 @@
vice=igs
+.IGservice
(usernam
@@ -565,55 +565,98 @@
emo'
-)%0D%0A log=igs.login(service)%0D%0A print(log%5B0%5D
+, proxy_user, proxy_password)%0D%0A log=service.login()%0D%0A print('%5Cn', 'login', '%5Cn', log
)%0D%0A
@@ -732,19 +732,23 @@
bidask=
-igs
+service
.get_bid
@@ -751,25 +751,16 @@
_bidask(
-service,
instrume
@@ -774,16 +774,52 @@
print(
+'%5Cn', 'get_bidask of EURUSD', '%5Cn',
bidask)%0D
@@ -906,19 +906,23 @@
closes=
-igs
+service
.get_clo
@@ -929,17 +929,8 @@
ses(
-service,
inst
@@ -970,16 +970,76 @@
print(
+'%5Cn', 'get_closes of EURUSD for the last 10 minutes', '%5Cn',
closes)%0D
@@ -1079,12 +1079,14 @@
%0D%0A main()
+%0D%0A
|
e18a80ac32b93e12e0faf03d97ccf7104d1db1ac
|
Update constants.py
|
azurecloudify/constants.py
|
azurecloudify/constants.py
|
########
# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Look at https://github.com/cloudify-cosmo/cloudify-aws-plugin/blob/1.2/ec2/constants.py
# instance module constants
credentials = 'Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsIng1dCI6Ik1uQ19WWmNBVGZNNXBPWWlKSE1iYTlnb0VLWSIsImtpZCI6Ik1uQ19WWmNBVGZNNXBPWWlKSE1iYTlnb0VLWSJ9.eyJhdWQiOiJodHRwczovL21hbmFnZW1lbnQuY29yZS53aW5kb3dzLm5ldC8iLCJpc3MiOiJodHRwczovL3N0cy53aW5kb3dzLm5ldC9lOGY4M2RkZi00ZGVlLTRlMWQtYjc3YS1mZjNkZGFhZjUyZDgvIiwiaWF0IjoxNDQxMjUyOTM0LCJuYmYiOjE0NDEyNTI5MzQsImV4cCI6MTQ0MTI1NjgzNCwidmVyIjoiMS4wIiwidGlkIjoiZThmODNkZGYtNGRlZS00ZTFkLWI3N2EtZmYzZGRhYWY1MmQ4Iiwib2lkIjoiNDg0ODI0YzktZWY5ZC00NmFjLThjMjktNWQxZTg0ZjI5YmFhIiwic3ViIjoiNDg0ODI0YzktZWY5ZC00NmFjLThjMjktNWQxZTg0ZjI5YmFhIiwiaWRwIjoiaHR0cHM6Ly9zdHMud2luZG93cy5uZXQvZThmODNkZGYtNGRlZS00ZTFkLWI3N2EtZmYzZGRhYWY1MmQ4LyIsImFwcGlkIjoiNGRlNDQ3NTAtMmM1OC00MDdlLTg1MDQtMDhkOWMyZTkxMjU0IiwiYXBwaWRhY3IiOiIxIn0.b7aTuJJFOkakE9ZPW7uqI_-MRJZ6Nc0-9AHvPWHb3HMR9pFhwQHMpgIGH_Q1Thru-ggrdbKqNtw252TdofmS4UH5Wo3YYRQo0Q-nP8Knwsa59PxZAwWp4S_0E8gSFcKth_QaiXtDDfn9Pv544d-voa69SXn5dN4PRsxLPvjOoYFYTMO0l04LfUhPO0flQ-3YdhJgKevw2C_flx8pCRFz7cwAqRzV_3pTG3Po0_CxYsoK3_jGgW_6uyPKxepSRzQMQB-jGjP-gDxXcULahaBbt81vr8iqR3fovPHhqR6KL8fbXHfjnztMl5d_-AIvX5u9UAXUuhiUQg0NPKHZwm7j9w'
headers = {"Content-Type": "application/json", "Authorization": credentials}
subscription_id = 'REPLACE_WITH_SUBSCRIPTION_ID'
COMMON_REQ_PROPERTIES=['subscription_id','location']
api_version='2015-05-01-preview'
api_version_resource_group='2015-01-01'
storage_account_type = 'Standard_LRS'
vnet_address_prefixes = ["10.1.0.0/16","10.2.0.0/16"]
subnet_name = 'Subnet-1'
address_prefix = "10.1.0.0/24"
ip_config_name = 'myip1'
image_reference_publisher = 'Canonical'
image_reference_offer = 'UbuntuServer'
image_reference_sku = '14.04.2-LTS'
image_reference_version = 'latest'
os_disk_name = 'osdisk'
vm_caching = 'ReadWrite'
vm_createOption = 'FromImage'
admin_username='azuretest'
vm_version="latest"
azure_url='https://management.azure.com'
key_data= """---- BEGIN SSH2 PUBLIC KEY ----
Comment: "rsa-key-20150804"
AAAAB3NzaC1yc2EAAAABJQAAAQEA0Y5tAjA2C9xPLRMMfU37J3kGUYQzRAbPu2gN
9HKKB+/bkzEE+W9zysYgL1vu3heqUewQlnMz2G6gfDca+6FmitMpZdz8E0ZYUy4M
CG+fWs/6xT92OsVLAi2VRgQlyGqOD+KJEZdMnIbbWyPzaLC0yaUDEUNWe2hRNkr0
daRY21UCCZG9+zZNR4ndJWxjJyF4Om1G4R5gruickOs5yECbgEMISpENWmXATc6U
UsVhRznp4u6iBusZO3ilH7B3YbDyGhXs4X/TcwBj6zuWaJsHXzorTL621g4Ppp4I
g6QVQSrBpNBe2JCjou6tlGSBFm7vApUwAYaMStDzaIcLck/nUQ==
---- END SSH2 PUBLIC KEY ----"""
resource = 'https://management.core.windows.net/'
RESOURCE_GROUP_REQUIRED_PROPERTIES=['vm_name','location','subscription_id']
STORAGE_ACCOUNT_REQUIRED_PROPERTIES = ['vm_name','location','subscription_id']
VNET_REQUIRED_PROPERTIES = ['vm_name','location','subscription_id']
VM_REQUIRED_PROPERTIES = ['vm_name','vm_os_type','vm_size','subscription_id','key_data','location']
NIC_REQUIRED_PROPERTIES = ['vm_name','location','subscription_id']
PUBLIC_IP_REQUIRED_PROPERTIES = ['vm_name','location','subscription_id']
|
Python
| 0.000001 |
@@ -777,1088 +777,10 @@
= '
-Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsIng1dCI6Ik1uQ19WWmNBVGZNNXBPWWlKSE1iYTlnb0VLWSIsImtpZCI6Ik1uQ19WWmNBVGZNNXBPWWlKSE1iYTlnb0VLWSJ9.eyJhdWQiOiJodHRwczovL21hbmFnZW1lbnQuY29yZS53aW5kb3dzLm5ldC8iLCJpc3MiOiJodHRwczovL3N0cy53aW5kb3dzLm5ldC9lOGY4M2RkZi00ZGVlLTRlMWQtYjc3YS1mZjNkZGFhZjUyZDgvIiwiaWF0IjoxNDQxMjUyOTM0LCJuYmYiOjE0NDEyNTI5MzQsImV4cCI6MTQ0MTI1NjgzNCwidmVyIjoiMS4wIiwidGlkIjoiZThmODNkZGYtNGRlZS00ZTFkLWI3N2EtZmYzZGRhYWY1MmQ4Iiwib2lkIjoiNDg0ODI0YzktZWY5ZC00NmFjLThjMjktNWQxZTg0ZjI5YmFhIiwic3ViIjoiNDg0ODI0YzktZWY5ZC00NmFjLThjMjktNWQxZTg0ZjI5YmFhIiwiaWRwIjoiaHR0cHM6Ly9zdHMud2luZG93cy5uZXQvZThmODNkZGYtNGRlZS00ZTFkLWI3N2EtZmYzZGRhYWY1MmQ4LyIsImFwcGlkIjoiNGRlNDQ3NTAtMmM1OC00MDdlLTg1MDQtMDhkOWMyZTkxMjU0IiwiYXBwaWRhY3IiOiIxIn0.b7aTuJJFOkakE9ZPW7uqI_-MRJZ6Nc0-9AHvPWHb3HMR9pFhwQHMpgIGH_Q1Thru-ggrdbKqNtw252TdofmS4UH5Wo3YYRQo0Q-nP8Knwsa59PxZAwWp4S_0E8gSFcKth_QaiXtDDfn9Pv544d-voa69SXn5dN4PRsxLPvjOoYFYTMO0l04LfUhPO0flQ-3YdhJgKevw2C_flx8pCRFz7cwAqRzV_3pTG3Po0_CxYsoK3_jGgW_6uyPKxepSRzQMQB-jGjP-gDxXcULahaBbt81vr8iqR3fovPHhqR6KL8fbXHfjnztMl5d_-AIvX5u9UAXUuhiUQg0NPKHZwm7j9w
'%0A
+
head
@@ -1534,481 +1534,10 @@
ta=
-%22%22%22---- BEGIN SSH2 PUBLIC KEY ----%0AComment: %22rsa-key-20150804%22%0AAAAAB3NzaC1yc2EAAAABJQAAAQEA0Y5tAjA2C9xPLRMMfU37J3kGUYQzRAbPu2gN%0A9HKKB+/bkzEE+W9zysYgL1vu3heqUewQlnMz2G6gfDca+6FmitMpZdz8E0ZYUy4M%0ACG+fWs/6xT92OsVLAi2VRgQlyGqOD+KJEZdMnIbbWyPzaLC0yaUDEUNWe2hRNkr0%0AdaRY21UCCZG9+zZNR4ndJWxjJyF4Om1G4R5gruickOs5yECbgEMISpENWmXATc6U%0AUsVhRznp4u6iBusZO3ilH7B3YbDyGhXs4X/TcwBj6zuWaJsHXzorTL621g4Ppp4I%0Ag6QVQSrBpNBe2JCjou6tlGSBFm7vApUwAYaMStDzaIcLck/nUQ==%0A---- END SSH2 PUBLIC KEY ----%22%22%22
+''
%0A%0Are
|
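The diff above blanks a hardcoded bearer token and SSH public key out of the constants module. A common replacement, sketched here with assumed environment-variable names (they are not part of the plugin):

import os

# Illustrative only: the variable names below are assumptions.
credentials = os.environ.get('AZURE_BEARER_TOKEN', '')
key_data = os.environ.get('AZURE_SSH_PUBLIC_KEY', '')
headers = {"Content-Type": "application/json",
           "Authorization": "Bearer %s" % credentials}
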
08fe4ba4e2804baff972ee0a430ebe0b6ea1e0b2
|
prepare compatibility
|
folderless/models.py
|
folderless/models.py
|
# coding: utf-8
from __future__ import unicode_literals
import hashlib
import os
from django.core.exceptions import ValidationError
from django.core.files.base import File as DjangoFile
from django.utils.encoding import python_2_unicode_compatible
from django.db import models
from django.core import urlresolvers
from django.db.models.signals import pre_save, pre_delete
from django.dispatch import receiver
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from easy_thumbnails.fields import ThumbnailerField
from easy_thumbnails.files import get_thumbnailer
from conf import settings
from folderless.utils import get_valid_filename, sha1_from_file, model_get_all_related_objects
OTHER_TYPE = 'other'
@python_2_unicode_compatible
class File(models.Model):
file = ThumbnailerField(
_('File'), upload_to=settings.FOLDERLESS_UPLOAD_TO,
storage=settings.FOLDERLESS_FILE_STORAGE,
thumbnail_storage=settings.FOLDERLESS_THUMBNAIL_STORAGE)
name = models.CharField(
_('name'), max_length=255, blank=True, default='')
filename = models.CharField(
_('Filename'),
help_text=_(u'Use for renaming your file on the server'),
max_length=255, blank=False, unique=True)
author = models.CharField(
_('Author'), max_length=255, blank=True, default='')
copyright = models.CharField(
_('Copyright'), max_length=255, blank=True, default='')
type = models.CharField(
_('File type'), max_length=12, choices=(), blank=True)
uploader = models.ForeignKey(
getattr(settings, 'AUTH_USER_MODEL', 'auth.User'),
related_name='owned_files', null=True, blank=True,
verbose_name=_('Uploader'))
created = models.DateTimeField(
_('Created'), default=timezone.now)
modified = models.DateTimeField(
_('Modified'), auto_now=True)
extension = models.CharField(
_('Extension'), max_length=255, blank=True, default='')
file_hash = models.CharField(
_('Checksum'), help_text=_(u'For preventing duplicates'),
max_length=40, blank=False, unique=True)
def __str__(self):
return '%s' % self.filename
class Meta:
verbose_name = _(u'File')
verbose_name_plural = _(u'Files')
def save(self, *args, **kwargs):
if not self.filename:
self.filename = self.file.file
super(File, self).save(*args, **kwargs)
# you should not change extensions!
def clean(self, *args, **kwargs):
super(File, self).clean(*args, **kwargs)
if (self.id):
old_instance = File.objects.get(pk=self.id)
if not old_instance.filename == self.filename:
self.filename = get_valid_filename(self.filename)
old_name, old_extension = os.path.splitext(old_instance.filename)
new_name, new_extension = os.path.splitext(self.filename)
if not old_extension.lower() == new_extension.lower():
raise ValidationError(_("File extension must stay the same."))
def generate_file_hash(self):
self.file_hash = sha1_from_file(self.file)
@property
def is_image(self):
if self.file:
image_definition = settings.FOLDERLESS_FILE_TYPES.get(
'image', None)
if image_definition is not None:
if self.extension in image_definition.get('extensions'):
return True
return False
@property
def label(self):
if self.name:
return '%s (%s)' % (self.name, self.filename)
else:
return self.filename
@property
def admin_url(self):
return urlresolvers.reverse(
'admin:folderless_file_change',
args=(self.pk,)
)
@property
def thumb_field_url(self):
if self.is_image:
return self._thumb_url(
settings.FOLDERLESS_IMAGE_WIDTH_FIELD,
settings.FOLDERLESS_IMAGE_HEIGHT_FIELD)
else:
return
@property
def thumb_list_url(self):
if self.is_image:
return self._thumb_url(
settings.FOLDERLESS_IMAGE_WIDTH_LIST,
settings.FOLDERLESS_IMAGE_HEIGHT_LIST)
else:
return
def thumb_list(self):
if self.is_image:
url = self.thumb_list_url
return '<a href="%s" target="_blank"><img src="%s" alt="%s"></a>' \
% (self.file.url, url, self.label)
else:
return
thumb_list.allow_tags = True
thumb_list.short_description = _(u'Thumb')
def references_list(self):
links = [
rel.get_accessor_name()
for rel in model_get_all_related_objects(File)
]
total = 0
for link in links:
total += getattr(self, link).all().count()
if total > 0:
return "%sx" % total
else:
return "-"
references_list.allow_tags = True
references_list.short_description = _(u'Referenced?')
def get_json_response(self):
return {
'id': self.id,
'file_url': self.file.url,
'edit_url': self.admin_url,
'thumbnail_list': self.thumb_list_url,
'thumbnail_field': self.thumb_field_url,
'label': self.label,
}
def _thumb_url(self, width, height):
thumbnailer = get_thumbnailer(self.file)
thumbnail_options = {
'size': (width, height)
}
thumb = thumbnailer.get_thumbnail(thumbnail_options)
return thumb.url
@property
def url(self):
"""
to make the model behave like a file field
"""
try:
r = self.file.url
except:
r = ''
return r
@receiver(pre_save, sender=File, dispatch_uid="folderless_file_processing")
def folderless_file_processing(sender, **kwargs):
"""
what we do here:
- determine file type, set it.
- generate file hash
- check for file renames
"""
instance = kwargs.get("instance")
if instance.file:
name, extension = os.path.splitext(instance.file.name)
if len(extension) > 1:
instance.extension = extension[1:].lower()
else:
instance.extension = ''
instance.type = OTHER_TYPE
for type, definition in settings.FOLDERLESS_FILE_TYPES.iteritems():
if instance.extension in definition.get("extensions"):
instance.type = type
instance.generate_file_hash()
if instance.id:
old_instance = File.objects.get(pk=instance.id)
if not old_instance.filename == instance.filename:
# rename!
new_file = DjangoFile(open(instance.file.path, mode='rb'))
instance.file.delete(False) # remove including thumbs
instance.file.save(instance.filename, new_file, save=False)
# do this with a signal, to catch them all
@receiver(pre_delete, sender=File)
def cleanup_file_on_delete(sender, instance, **kwargs):
# includes thumbnails
instance.file.delete(False)
|
Python
| 0 |
@@ -601,16 +601,23 @@
r%0A%0Afrom
+django.
conf imp
|
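The pre_save handler above deduplicates uploads via sha1_from_file. A sketch of chunked SHA-1 hashing of a file-like object, which is presumably what that utility does; the chunk size and names are illustrative:

import hashlib
import io

def sha1_from_fileobj(fileobj, chunk_size=4096):
    # Hash a file-like object in fixed-size chunks so large uploads
    # never have to fit in memory.
    digest = hashlib.sha1()
    for chunk in iter(lambda: fileobj.read(chunk_size), b''):
        digest.update(chunk)
    return digest.hexdigest()

print(sha1_from_fileobj(io.BytesIO(b'hello world')))
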
d23a68d464c62cdefb76dbe5855110374680ae61
|
Add coverage metrics to python code
|
regulations/settings/dev.py
|
regulations/settings/dev.py
|
from .base import *
DEBUG = True
TEMPLATE_DEBUG = DEBUG
STATICFILES_DIRS = (
root('static'),
)
OFFLINE_OUTPUT_DIR = '/tmp/'
INSTALLED_APPS += (
'django_nose',
)
NOSE_ARGS = [
'--exclude-dir=regulations/uitests'
]
try:
from local_settings import *
except ImportError:
pass
|
Python
| 0.000004 |
@@ -182,16 +182,74 @@
RGS = %5B%0A
+ '--with-coverage',%0A '--cover-package=regulations',%0A
'--e
|
6b5d4f43b5d22f70db6d08f8093f88785359a404
|
Implement DirectoryTarget
|
streaming_form_data/targets.py
|
streaming_form_data/targets.py
|
import hashlib
from typing import Callable, Optional
class BaseTarget:
"""
Targets determine what to do with some input once the parser is done
processing it. Any new Target should inherit from this base class and
override the :code:`data_received` function.
Attributes:
multipart_filename: the name of the file advertised by the user,
extracted from the :code:`Content-Disposition` header. Please note
that this value comes directly from the user input and is not
sanitized, so be careful in using it directly.
multipart_content_type: MIME Content-Type of the file, extracted from
the :code:`Content-Type` HTTP header
"""
def __init__(self, validator: Optional[Callable] = None):
self.multipart_filename = None
self.multipart_content_type = None
self._started = False
self._finished = False
self._validator = validator
def _validate(self, chunk: bytes):
if self._validator:
self._validator(chunk)
def start(self):
self._started = True
self.on_start()
def on_start(self):
pass
def data_received(self, chunk: bytes):
self._validate(chunk)
self.on_data_received(chunk)
def on_data_received(self, chunk: bytes):
raise NotImplementedError()
def finish(self):
self.on_finish()
self._finished = True
def on_finish(self):
pass
class NullTarget(BaseTarget):
"""NullTarget ignores whatever input is passed in.
This is mostly useful for internal use and should (normally) not be
required by external users.
"""
def on_data_received(self, chunk: bytes):
pass
class ValueTarget(BaseTarget):
"""ValueTarget stores the input in an in-memory list of bytes.
This is useful in case you'd like to have the value contained in an
in-memory string.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._values = []
def on_data_received(self, chunk: bytes):
self._values.append(chunk)
@property
def value(self):
return b''.join(self._values)
class FileTarget(BaseTarget):
"""FileTarget writes (streams) the input to an on-disk file."""
def __init__(
self, filename: str, allow_overwrite: bool = True, *args, **kwargs
):
super().__init__(*args, **kwargs)
self.filename = filename
self._mode = 'wb' if allow_overwrite else 'xb'
self._fd = None
def on_start(self):
self._fd = open(self.filename, self._mode)
def on_data_received(self, chunk: bytes):
if self._fd:
self._fd.write(chunk)
def on_finish(self):
if self._fd:
self._fd.close()
class SHA256Target(BaseTarget):
"""SHA256Target calculates the SHA256 hash of the given input."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._hash = hashlib.sha256()
def on_data_received(self, chunk: bytes):
self._hash.update(chunk)
@property
def value(self):
return self._hash.hexdigest()
|
Python
| 0 |
@@ -2816,24 +2816,944 @@
d.close()%0A%0A%0A
+class DirectoryTarget(BaseTarget):%0A %22%22%22DirectoryTarget writes (streams) the different input to an on-disk directory.%22%22%22%0A%0A def __init__(%0A self, directorypath: str, allow_overwrite: bool = True, *args, **kwargs%0A ):%0A super().__init__(*args, **kwargs)%0A%0A self.directorypath = directorypath%0A%0A self._mode = 'wb' if allow_overwrite else 'xb'%0A self._fd = None%0A self.multipart_filenames = %5B%5D%0A self.multipart_content_types = %5B%5D%0A%0A def on_start(self):%0A self._fd = open(self.directorypath.joinpath(self.multipart_filename), self._mode)%0A%0A def on_data_received(self, chunk: bytes):%0A if self._fd:%0A self._fd.write(chunk)%0A%0A def on_finish(self):%0A self.multipart_filenames.append(self.multipart_filename)%0A self.multipart_content_types.append(self.multipart_content_type)%0A if self._fd:%0A self._fd.close()%0A%0A %0A
class SHA256
|
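The DirectoryTarget added by the diff follows the target lifecycle documented above: start(), then data_received() once per chunk, then finish(). A condensed, dependency-free sketch of that contract:

import hashlib

class MiniTarget:
    """Condensed BaseTarget contract: start() -> data_received()* -> finish()."""

    def start(self):
        self._hash = hashlib.sha256()

    def data_received(self, chunk):
        self._hash.update(chunk)

    def finish(self):
        self.value = self._hash.hexdigest()

target = MiniTarget()
target.start()
for chunk in (b'part1', b'part2'):
    target.data_received(chunk)
target.finish()
print(target.value)
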
addee10ba5d79faa6223eedab00fec8cabe1e464
|
fix attributeerror
|
private_storage/storage/s3boto3.py
|
private_storage/storage/s3boto3.py
|
try:
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
from django.utils.deconstruct import deconstructible
from private_storage import appconfig
from storages.backends.s3boto3 import S3Boto3Storage
from storages.utils import setting
@deconstructible
class PrivateS3BotoStorage(S3Boto3Storage):
"""
Private storage bucket for S3
"""
access_key_names = ['AWS_PRIVATE_S3_ACCESS_KEY_ID', 'AWS_PRIVATE_ACCESS_KEY_ID'] + S3Boto3Storage.access_key_names
secret_key_names = ['AWS_PRIVATE_S3_SECRET_ACCESS_KEY', 'AWS_PRIVATE_SECRET_ACCESS_KEY'] + S3Boto3Storage.secret_key_names
def __init__(self, **settings):
super().__init__(**settings)
self.file_overwrite = setting('AWS_PRIVATE_S3_FILE_OVERWRITE', False) # false, differ from base class
self.object_parameters = setting('AWS_PRIVATE_S3_OBJECT_PARAMETERS', {})
self.bucket_name = setting('AWS_PRIVATE_STORAGE_BUCKET_NAME')
self.auto_create_bucket = setting('AWS_PRIVATE_AUTO_CREATE_BUCKET', False)
self.default_acl = setting('AWS_PRIVATE_DEFAULT_ACL', 'private') # differ from base class
self.bucket_acl = setting('AWS_PRIVATE_BUCKET_ACL', default_acl)
self.querystring_auth = setting('AWS_PRIVATE_QUERYSTRING_AUTH', True)
self.querystring_expire = setting('AWS_PRIVATE_QUERYSTRING_EXPIRE', 3600)
self.signature_version = setting('AWS_PRIVATE_S3_SIGNATURE_VERSION')
self.reduced_redundancy = setting('AWS_PRIVATE_REDUCED_REDUNDANCY', False)
self.location = setting('AWS_PRIVATE_LOCATION', '')
self.encryption = setting('AWS_PRIVATE_S3_ENCRYPTION', False)
self.custom_domain = setting('AWS_PRIVATE_S3_CUSTOM_DOMAIN')
self.addressing_style = setting('AWS_PRIVATE_S3_ADDRESSING_STYLE')
self.secure_urls = setting('AWS_PRIVATE_S3_SECURE_URLS', True)
self.file_name_charset = setting('AWS_PRIVATE_S3_FILE_NAME_CHARSET', 'utf-8')
self.preload_metadata = setting('AWS_PRIVATE_PRELOAD_METADATA', False)
self.endpoint_url = setting('AWS_PRIVATE_S3_ENDPOINT_URL', None)
self.use_ssl = setting('AWS_PRIVATE_S3_USE_SSL', True)
# default settings used to be class attributes on S3Boto3Storage, but
# are now part of the initialization or moved to a dictionary
self.access_key = setting('AWS_PRIVATE_S3_ACCESS_KEY_ID', setting('AWS_PRIVATE_ACCESS_KEY_ID', self.access_key))
self.secret_key = setting('AWS_PRIVATE_S3_SECRET_ACCESS_KEY', setting('AWS_PRIVATE_SECRET_ACCESS_KEY', self.secret_key))
        if hasattr(self, 'get_default_settings'):
default_settings = self.get_default_settings()
self.gzip = setting('AWS_PRIVATE_IS_GZIPPED', default_settings["gzip"]) # fallback to default
self.url_protocol = setting('AWS_PRIVATE_S3_URL_PROTOCOL', default_settings["url_protocol"]) # fallback to default
self.region_name = setting('AWS_PRIVATE_S3_REGION_NAME', default_settings["region_name"]) # fallback to default
else: # backward compatibility
self.gzip = setting('AWS_PRIVATE_IS_GZIPPED', self.gzip)
self.url_protocol = setting('AWS_PRIVATE_S3_URL_PROTOCOL', self.url_protocol)
self.region_name = setting('AWS_PRIVATE_S3_REGION_NAME', self.region_name)
def url(self, name, *args, **kwargs):
if appconfig.PRIVATE_STORAGE_S3_REVERSE_PROXY or not self.querystring_auth:
# There is no direct URL possible, return our streaming view instead.
return reverse('serve_private_file', kwargs={'path': name})
else:
# The S3Boto3Storage can generate a presigned URL that is temporary available.
return super(PrivateS3BotoStorage, self).url(name, *args, **kwargs)
@deconstructible
class PrivateEncryptedS3BotoStorage(PrivateS3BotoStorage):
"""
Enforced encryption for private storage on S3.
This is a convience option, it can also be implemented
through :class:`PrivateS3BotoStorage` by using the proper settings.
"""
encryption = True
def __init__(self, **settings):
super().__init__(**settings)
self.signature_version = self.signature_version or 's3v4'
|
Python
| 0.000002 |
@@ -1217,24 +1217,29 @@
UCKET_ACL',
+self.
default_acl)
|
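The one-character fix above replaces a bare default_acl with self.default_acl; inside a method, a bare name never resolves to an instance attribute (strictly a NameError, despite the subject line). A minimal reproduction:

class Storage:
    def __init__(self):
        self.default_acl = 'private'
        try:
            acl = default_acl        # bare name -> NameError, the crash being fixed
        except NameError:
            acl = self.default_acl   # the commit's fix: qualify with self
        self.bucket_acl = acl

print(Storage().bucket_acl)
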
410287c307bc0a967cac7845d8f706daf30cfa9d
|
Bump node16 from `16.14.2` to `16.15.0`
|
node_archives.bzl
|
node_archives.bzl
|
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
def repositories():
# Node (https://nodejs.org/en/about/releases/)
# Follow Node's maintainence schedule and support all LTS versions that are not end of life
http_archive(
name = "nodejs12_amd64",
build_file = "//nodejs:BUILD.nodejs",
sha256 = "ff92a45c4d03e8e270bec1ab337b8fff6e9de293dabfe7e8936a41f2fb0b202e",
strip_prefix = "node-v12.22.12-linux-x64/",
type = "tar.gz",
urls = ["https://nodejs.org/dist/v12.22.12/node-v12.22.12-linux-x64.tar.gz"],
)
http_archive(
name = "nodejs14_amd64",
build_file = "//nodejs:BUILD.nodejs",
sha256 = "3019e0508145c4c1fc6662f0b9b1c78bb84181aeea4749fac38ca51587aaf82f",
strip_prefix = "node-v14.19.1-linux-x64/",
type = "tar.gz",
urls = ["https://nodejs.org/dist/v14.19.1/node-v14.19.1-linux-x64.tar.gz"],
)
http_archive(
name = "nodejs16_amd64",
build_file = "//nodejs:BUILD.nodejs",
sha256 = "57e02c27eb5e52f560f72d96240e898cb52818dc9fc50f45478ce39ece38583a",
strip_prefix = "node-v16.14.2-linux-x64/",
type = "tar.gz",
urls = ["https://nodejs.org/dist/v16.14.2/node-v16.14.2-linux-x64.tar.gz"],
)
http_archive(
name = "nodejs12_arm64",
build_file = "//nodejs:BUILD.nodejs",
sha256 = "91aefa690914b7f24250f3c0b560b42c6d306315d40009c96b5a6940115895fe",
strip_prefix = "node-v12.22.12-linux-arm64/",
type = "tar.gz",
urls = ["https://nodejs.org/dist/v12.22.12/node-v12.22.12-linux-arm64.tar.gz"],
)
http_archive(
name = "nodejs14_arm64",
build_file = "//nodejs:BUILD.nodejs",
sha256 = "b365659aa9f31c984668ac60b70fcfae90ffabb3dd51b031898b050e403c1794",
strip_prefix = "node-v14.19.1-linux-arm64/",
type = "tar.gz",
urls = ["https://nodejs.org/dist/v14.19.1/node-v14.19.1-linux-arm64.tar.gz"],
)
http_archive(
name = "nodejs16_arm64",
build_file = "//nodejs:BUILD.nodejs",
sha256 = "8a792a4cb6d83a960f7bd2901225c492e40ace541fbd73ff59ac4a332c3aaafb",
strip_prefix = "node-v16.14.2-linux-arm64/",
type = "tar.gz",
urls = ["https://nodejs.org/dist/v16.14.2/node-v16.14.2-linux-arm64.tar.gz"],
)
|
Python
| 0 |
@@ -1050,72 +1050,72 @@
= %22
-57e02c27eb5e52f560f72d96240e898cb52818dc9fc50f45478ce39ece38583a
+d1c1de461be10bfd9c70ebae47330fb1b4ab0a98ad730823fb1340e34993edee
%22,%0A
@@ -1139,35 +1139,35 @@
ix = %22node-v16.1
-4.2
+5.0
-linux-x64/%22,%0A
@@ -1227,35 +1227,35 @@
s.org/dist/v16.1
-4.2
+5.0
/node-v16.14.2-l
@@ -1245,27 +1245,27 @@
0/node-v16.1
-4.2
+5.0
-linux-x64.t
@@ -2108,71 +2108,71 @@
= %22
-8a792a4cb6d83a960f7bd2901225c492e40ace541fbd73ff59ac4a332c3aaaf
+2aa387e6a57ade663849efdc4fabf7431a38d975db98dcc79293840e6894d28
b%22,%0A
@@ -2197,35 +2197,35 @@
ix = %22node-v16.1
-4.2
+5.0
-linux-arm64/%22,%0A
@@ -2295,19 +2295,19 @@
st/v16.1
-4.2
+5.0
/node-v1
@@ -2313,11 +2313,11 @@
16.1
-4.2
+5.0
-lin
|
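Each http_archive rule above pins an sha256 checksum that must be regenerated whenever the version is bumped. A sketch of how such a digest is typically computed (the file path is illustrative):

import hashlib

def sha256_of(path, chunk_size=1 << 16):
    # Stream the tarball through SHA-256 in chunks.
    digest = hashlib.sha256()
    with open(path, 'rb') as handle:
        for chunk in iter(lambda: handle.read(chunk_size), b''):
            digest.update(chunk)
    return digest.hexdigest()

# Usage (path is illustrative):
# print(sha256_of('node-v16.15.0-linux-x64.tar.gz'))
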
2bb57c932f3337fe5df24040523920f399a88fb2
|
The function apply_template has been renamed to display.
|
trac/File.py
|
trac/File.py
|
# -*- coding: iso8859-1 -*-
#
# Copyright (C) 2003, 2004 Edgewall Software
# Copyright (C) 2003, 2004 Jonas Borgström <[email protected]>
#
# Trac is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Trac is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
# Author: Jonas Borgström <[email protected]>
import sys
import StringIO
from time import gmtime, strftime
from svn import fs, util, delta
from Module import Module
from util import dict_get_with_default
import perm
class File (Module):
CHUNK_SIZE = 4096
def render (self):
self.perm.assert_permission (perm.FILE_VIEW)
def get_mime_type (self, root, path):
"""
Try to use the mime-type stored in subversion. text/plain is default.
"""
type = fs.node_prop (root, path, util.SVN_PROP_MIME_TYPE, self.pool)
if not type:
type = 'text/plain'
return type
def apply_template (self):
rev = dict_get_with_default(self.args, 'rev', None)
path = dict_get_with_default(self.args, 'path', '/')
if not rev:
rev = fs.youngest_rev(self.fs_ptr, self.pool)
else:
rev = int(rev)
root = fs.revision_root(self.fs_ptr, rev, self.pool)
mime_type = self.get_mime_type (root, path)
size = fs.file_length(root, path, self.pool)
date = fs.revision_prop(self.fs_ptr, rev,
util.SVN_PROP_REVISION_DATE, self.pool)
date_seconds = util.svn_time_from_cstring(date, self.pool) / 1000000
date = strftime("%a, %d %b %Y %H:%M:%S GMT", gmtime(date_seconds))
sys.stdout.write('Last-Modified: %s\r\n' % date)
sys.stdout.write('Content-Length: %d\r\n' % size)
sys.stdout.write('Content-Type: %s\r\n\r\n' % mime_type)
file = fs.file_contents(root, path, self.pool)
while 1:
data = util.svn_stream_read(file, self.CHUNK_SIZE)
if not data:
break
sys.stdout.write(data)
|
Python
| 0.999875 |
@@ -1450,22 +1450,15 @@
def
-apply_template
+display
(se
|
f2bd0ddb3341eb9f0ecf090650494deb871bcea4
|
Add ensembl tasks to the main luigi wrapper
|
luigi/tasks/genome_mapping/__init__.py
|
luigi/tasks/genome_mapping/__init__.py
|
# -*- coding: utf-8 -*-
"""
Copyright [2009-2018] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import luigi
from .genome_mapping_tasks import GetFasta
from .genome_mapping_tasks import CleanSplitFasta
from .genome_mapping_tasks import GetChromosomes
from .genome_mapping_tasks import BlatJob
from .genome_mapping_tasks import ParsePslOutput
from .genome_mapping_tasks import SpeciesBlatJob
from .update_ensembl_assembly import RetrieveEnsemblAssemblies
from .update_ensembl_assembly import RetrieveEnsemblGenomesAssemblies
from .pgload_exact_matches import GenomeMappingPGLoadExactMatches
from .pgload_inexact_matches import GenomeMappingPGLoadInexactMatches
from .pgload_ensembl_assembly import GenomeMappingPGLoadEnsemblAssembly
from .pgload_ensembl_assembly import GenomeMappingPGLoadEnsemblGenomesAssembly
GENOMES = {
9606: 'homo_sapiens',
10090: 'mus_musculus',
10116: 'rattus_norvegicus',
7227: 'drosophila_melanogaster',
#7165: 'anopheles_gambiae',
# 352472: 'dictyostelium_discoideum',
# 36329: 'plasmodium_falciparum',
3702: 'arabidopsis_thaliana',
7091: 'bombyx_mori',
# 284812: 'schizosaccharomyces_pombe', # wrong taxid = need to use 4896
# 559292: 'saccharomyces_cerevisiae', # correct taxid but ensembl uses something else
6239: 'caenorhabditis_elegans',
}
def get_taxids_for_genome_mapping():
"""
Get taxids for genomes that are used for mapping.
"""
return GENOMES.keys()
class SpeciesFastaExportWrapper(luigi.WrapperTask):
"""
A wrapper task to export fasta files for all species that will be mapped
to the reference genomes using blat.
"""
def requires(self):
for taxid in get_taxids_for_genome_mapping():
yield GetFasta(taxid=taxid)
class SpeciesFastaCleanSplitWrapper(luigi.WrapperTask):
"""
A wrapper task to keep only sequences of certain length and split fasta
files in chunks.
"""
def requires(self):
for taxid in get_taxids_for_genome_mapping():
yield CleanSplitFasta(taxid=taxid)
class GetChromosomeFastaWrapper(luigi.WrapperTask):
"""
A wrapper task for getting a list of all chromosome fasta files
that are used in parallel blat searches.
"""
def requires(self):
for taxid in get_taxids_for_genome_mapping():
yield GetChromosomes(taxid=taxid)
class BlatJobsWrapper(luigi.WrapperTask):
"""
A wrapper task for running blat searches of all split RNAcentral fasta files
against all chromosomes within the same species.
"""
def requires(self):
for taxid in get_taxids_for_genome_mapping():
            yield SpeciesBlatJob(taxid=taxid)
class ParsePslOutputWrapper(luigi.WrapperTask):
"""
A wrapper task for parsing all blat output into tsv files that can be
loaded into the database.
"""
def requires(self):
for taxid in get_taxids_for_genome_mapping():
yield ParsePslOutput(taxid=taxid)
class PGLoadGenomeMappingWrapper(luigi.WrapperTask):
"""
A wrapper task for loading parsed blat output into the database.
"""
def requires(self):
for taxid in get_taxids_for_genome_mapping():
yield [
GenomeMappingPGLoadExactMatches(taxid=taxid),
GenomeMappingPGLoadInexactMatches(taxid=taxid),
]
class GenomeMappingPipelineWrapper(luigi.WrapperTask):
"""
A wrapper task for the entire genome mapping pipeline.
"""
def requires(self):
yield GetChromosomeFastaWrapper()
yield SpeciesFastaCleanSplitWrapper()
yield BlatJobsWrapper()
yield ParsePslOutputWrapper()
yield PGLoadGenomeMappingWrapper()
|
Python
| 0.000001 |
@@ -4014,32 +4014,123 @@
requires(self):%0A
+ yield RetrieveEnsemblAssemblies()%0A yield RetrieveEnsemblGenomesAssemblies()%0A
yield Ge
|
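The module above builds the pipeline entirely out of luigi WrapperTasks, which do no work themselves and are complete once every task yielded from requires() is complete. A minimal sketch of the pattern (each requirement must be yielded from requires()), assuming luigi is installed; task names are illustrative:

import luigi

class LeafTask(luigi.Task):
    taxid = luigi.IntParameter()

    def output(self):
        return luigi.LocalTarget('leaf-%d.txt' % self.taxid)

    def run(self):
        with self.output().open('w') as handle:
            handle.write('done')

class AllLeaves(luigi.WrapperTask):
    # A WrapperTask is complete once everything it yields is complete.
    def requires(self):
        for taxid in (9606, 10090):
            yield LeafTask(taxid=taxid)

# luigi.build([AllLeaves()], local_scheduler=True)
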
f5202cc14ee928753c6955a315432d6a8a88f932
|
Test for the dummy put_info_records() method.
|
pyexodus/tests/test_writing_exodus.py
|
pyexodus/tests/test_writing_exodus.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:copyright:
Lion Krischer ([email protected]), 2016
:license:
MIT License
"""
import os
import h5netcdf
import numpy as np
from pyexodus import exodus
def test_initialization(tmpdir):
filename = os.path.join(tmpdir.strpath, "example.e")
e = exodus(filename,
mode="w",
title="Example",
array_type="numpy",
numDims=3,
numNodes=5,
numElems=6,
numBlocks=1,
numNodeSets=0,
numSideSets=1)
e.close()
# Just manually test everything.
with h5netcdf.File(filename, mode="r") as f:
"""
Tests initialization.
Test data has been generated by using the official exodus Python API.
"""
assert dict(f.attrs) == {
'api_version': np.array([6.30000019], dtype=np.float32),
'file_size': np.array([1], dtype=np.int32),
'floating_point_word_size': np.array([8], dtype=np.int32),
'int64_status': np.array([0], dtype=np.int32),
'maximum_name_length': np.array([32], dtype=np.int32),
'title': 'Example',
'version': np.array([6.30000019], dtype=np.float32)}
assert dict(f.dimensions) == {
'four': 4,
'len_line': 81,
'len_name': 33,
'len_string': 33,
'num_dim': 3,
'num_el_blk': 1,
'num_elem': 6,
'num_nodes': 5,
'num_side_sets': 1,
# XXX: This is different from the original file!
'time_step': 1}
assert list(f.groups) == []
# Testing the variables is a bit more effort.
# Generate with
# v = f.variables
# {k: {"dimensions": v[k].dimensions,
# "shape": v[k].shape,
# "dtype": v[k].dtype,
# "attrs": dict(v[k].attrs),
# "data": v[k][:]} for k in f.variables.keys()} == \
expected = {
'coor_names': {
'attrs': {},
'data': np.array([
['', '', '', '', '', '', '', '', '', '', '', '', '', '',
'', '', '', '', '', '', '', '', '', '', '', '', '', '',
'', '', '', '', ''],
['', '', '', '', '', '', '', '', '', '', '', '', '', '',
'', '', '', '', '', '', '', '', '', '', '', '', '', '',
'', '', '', '', ''],
['', '', '', '', '', '', '', '', '', '', '', '', '', '',
'', '', '', '', '', '', '', '', '', '', '', '', '', '',
'', '', '', '', '']], dtype='|S1'),
'dimensions': ('num_dim', 'len_name'),
'dtype': np.dtype('S1'),
'shape': (3, 33)},
'coordx': {'attrs': {},
'data': np.array([0., 0., 0., 0., 0.]),
'dimensions': ('num_nodes',),
'dtype': np.dtype('float64'),
'shape': (5,)},
'coordy': {'attrs': {},
'data': np.array([0., 0., 0., 0., 0.]),
'dimensions': ('num_nodes',),
'dtype': np.dtype('float64'),
'shape': (5,)},
'coordz': {'attrs': {},
'data': np.array([0., 0., 0., 0., 0.]),
'dimensions': ('num_nodes',),
'dtype': np.dtype('float64'),
'shape': (5,)},
'eb_names': {'attrs': {},
'data': np.array([
['', '', '', '', '', '', '', '', '', '', '',
'', '', '', '', '', '', '', '', '', '', '',
'', '', '', '', '', '', '', '', '', '', '']],
dtype='|S1'),
'dimensions': ('num_el_blk', 'len_name'),
'dtype': np.dtype('S1'),
'shape': (1, 33)},
'eb_prop1': {'attrs': {'name': 'ID'},
'data': np.array([-1], dtype=np.int32),
'dimensions': ('num_el_blk',),
'dtype': np.dtype('int32'),
'shape': (1,)},
'eb_status': {'attrs': {},
'data': np.array([0], dtype=np.int32),
'dimensions': ('num_el_blk',),
'dtype': np.dtype('int32'),
'shape': (1,)},
'ss_names': {'attrs': {},
'data': np.array([
['', '', '', '', '', '', '', '', '', '', '',
'', '', '', '', '', '', '', '', '', '', '',
'', '', '', '', '', '', '', '', '', '', '']],
dtype='|S1'),
'dimensions': ('num_side_sets', 'len_name'),
'dtype': np.dtype('S1'),
'shape': (1, 33)},
'ss_prop1': {'attrs': {'name': 'ID'},
'data': np.array([-1], dtype=np.int32),
'dimensions': ('num_side_sets',),
'dtype': np.dtype('int32'),
'shape': (1,)},
'ss_status': {'attrs': {},
'data': np.array([0], dtype=np.int32),
'dimensions': ('num_side_sets',),
'dtype': np.dtype('int32'),
'shape': (1,)},
'time_whole': {'attrs': {},
# XXX: Empty array in original file.
'data': np.array([0.], dtype=np.float64),
'dimensions': ('time_step',),
'dtype': np.dtype('float64'),
# XXX: Shape = (0,) in original file.
'shape': (1,)}}
for key in sorted(expected.keys()):
a = f.variables[key]
e = expected[key]
assert dict(a.attrs) == e["attrs"], key
np.testing.assert_equal(a[:], e["data"], err_msg=key)
assert a.dimensions == e["dimensions"], key
assert a.dtype == e["dtype"], key
assert a.shape == e["shape"], key
|
Python
| 0 |
@@ -6438,16 +6438,497 @@
e%5B%22shape%22%5D, key%0A
+%0A%0Adef test_put_info_records(tmpdir):%0A %22%22%22%0A Does currently not do anything.%0A %22%22%22%0A filename = os.path.join(tmpdir.strpath, %22example.e%22)%0A%0A e = exodus(filename,%0A mode=%22w%22,%0A title=%22Example%22,%0A array_type=%22numpy%22,%0A numDims=3,%0A numNodes=5,%0A numElems=6,%0A numBlocks=1,%0A numNodeSets=0,%0A numSideSets=1)%0A e.put_info_records(strings=%5B%5D)%0A e.close()%0A
|
8cc36a325e8bedb7894f31fe049aee1aef903811
|
remove unused code
|
examples/glyphs/buttons_server.py
|
examples/glyphs/buttons_server.py
|
from __future__ import print_function
from bokeh.browserlib import view
from bokeh.document import Document
from bokeh.plotting import curdoc
from bokeh.models.widgets import (
VBox, Icon,
Button, Toggle, Dropdown,
CheckboxGroup, RadioGroup,
CheckboxButtonGroup, RadioButtonGroup,
)
from bokeh.models import Plot
from bokeh.client import push_session
def button_handler():
print("button_handler: click")
def toggle_handler(active):
print("toggle_handler: %s" % active)
def dropdown_handler(value):
print("dropdown_handler: %s" % value)
def split_handler(value):
print("split_handler: %s" % value)
def checkbox_group_handler(active):
print("checkbox_group_handler: %s" % active)
def radio_group_handler(active):
print("radio_group_handler: %s" % active)
def checkbox_button_group_handler(active):
print("checkbox_button_group_handler: %s" % active)
def radio_button_group_handler(active):
print("radio_button_group_handler: %s" % active)
button = Button(label="Push button", icon=Icon(name="check"), type="primary")
button.on_click(button_handler)
toggle = Toggle(label="Toggle button", type="success")
toggle.on_click(toggle_handler)
menu = [("Item 1", "item_1"), ("Item 2", "item_2"), None, ("Item 3", "item_3")]
dropdown = Dropdown(label="Dropdown button", type="warning", menu=menu)
dropdown.on_click(dropdown_handler)
menu = [("Item 1", "foo"), ("Item 2", "bar"), None, ("Item 3", "baz")]
split = Dropdown(label="Split button", type="danger", menu=menu, default_value="baz")
split.on_click(split_handler)
checkbox_group = CheckboxGroup(labels=["Option 1", "Option 2", "Option 3"], active=[0, 1])
checkbox_group.on_click(checkbox_group_handler)
radio_group = RadioGroup(labels=["Option 1", "Option 2", "Option 3"], active=0)
radio_group.on_click(radio_group_handler)
checkbox_button_group = CheckboxButtonGroup(labels=["Option 1", "Option 2", "Option 3"], active=[0, 1])
checkbox_button_group.on_click(checkbox_button_group_handler)
radio_button_group = RadioButtonGroup(labels=["Option 1", "Option 2", "Option 3"], active=0)
radio_button_group.on_click(radio_button_group_handler)
vbox = VBox(children=[button, toggle, dropdown, split, checkbox_group, radio_group, checkbox_button_group, radio_button_group])
document = Document()
document.add(vbox)
session = push_session(document)
session.show()
if __name__ == "__main__":
session.loop_until_closed()
|
Python
| 0.000017 |
@@ -298,38 +298,8 @@
,%0A)%0A
-from bokeh.models import Plot%0A
from
|
992fb00e8499ce19c952527eb9135754303cb365
|
abbreviation list in sentence tokenizer
|
src/indicnlp/tokenize/sentence_tokenize.py
|
src/indicnlp/tokenize/sentence_tokenize.py
|
# Copyright Anoop Kunchukuttan 2014 - present
#
# This file is part of Indic NLP Library.
#
# Indic NLP Library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Indic NLP Library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indic NLP Library. If not, see <http://www.gnu.org/licenses/>.
#
#Program for sentence splitting of Indian language input
#
# @author Anoop Kunchukuttan
#
import re
from indicnlp.transliterate import unicode_transliterate
DELIM_PAT=re.compile(r'[\.\?!\u0964\u0965]')
def is_acronym_char(text,lang):
ack_chars = {'ए', 'ऎ',
'बी', 'बि',
'सी', 'सि',
'डी', 'डि',
'ई', 'इ',
'एफ', 'ऎफ',
'जी', 'जि',
'एच','ऎच',
'आई', 'आइ','ऐ',
'जे', 'जॆ',
'के', 'कॆ',
'एल', 'ऎल',
'एम','ऎम',
'एन','ऎन',
'ओ', 'ऒ',
'पी', 'पि',
'क्यू', 'क्यु',
'आर',
'एस','ऎस',
'टी', 'टि',
'यू', 'यु',
'वी', 'वि', 'व्ही', 'व्हि',
'डब्ल्यू', 'डब्ल्यु',
'एक्स','ऎक्स',
'वाय',
'जेड', 'ज़ेड',
## add halant to the previous English character mappings.
'एफ्',
'ऎफ्',
'एच्',
'ऎच्',
'एल्',
'ऎल्',
'एम्',
'ऎम्',
'एन्',
'ऎन्',
'आर्',
'एस्',
'ऎस्',
'एक्स्',
'ऎक्स्',
'वाय्',
'जेड्', 'ज़ेड्',
}
return unicode_transliterate.UnicodeIndicTransliterator.transliterate(text,lang,'hi') in ack_chars
def sentence_split(text,lang,delim_pat=DELIM_PAT): ## New signature
line = text
### Phase 1: break on sentence delimiters.
cand_sentences=[]
begin=0
text = text.strip()
for mo in delim_pat.finditer(text):
p1=mo.start()
p2=mo.end()
## NEW
if p1>0 and text[p1-1].isnumeric():
continue
end=p1+1
s= text[begin:end].strip()
if len(s)>0:
cand_sentences.append(s)
begin=p1+1
s= text[begin:].strip()
if len(s)>0:
cand_sentences.append(s)
# print(cand_sentences)
# print('====')
# return cand_sentences
### Phase 2: Address the fact that '.' may not always be a sentence delimiter
### Method: If there is a run of lines containing only a word (optionally) and '.',
### merge these lines as well one sentence preceding and succeeding this run of lines.
final_sentences=[]
sen_buffer=''
bad_state=False
for i, sentence in enumerate(cand_sentences):
words=sentence.split(' ')
#if len(words)<=2 and words[-1]=='.':
if len(words)==1 and sentence[-1]=='.':
bad_state=True
sen_buffer = sen_buffer + ' ' + sentence
## NEW condition
elif sentence[-1]=='.' and is_acronym_char(words[-1][:-1],lang):
if len(sen_buffer)>0 and not bad_state:
final_sentences.append(sen_buffer)
bad_state=True
sen_buffer = sentence
elif bad_state:
sen_buffer = sen_buffer + ' ' + sentence
if len(sen_buffer)>0:
final_sentences.append(sen_buffer)
sen_buffer=''
bad_state=False
else: ## good state
if len(sen_buffer)>0:
final_sentences.append(sen_buffer)
sen_buffer=sentence
bad_state=False
if len(sen_buffer)>0:
final_sentences.append(sen_buffer)
return final_sentences
|
Python
| 0.999636 |
@@ -1000,19 +1000,20 @@
acronym_
-cha
+abbv
r(text,l
@@ -1036,16 +1036,39 @@
ars = %7B
+%0A ## acronym%0A
'%E0%A4%8F', '%E0%A4%8E'
@@ -1835,32 +1835,33 @@
'%E0%A5%9B%E0%A5%87%E0%A4%A1%E0%A5%8D',
+%0A
%0A %7D
@@ -1844,30 +1844,97 @@
%0A
-
+%0A ## abbreviation%0A '%E0%A4%B6%E0%A5%8D%E0%A4%B0%E0%A5%80',%0A '%E0%A4%A1%E0%A5%89',%0A '%E0%A4%95%E0%A5%81',%0A '%E0%A4%9A%E0%A4%BF',%0A '
%0A %7D%0A%0A
@@ -3359,19 +3359,20 @@
acronym_
-cha
+abbv
r(words%5B
|
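Phase 1 of sentence_split above cuts at delimiters but skips a delimiter that directly follows a digit, so decimals like "3.5" survive. A simplified ASCII-only sketch of that pass:

import re

DELIM = re.compile(r'[.?!]')

def naive_sentence_split(text):
    # Cut at delimiters, but keep going when the delimiter directly
    # follows a digit, mirroring the numeric guard in the code above.
    sentences, begin = [], 0
    text = text.strip()
    for match in DELIM.finditer(text):
        start = match.start()
        if start > 0 and text[start - 1].isnumeric():
            continue
        piece = text[begin:start + 1].strip()
        if piece:
            sentences.append(piece)
        begin = start + 1
    tail = text[begin:].strip()
    if tail:
        sentences.append(tail)
    return sentences

print(naive_sentence_split('Version 3.5 shipped. It works!'))
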
5a582564e3fcf97aba0e9595807e1cdecb408210
|
add data integration to sync sequence
|
frappe/model/sync.py
|
frappe/model/sync.py
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals, print_function
"""
Sync's doctype and docfields from txt files to database
perms will get synced only if none exist
"""
import frappe
import os
from frappe.modules.import_file import import_file_by_path
from frappe.modules.patch_handler import block_user
from frappe.utils import update_progress_bar
def sync_all(force=0, verbose=False, reset_permissions=False):
block_user(True)
for app in frappe.get_installed_apps():
sync_for(app, force, verbose=verbose, reset_permissions=reset_permissions)
block_user(False)
frappe.clear_cache()
def sync_for(app_name, force=0, sync_everything = False, verbose=False, reset_permissions=False):
files = []
if app_name == "frappe":
# these need to go first at time of install
for d in (("core", "docfield"), ("core", "docperm"), ("core", "has_role"), ("core", "doctype"),
("core", "user"), ("core", "role"), ("custom", "custom_field"),
("custom", "property_setter"), ("website", "web_form"),
("website", "web_form_field"), ("website", "portal_menu_item")):
files.append(os.path.join(frappe.get_app_path("frappe"), d[0],
"doctype", d[1], d[1] + ".json"))
for module_name in frappe.local.app_modules.get(app_name) or []:
folder = os.path.dirname(frappe.get_module(app_name + "." + module_name).__file__)
get_doc_files(files, folder, force, sync_everything, verbose=verbose)
l = len(files)
if l:
for i, doc_path in enumerate(files):
import_file_by_path(doc_path, force=force, ignore_version=True,
reset_permissions=reset_permissions, for_sync=True)
#print module_name + ' | ' + doctype + ' | ' + name
frappe.db.commit()
# show progress bar
update_progress_bar("Updating DocTypes for {0}".format(app_name), i, l)
# print each progress bar on new line
print()
def get_doc_files(files, start_path, force=0, sync_everything = False, verbose=False):
"""walk and sync all doctypes and pages"""
# load in sequence - warning for devs
document_types = ['doctype', 'page', 'report', 'print_format',
'website_theme', 'web_form', 'email_alert', 'print_style',
'data_migration_mapping', 'data_migration_plan']
for doctype in document_types:
doctype_path = os.path.join(start_path, doctype)
if os.path.exists(doctype_path):
for docname in os.listdir(doctype_path):
if os.path.isdir(os.path.join(doctype_path, docname)):
doc_path = os.path.join(doctype_path, docname, docname) + ".json"
if os.path.exists(doc_path):
if not doc_path in files:
files.append(doc_path)
|
Python
| 0 |
@@ -898,17 +898,20 @@
field%22),
-
+%0A%09%09%09
(%22core%22,
@@ -922,17 +922,20 @@
cperm%22),
-
+%0A%09%09%09
(%22core%22,
@@ -947,17 +947,20 @@
_role%22),
-
+%0A%09%09%09
(%22core%22,
@@ -992,17 +992,20 @@
%22user%22),
-
+%0A%09%09%09
(%22core%22,
@@ -1013,17 +1013,20 @@
%22role%22),
-
+%0A%09%09%09
(%22custom
@@ -1078,17 +1078,20 @@
etter%22),
-
+%0A%09%09%09
(%22websit
@@ -1140,17 +1140,20 @@
field%22),
-
+%0A%09%09%09
(%22websit
@@ -1175,16 +1175,221 @@
u_item%22)
+,%0A%09%09%09(%22data_migration%22, %22data_migration_mapping_detail%22),%0A%09%09%09(%22data_migration%22, %22data_migration_mapping%22),%0A%09%09%09(%22data_migration%22, %22data_migration_plan_mapping%22),%0A%09%09%09(%22data_migration%22, %22data_migration_plan%22)
):%0A%09%09%09fi
|
3a85eff683f9d717958d06faca71c2fb7aaa8394
|
fix encoding issues by decoding html before Souping it
|
trunc/web.py
|
trunc/web.py
|
# -*- coding: utf-8 -*-
"""
*********
trunc.web
*********
This module provides classes for accessing web pages."""
from __future__ import absolute_import, print_function
import time
from bs4 import BeautifulSoup as Soup
from urllib import FancyURLopener
from .util import fibonacci_number
class MyOpener(FancyURLopener):
"""A FancyURLopener object with a custom User-Agent field.
The ``MyOpener.version`` class attribute contains the User-Agent field.
Use ``MyOpener.set_version()`` to change this attribute.
"""
version = ("Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_3) "
"AppleWebKit/600.5.17 (KHTML, like Gecko) "
"Version/8.0.5 Safari/600.5.17")
def set_version(self, new_version):
"""Define a new User-Agent field for the MyOpener class.
:param new_version: desired User-Agent field
:type new_version: ``str``
"""
MyOpener.version = new_version
class Webpage(object):
"""Generic webpage with attributes."""
def __init__(self, address, delay=1):
"""Initialize the Webpage object.
:param address: url of the webpage
:param delay: ideal delay interval, in seconds, between page loads
(default is ``1``)
"""
self.address = address
self.opener = MyOpener()
self.delay = delay
def open(self):
"""Open the webpage.
If there's an error opening the page (i.e., if the Corpus throttles
the scraper), wait and retry in successively longer intervals (which
increase according to the Fibonacci sequence) until the page loads
successfully.
:rtype: ``<'instance'>``
"""
attempt = 1
page_not_loaded = True
while page_not_loaded:
try:
time.sleep(self.delay)
self.page = self.opener.open(self.address)
page_not_loaded = False
except IOError as e:
print("\nIOError: {}\nat {}".format(e, self.address))
time.sleep(fibonacci_number(attempt))
attempt += 1
return self.page
def html(self):
"""Return contents of the Webpage as html."""
return self.open().read()
def soup(self):
"""Return contents of the Webpage as a BeautifulSoup object."""
return Soup(self.html())
|
Python
| 0.000002 |
@@ -167,16 +167,30 @@
nction%0A%0A
+import codecs%0A
import t
@@ -1071,16 +1071,41 @@
delay=1
+, encoding='windows-1251'
):%0A
@@ -1287,16 +1287,65 @@
%60%601%60%60)%0A
+ :param encoding: encoding of the webpage%0A
@@ -1438,16 +1438,49 @@
= delay
+%0A self.encoding = encoding
%0A%0A de
@@ -1481,20 +1481,20 @@
def
-open
+page
(self):%0A
@@ -2282,24 +2282,39 @@
ef html(self
+, encoding=None
):%0A %22
@@ -2354,24 +2354,90 @@
as html.%22%22%22%0A
+ if encoding is None:%0A encoding = self.encoding%0A
retu
@@ -2444,20 +2444,20 @@
rn self.
-open
+page
().read(
@@ -2457,16 +2457,25 @@
).read()
+.decode()
%0A%0A de
|
93cab6327aef7386dba6f293a22099272af6af10
|
create resource only if it does not exist
|
src/infrastructure/annotations/requires.py
|
src/infrastructure/annotations/requires.py
|
'''
Created on Jun 19, 2013
@author: mpastern
'''
from src.resource.resourcemanager import ResourceManager
from src.errors.resourcemanagernotfounderror import ResourceManagerNotFoundError
class resources(object):
def __init__(self, params):
self.params = params
def __call__(self, original_func):
decorator_self = self
def wrappee(*args, **kwargs):
for resource in decorator_self.params:
rm = (resource.__name__ + 'ResourceManager')
rm_class = ResourceManager.getResourceManager(rm)
if rm_class:
rm_instance = rm_class()
# if not rm_instance.get():
# rm_instance.add()
rm_instance.add(**kwargs)
# TODO: use **kwargs for private init
else:
raise ResourceManagerNotFoundError(rm)
return original_func(*args, **kwargs)
return wrappee
|
Python
| 0 |
@@ -628,18 +628,16 @@
class()%0A
-#
@@ -675,55 +675,28 @@
get(
-):%0A# rm_instance.add()%0A
+get_only=True):%0A
|
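The resources decorator above is a decorator factory: it provisions each resource manager before the wrapped function runs, and after this commit it only adds the resource when a get_only lookup finds nothing. A self-contained sketch with the manager registry reduced to a dummy class:

def requires_resources(*managers):
    # Decorator factory: provision each manager before calling through.
    def decorator(func):
        def wrapper(*args, **kwargs):
            for manager in managers:
                instance = manager()
                if not instance.get(get_only=True):
                    instance.add(**kwargs)
            return func(*args, **kwargs)
        return wrapper
    return decorator

class DummyManager:
    store = {}

    def get(self, get_only=False):
        return DummyManager.store.get('res')

    def add(self, **kwargs):
        DummyManager.store['res'] = kwargs or True

@requires_resources(DummyManager)
def job(**kwargs):
    return 'ran with %s' % DummyManager.store

print(job(name='demo'))
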
53b867272c0d33c783fe1bd5dbb40baf14683910
|
Set top matches with GT as labeled in RCNN
|
frcnn/rcnn_target.py
|
frcnn/rcnn_target.py
|
import tensorflow as tf
import sonnet as snt
import numpy as np
from .utils.bbox_transform import bbox_transform, unmap
from .utils.bbox import bbox_overlaps
class RCNNTarget(snt.AbstractModule):
"""
Generate RCNN target tensors for both probabilities and bounding boxes.
TODO: We should unify this module with AnchorTarget.
"""
def __init__(self, num_classes, name='rcnn_proposal'):
super(RCNNTarget, self).__init__(name=name)
self._num_classes = num_classes
self._foreground_fraction = 0.25
self._batch_size = 64
self._foreground_threshold = 0.5
self._background_threshold_high = 0.5
self._background_threshold_low = 0.1
def _build(self, proposals, gt_boxes):
"""
Returns:
        TODO: Review implementation with py-faster-rcnn ProposalTargetLayer
        TODO: Is it possible to have two correct classes for a proposal?
"""
(proposals_label, bbox_targets) = tf.py_func(
self.proposal_target_layer,
[proposals, gt_boxes],
[tf.float32, tf.float32]
)
return proposals_label, bbox_targets
def proposal_target_layer(self, proposals, gt_boxes):
"""
First we need to calculate the true class of proposals based on gt_boxes.
Args:
proposals:
Shape (num_proposals, 5) -> (batch, x1, y1, x2, y2)
Are assumed to be inside the image.
gt_boxes:
Shape (num_gt, 4) -> (x1, y1, x2, y2)
Returns:
proposals_labels: (-1, 0, label) for each proposal.
Shape (num_proposals,)
bbox_targets: 4d bbox targets.
Shape (num_proposals, 4)
"""
np.random.seed(0) # TODO: For reproducibility.
# Remove batch id from proposals
proposals = proposals[:,1:]
overlaps = bbox_overlaps(
# We need to use float and ascontiguousarray because of Cython
# implementation of bbox_overlap
np.ascontiguousarray(proposals, dtype=np.float),
np.ascontiguousarray(gt_boxes, dtype=np.float)
)
# overlaps returns (num_proposals, num_gt_boxes) with the IoU of
# proposal P and ground truth box G in overlaps[P, G]
# We are going to label each proposal based on the IoU with `gt_boxes`.
# Start by filling the labels with -1, marking them as ignored.
proposals_label = np.empty((proposals.shape[0], ), dtype=np.float32)
proposals_label.fill(-1)
# For each overlap there is three possible outcomes for labelling:
        # if max(iou) <= 0.1 then we ignore
        # elif max(iou) < 0.5 then we label background
        # elif max(iou) >= 0.5 then we label with the highest IoU in overlap
max_overlaps = overlaps.max(axis=1)
# Label background
proposals_label[(max_overlaps > self._background_threshold_low) & (max_overlaps < self._background_threshold_high)] = 0
# Filter proposal that have labels
overlaps_with_label = max_overlaps >= self._foreground_threshold
# Get label for proposal with labels
overlaps_best_label = overlaps.argmax(axis=1)
# Having the index of the gt bbox with the best label we need to get the label for
# each gt box and sum it one because 0 is used for background.
# we only assign to proposals with `overlaps_with_label`.
proposals_label[overlaps_with_label] = (gt_boxes[:,4][overlaps_best_label] + 1)[overlaps_with_label]
# proposals_label now has [0, num_classes + 1] for proposals we are
# going to use and -1 for the ones we should ignore.
# Now we subsample labels and mark them as -1 in order to ignore them.
# Our batch of N should be: F% foreground (label > 0)
num_fg = int(self._foreground_fraction * self._batch_size)
fg_inds = np.where(proposals_label >= 1)[0]
if len(fg_inds) > num_fg:
disable_inds = np.random.choice(
fg_inds, size=(len(fg_inds) - num_fg), replace=False
)
# We disable them with their negatives just to be able to debug
# down the road.
proposals_label[disable_inds] = - proposals_label[disable_inds]
if len(fg_inds) < num_fg:
tf.logging.warning(
'We\'ve got only {} foreground samples instead of {}.'.format(
len(fg_inds), num_fg
))
# subsample negative labels
num_bg = self._batch_size - np.sum(proposals_label >= 1)
bg_inds = np.where(proposals_label == 0)[0]
if len(bg_inds) > num_bg:
disable_inds = np.random.choice(
bg_inds, size=(len(bg_inds) - num_bg), replace=False
)
proposals_label[disable_inds] = -1
"""
Next step is to calculate the proper targets for the proposals labeled
based on the values of the ground-truth boxes.
We have to use only the proposals labeled >= 1, each matching with
the proper gt_boxes
"""
        # Get the ids of the proposals that matter for bbox_target comparison.
proposal_with_target_idx = np.nonzero(proposals_label > 0)[0]
# Get top gt_box for every proposal, top_gt_idx shape (1000,) with values < gt_boxes.shape[0]
top_gt_idx = overlaps.argmax(axis=1)
# Get the corresponding ground truth box only for the proposals with target.
gt_boxes_ids = top_gt_idx[proposal_with_target_idx]
# Get the values of the ground truth boxes. This is shaped (num_proposals, 5) because we also have the label.
proposals_gt_boxes = gt_boxes[gt_boxes_ids]
# We create the same array but with the proposals
proposals_with_target = proposals[proposal_with_target_idx]
# We create our targets with bbox_transform
bbox_targets = bbox_transform(proposals_with_target, proposals_gt_boxes)
# TODO: We should normalize it in order for bbox_targets to have zero
# mean and unit variance according to the paper.
# We unmap `bbox_targets` to get back our final array shaped
# `(num_proposals, 4)` filling the proposals with bbox target with 0.
# We unmap targets to proposal_labels (containing the length of proposals)
bbox_targets = unmap(
bbox_targets, proposals_label.shape[0], proposal_with_target_idx, fill=0)
        # TODO: Bbox targets now have shape (x, 4) but maybe they should have shape
# (num_proposals, num_classes * 4).
return proposals_label, bbox_targets
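
# Toy illustration of the thresholds above (not part of the original module):
# with max IoUs of 0.05, 0.3 and 0.7, three proposals end up ignored (-1),
# background (0) and foreground (gt label + 1) respectively:
#
#   max_overlaps = np.array([0.05, 0.3, 0.7])
#   ->  proposals_label == [-1., 0., gt_label + 1.]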
|
Python
| 0 |
@@ -3592,16 +3592,268 @@
label%5D%0A%0A
+ # Finally we get the closest proposal for each ground truth box and mark it as positive.%0A # TODO: Check when not tired%0A gt_argmax_overlaps = overlaps.argmax(axis=0)%0A proposals_label%5Bgt_argmax_overlaps%5D = gt_boxes%5B:,4%5D + 1%0A%0A
|
71b4c326e18ce7e3d0b6aaab5203b3a403a85810
|
Update solution_2.py
|
Problem033/Python/solution_2.py
|
Problem033/Python/solution_2.py
|
import math
frac=1.0
for b in range(1,10):
for a in range(1,b):
for c in range(1,10):
if (a*10+b)/(b*10+c)==a/c:
frac*=(a/c)
print(math.ceil(1/frac))
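
# Illustration (not part of the original solution): the loops hunt for the
# four "digit-cancelling" fractions such as 49/98, where naively crossing
# out the shared digit 9 still gives the correct value: 49/98 == 4/8 == 0.5.
assert abs(49/98 - 4/8) < 1e-12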
|
Python
| 0.000002 |
@@ -5,16 +5,18 @@
rt math%0A
+%0A%0A
frac=1.0
@@ -16,16 +16,17 @@
rac=1.0%0A
+%0A
for b in
@@ -161,16 +161,33 @@
*=(a/c)%0A
+ %0A
print(ma
|
6d7e597ce216093d52ecdcb7db5c087dc6040bb1
|
Fix initiation of settings object
|
fullcalendar/conf.py
|
fullcalendar/conf.py
|
from datetime import timedelta
from django.conf import settings as django_settings
default = {
'FULLCALENDAR_FIRST_WEEKDAY': 0,
'FULLCALENDAR_OCCURRENCE_DURATION': timedelta(hours=1),
'FULLCALENDAR_SITE_COLORS': {}
}
settings = object()
for key, value in default.items():
setattr(settings, key,
getattr(django_settings, key, value))
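
# Note (illustration, not part of the original module): setattr() on a bare
# object() instance raises AttributeError, because plain object instances
# have no __dict__. A throwaway class built with type() accepts attributes,
# as in the fix recorded by this record's diff:
#
#   settings = type('SettingsDummy', (), default)
#   settings.FULLCALENDAR_FIRST_WEEKDAY  # -> 0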
|
Python
| 0.000004 |
@@ -240,15 +240,41 @@
s =
-object(
+type('SettingsDummy', (), default
)%0A%0Af
|
741cda41b8a74cd21ce157c00c1a2b45f6e3ab5c
|
Use sys.maxint for first backfill max_id
|
twick/cli.py
|
twick/cli.py
|
#!/usr/bin/env python
import sys, os
import re
import argparse
import dataset
import logging
from datetime import datetime
from time import sleep
from getpass import getpass
from search import Search
from persistence import get_last_id, get_first_id, store_response
import settings
logging.basicConfig()
logger = logging.getLogger("twick")
DEFAULT_DB = "sqlite:///twick.sqlite"
CREDENTIAL_NAMES = [
"TWICK_API_KEY",
"TWICK_API_SECRET",
"TWICK_ACCESS_TOKEN",
"TWICK_ACCESS_TOKEN_SECRET"
]
def try_credential(name):
try: return os.environ[name]
except: raise Exception("Missing environment variable ${}".format(name))
def get_credentials(credential_array=None):
if credential_array: return credential_array
return map(try_credential, CREDENTIAL_NAMES)
linebreak_pattern = re.compile(r"[\n\r]+")
def format_tweet(t):
timestamp = t.timestamp.strftime(settings.date_format)
screen_name = t.raw["user"]["screen_name"]
text = t.raw["text"].strip()
formatted = u"Tweet @ {}: <{}>: {}".format(timestamp, screen_name, text)
newlines_removed = re.sub(linebreak_pattern, " ", formatted)
return newlines_removed
def log_response(response):
logger.info(u"Response @ {}: Found {} tweet(s).".format(
response.timestamp.strftime(settings.date_format),
len(response.tweets)))
map(logger.info, map(format_tweet, response.tweets))
def cmd_fetch(args):
search = Search(args.credentials)
while True:
last_id = get_last_id(args.db)
response = search.query(args.query, since_id=last_id)
log_response(response)
store_response(args.db, response, args.store_raw)
sleep(args.throttle)
def cmd_backfill(args):
search = Search(args.credentials)
while True:
# Note: Unlike since_id, max_id is inclusive
# Cf.: https://dev.twitter.com/docs/working-with-timelines
first_id = get_first_id(args.db)
max_id = (first_id - 1) if first_id else None
response = search.query(args.query, max_id=max_id)
log_response(response)
store_response(args.db, response, args.store_raw)
if not len(response.tweets): break
else: sleep(args.throttle)
def dispatch_command(args):
commands = {
"fetch": cmd_fetch,
"backfill": cmd_backfill,
}
commands[args.command](args)
def add_shared_args(parser):
parser.add_argument("query")
parser.add_argument("--db",
type=dataset.connect,
help="SQLAlchemy connection string. Default: " + DEFAULT_DB,
default=DEFAULT_DB)
parser.add_argument("--throttle",
type=int,
default=15,
help="""Wait X seconds between requests.
Default: 15 (to stay under rate limits)""")
parser.add_argument("--credentials",
type=get_credentials,
help="""
Four space-separated strings for {}.
Defaults to environment variables by those names.
""".format(", ".join(CREDENTIAL_NAMES)),
default=get_credentials())
parser.add_argument("--store-raw",
help="Store raw tweet JSON, in addition to excerpted fields.",
action="store_true")
parser.add_argument("--quiet",
help="Silence logging.",
action="store_true")
def parse_args():
parser = argparse.ArgumentParser(prog="twick")
subparsers = parser.add_subparsers(title="subcommands", dest="command")
parser_fetch = subparsers.add_parser("fetch")
add_shared_args(parser_fetch)
parser_backfill = subparsers.add_parser("backfill")
add_shared_args(parser_backfill)
args = parser.parse_args()
return args
def main():
args = parse_args()
logger.setLevel(logging.WARNING if args.quiet else logging.INFO)
dispatch_command(args)
if __name__ == "__main__":
main()
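
# Invocation sketch (illustration only; the query and credential values are
# placeholders, the flags mirror the argparse setup above):
#
#   $ export TWICK_API_KEY=... TWICK_API_SECRET=... \
#            TWICK_ACCESS_TOKEN=... TWICK_ACCESS_TOKEN_SECRET=...
#   $ python cli.py fetch "python" --db sqlite:///twick.sqlite --throttle 30
#   $ python cli.py backfill "python" --store-raw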
|
Python
| 0.000023 |
@@ -1966,34 +1966,26 @@
_id
-- 1) if first_id else None
+or sys.maxint) - 1
%0A
|
52e614f811fb9dfcd0dde46de43f13731a3717a5
|
Reformat doc string for txStatHat.__init__
|
txstathat.py
|
txstathat.py
|
# -*- coding: utf-8 -*-
"""StatHat bindings"""
from __future__ import division, print_function, unicode_literals
import urllib
from twisted.web.client import getPage
try:
from OpenSSL import SSL # noqa
have_ssl = True
except:
have_ssl = False
API_URI = b'http{}://api.stathat.com/ez'.format(b's' if have_ssl else b'')
class txStatHat(object):
"""An API wrapper for StatHat.com."""
def __init__(self, ezkey):
"""Initialize a txStatHat instance.
*ezkey* is you API key, i.e. your e-mail address by default. Does no
network activity.
"""
self.default_args = {'ezkey': ezkey}
def _make_call(self, args):
"""Build postdata using ezkey and supplied dict *args* and post it."""
post_dict = self.default_args.copy()
post_dict.update(args)
d = getPage(
API_URI,
method=b'POST',
postdata=urllib.urlencode(post_dict),
headers={
b'Content-Type': b'application/x-www-form-urlencoded'
},
)
return d
def count(self, stat, count=1):
"""Add *count* to *stat*.
:param stat: a StatHat counter stat
:param count: the value to add to the counter. 1 by default.
:type count: integer
:rtype: twisted.internet.defer.Deferred
"""
return self._make_call({'stat': stat, 'count': unicode(count)})
def value(self, stat, value):
"""Submit *value* to *stat*.
:param stat: a StatHat value stat
:param value: the value to submit
:type value: float or decimal.Decimal
:rtype: twisted.internet.defer.Deferred
"""
return self._make_call({'stat': stat, 'value': unicode(value)})
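
# Hedged usage sketch (not part of the original module; the key is a
# placeholder and the calls perform real HTTP posts from a running Twisted
# reactor):
#
#   sh = txStatHat('[email protected]')
#   d = sh.count('page views')        # returns a twisted Deferred
#   sh.value('temperature', 21.5)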
|
Python
| 0 |
@@ -490,22 +490,61 @@
-*
+Does no network activity.%0A%0A :param
ezkey
-* is
+:
you
+r
API
@@ -589,42 +589,8 @@
ult.
- Does no%0A network activity.
%0A%0A
|
74d5f5c1fe49d0f574a923fc490cd064f3cd52c5
|
allow specifying random state
|
galstreams/random.py
|
galstreams/random.py
|
import astropy.coordinates as coord
import astropy.units as u
import numpy as np
__all__ = ['get_uniform_spherical_angles', 'get_uniform_sphere']
@u.quantity_input(lon_lim=u.deg, lat_lim=u.deg)
def get_uniform_spherical_angles(size=1,
lon_lim=[0., 360]*u.deg,
lat_lim=[-90., 90]*u.deg):
"""Generate uniform random positions on the sphere
Parameters
----------
size : int
The number of points to generate.
lon_lim : `~astropy.units.Quantity`
The longitude limits to generate as an astropy Angle object or Quantity
with angular units.
lat_lim : `~astropy.units.Quantity`
The latitude limits to generate as an astropy Angle object or Quantity
with angular units.
Returns
-------
representation : `~astropy.coordinates.UnitSphericalRepresentation`
An astropy unit spherical representation object containing the random
spherical positions.
"""
lon = np.random.uniform(lon_lim[0].value,
lon_lim[1].value,
size) * lon_lim.unit
K = np.sin(lat_lim[1]) - np.sin(lat_lim[0])
arg = K * np.random.uniform(size=size) + np.sin(lat_lim[0])
lat = np.arcsin(arg)
return coord.UnitSphericalRepresentation(lon, lat)
@u.quantity_input(lon_lim=u.deg, lat_lim=u.deg, dist_lim=[u.one, u.pc])
def get_uniform_sphere(size,
lon_lim=[0., 360]*u.deg,
lat_lim=[-90., 90]*u.deg,
dist_lim=[0, 1.]*u.one):
"""Generate uniform random positions inside a spherical volume.
i.e. this can be used to generate points uniformly distributed through a
spherical annulus by specifying the distance limits.
Parameters
----------
size : int
The number of points to generate.
lon_lim : `~astropy.units.Quantity`
The longitude limits to generate as an astropy Angle object or Quantity
with angular units.
lat_lim : `~astropy.units.Quantity`
The latitude limits to generate as an astropy Angle object or Quantity
with angular units.
dist_lim : `~astropy.units.Quantity`
The distance limits to generate as an astropy Quantity, either
dimensionless or with length units.
Returns
-------
representation : `~astropy.coordinates.SphericalRepresentation`
An astropy spherical representation object containing the random
spherical positions.
"""
# R distributed as R^2
r = np.cbrt(np.random.uniform(dist_lim[0].value**3,
dist_lim[1].value**3,
size=size)) * dist_lim.unit
rep = get_uniform_spherical_angles(size=size,
lon_lim=lon_lim,
lat_lim=lat_lim)
return coord.SphericalRepresentation(lon=rep.lon,
lat=rep.lat,
distance=r)
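
# Hedged usage sketch (not part of the original module): 100 points drawn
# uniformly through a spherical shell between 1 and 10 parsec.
if __name__ == "__main__":
    rep = get_uniform_sphere(100, dist_lim=[1., 10.] * u.pc)
    print(rep.lon[:3], rep.lat[:3], rep.distance[:3])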
|
Python
| 0.000001 |
@@ -348,16 +348,68 @@
0%5D*u.deg
+,%0A random_state=None
):%0A %22
@@ -577,32 +577,43 @@
.units.Quantity%60
+ (optional)
%0A The lon
@@ -736,32 +736,43 @@
.units.Quantity%60
+ (optional)
%0A The lat
@@ -858,24 +858,179 @@
gular units.
+%0A random_state : %60numpy.random.RandomState%60 (optional)%0A A numpy random state object used to control the random number generator%0A and seed.
%0A%0A Return
@@ -1225,24 +1225,86 @@
ons.%0A %22%22%22
+%0A if random_state is None:%0A random_state = np.random
%0A%0A lon =
@@ -1497,25 +1497,28 @@
g = K *
-np.
random
+_state
.uniform
@@ -1632,16 +1632,17 @@
, lat)%0A%0A
+%0A
@u.quant
@@ -1876,16 +1876,58 @@
.%5D*u.one
+,%0A random_state=None
):%0A %22
@@ -2658,24 +2658,179 @@
ength units.
+%0A random_state : %60numpy.random.RandomState%60 (optional)%0A A numpy random state object used to control the random number generator%0A and seed.
%0A%0A Return
@@ -3025,90 +3025,175 @@
%22%22%22%0A
-%0A
-# R distributed as R%5E2%0A r = np.cbrt(np.random.uniform(dist_lim%5B0%5D.value**3
+if random_state is None:%0A random_state = np.random%0A%0A rep = get_uniform_spherical_angles(size=size,%0A lon_lim=lon_lim
,%0A
@@ -3228,28 +3228,28 @@
-dist_lim%5B1%5D.value**3
+ lat_lim=lat_lim
,%0A
@@ -3284,85 +3284,125 @@
-size=size)) * dist_lim.unit%0A
+ random_state=random_state)%0A%0A # R distributed as R%5E2
%0A r
-ep
=
-get_uniform_spherical_angles(size=size
+np.cbrt(random_state.uniform(dist_lim%5B0%5D.value**3
,%0A
@@ -3436,33 +3436,36 @@
- lon_lim=lon_lim
+dist_lim%5B1%5D.value**3
,%0A
@@ -3499,26 +3499,35 @@
- lat_lim=lat_lim)
+size=size)) * dist_lim.unit
%0A%0A
|
d668626739c8fe3de9a79743e95a209dd9596059
|
Add identifier attribute to reftrack node
|
src/jukeboxmaya/mayaplugins/jbreftrack.py
|
src/jukeboxmaya/mayaplugins/jbreftrack.py
|
import maya.OpenMayaMPx as OpenMayaMPx
import maya.OpenMaya as OpenMaya
from jukeboxcore.errors import PluginInitError, PluginUninitError
class JB_ReftrackNode(OpenMayaMPx.MPxNode):
"""A node to track references
    Stores the associated reference node, typ (asset, cache, shader, cam, lightrig etc), children, and jb_scenenode.
The reference node is used to track which reference node is responsible. Imported files do not have a connection to a ref node.
The type is responsible for the actions that take place when referencing, deleting etc.
Children are other reftrack nodes that should be deleted when reftrack is deleted.
    The scene node is used to track which file is actually imported/referenced.
If the file is unloaded, there is no jb_scenenode but a reference node.
"""
kNodeName = 'jb_reftrack'
kPluginNodeId = OpenMaya.MTypeId(0x14B02)
types = ["None", "Asset", "Alembic", "Shader", "Camera", "Lightrig"]
"""A list of possible types of references, like asset, cache, shader, camera, lightrig."""
def __init__(self):
super(JB_ReftrackNode, self).__init__()
@classmethod
def initialize(cls):
enumAttr = OpenMaya.MFnEnumAttribute()
msgAttr = OpenMaya.MFnMessageAttribute()
typedAttr = OpenMaya.MFnTypedAttribute()
nAttr = OpenMaya.MFnNumericAttribute()
# typ enum attribute
cls.typ_attr = enumAttr.create('type', 'typ', 0)
enumAttr.setConnectable(False)
for i, t in enumerate(cls.types):
enumAttr.addField(t, i)
cls.addAttribute(cls.typ_attr)
# namespace attribute
cls.ns_attr = typedAttr.create("namespace", "ns", OpenMaya.MFnData.kString)
cls.addAttribute(cls.ns_attr)
# ref node attribute
cls.ref_attr = msgAttr.create("referencenode", "ref")
msgAttr.setReadable(False)
cls.addAttribute(cls.ref_attr)
# parent attribute
cls.parent_attr = msgAttr.create("parent", "p")
msgAttr.setWritable(False)
cls.addAttribute(cls.parent_attr)
# children attribute
cls.children_attr = msgAttr.create("children", "c")
msgAttr.setReadable(False)
msgAttr.setArray(True)
msgAttr.setIndexMatters(False)
cls.addAttribute(cls.children_attr)
# the jb_scene node attribute
cls.scenenode_attr = msgAttr.create("scenenode", "scene")
msgAttr.setWritable(False)
cls.addAttribute(cls.scenenode_attr)
        # the taskfile_id in case we do not have a jb_scene node to connect to
cls.taskfile_id = nAttr.create('taskfile_id', 'tfid', OpenMaya.MFnNumericData.kInt)
cls.addAttribute(cls.taskfile_id)
@classmethod
def creator(cls):
return OpenMayaMPx.asMPxPtr(cls())
@classmethod
def add_type(cls, typ):
"""Register a type for jb_reftrack nodes.
        A type specifies how the reference should be handled. For example the type shader will connect shaders
        with the parent when the shaders are loaded.
Default types are :data:`JB_ReftrackNode.types`.
.. Note:: You have to add types before you initialize the plugin!
:param typ: a new type specifier, e.g. \"asset\"
:type typ: str
:returns: None
:rtype: None
:raises: :class:`TypeError`
"""
if not isinstance(typ, basestring):
raise TypeError("The type should be a string. But is %s" % type(typ))
cls.types.append(typ)
def initializePlugin(obj):
plugin = OpenMayaMPx.MFnPlugin(obj, 'David Zuber', '1.0', 'Any')
try:
plugin.registerNode(JB_ReftrackNode.kNodeName, JB_ReftrackNode.kPluginNodeId, JB_ReftrackNode.creator, JB_ReftrackNode.initialize)
except:
raise PluginInitError('Failed to register %s node' % JB_ReftrackNode.kNodeName)
def uninitializePlugin(obj):
plugin = OpenMayaMPx.MFnPlugin(obj)
try:
plugin.deregisterNode(JB_ReftrackNode.kPluginNodeId)
except:
raise PluginUninitError('Failed to unregister %s node' % JB_ReftrackNode.kNodeName)
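
# Hedged usage sketch (illustration only; must run inside Maya with this
# plug-in on the plug-in path, and "Proxy" is a hypothetical type). Extra
# types have to be registered before the plug-in is initialized, per the
# add_type docstring above:
#
#   import maya.cmds
#   JB_ReftrackNode.add_type("Proxy")
#   maya.cmds.loadPlugin("jbreftrack.py")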
|
Python
| 0 |
@@ -2719,16 +2719,249 @@
le_id)%0A%0A
+ # the identifier attribute, we need to order the reftracks in the id permanently%0A cls.identifier_attr = nAttr.create('identifier', 'id', OpenMaya.MFnNumericData.kInt, -1)%0A cls.addAttribute(cls.identifier_attr)%0A%0A
@cla
|
6f39dd808f91b6f018571315d13eaccf2f70cd24
|
Update Chapter22/primeNum.py improved rabinMiller() docstring readability
|
books/CrackingCodesWithPython/Chapter22/primeNum.py
|
books/CrackingCodesWithPython/Chapter22/primeNum.py
|
"""Prime Number Sieve
Implements a series of functions that determine if a given number is prime.
Attributes:
LOW_PRIMES (list): List containing prime numbers <= 100 (aka 'low primes').
Note:
* https://www.nostarch.com/crackingcodes/ (BSD Licensed)
"""
import math, random
def isPrimeTrialDiv(num: int) -> bool:
"""Is prime trial division
Uses the `trial division`_ algorithm for testing if a given number is prime.
Args:
num: Integer to determine if prime.
Returns:
True if num is a prime number, otherwise False.
.. _trial division:
https://en.wikipedia.org/wiki/Trial_division
"""
# All numbers less than 2 are not prime:
if num < 2:
return False
# See if num is divisible by any number up to the square root of num:
for i in range(2, int(math.sqrt(num)) + 1):
if num % i == 0:
return False
return True
def primeSieve(sieveSize: int) -> list:
"""Prime sieve
Calculates prime numbers using the `Sieve of Eratosthenes`_ algorithm.
Args:
sieveSize: Largest number to check if prime starting from zero.
Returns:
List containing prime numbers from 0 to given number.
.. _Sieve of Eratosthenes:
https://en.wikipedia.org/wiki/Sieve_of_Eratosthenes
"""
sieve = [True] * sieveSize
sieve[0] = False # Zero and one are not prime numbers.
sieve[1] = False
# Create the sieve:
for i in range(2, int(math.sqrt(sieveSize)) + 1):
pointer = i * 2
while pointer < sieveSize:
sieve[pointer] = False
pointer += i
# Compile the list of primes:
primes = []
for i in range(sieveSize):
        if sieve[i]:
primes.append(i)
return primes
def rabinMiller(num: int) -> bool:
"""Rabin-Miller primality test
Uses the `Rabin-Miller`_ primality test to test if a given number is prime.
Args:
num: Number to check if prime.
Returns:
True if num is prime, False otherwise.
Note:
* The Rabin-Miller primality test relies on unproven assumptions, therefore it can return false positives when
given a pseudoprime.
.. _Rabin-Miller:
https://en.wikipedia.org/wiki/Miller%E2%80%93Rabin_primality_test
"""
if num % 2 == 0 or num < 2:
return False # Rabin-Miller doesn't work on even integers.
if num == 3:
return True
s = num - 1
t = 0
while s % 2 == 0:
# Keep halving s until it is odd (and use t
# to count how many times we halve s):
s = s // 2
t += 1
for trials in range(5): # Try to falsify num's primality 5 times.
a = random.randrange(2, num - 1)
v = pow(a, s, num)
if v != 1: # This test does not apply if v is 1.
i = 0
while v != (num - 1):
if i == t - 1:
return False
else:
i = i + 1
v = (v ** 2) % num
return True
# Most of the time we can quickly determine if num is not prime
# by dividing by the first few dozen prime numbers. This is quicker
# than rabinMiller(), but does not detect all composites.
LOW_PRIMES = primeSieve(100)
def isPrime(num: int) -> bool:
"""Is prime
This function checks divisibility by LOW_PRIMES before calling
:func:`~books.CrackingCodesWithPython.Chapter22.primeNum.rabinMiller`.
Args:
num: Integer to check if prime.
Returns:
True if num is prime, False otherwise.
Note:
* If a number is divisible by a low prime number, it is not prime.
"""
if num < 2:
return False # 0, 1, and negative numbers are not prime.
if num in LOW_PRIMES:
return True # Low prime numbers are still prime numbers
# See if any of the low prime numbers can divide num:
for prime in LOW_PRIMES:
if num % prime == 0:
return False
# If all else fails, call rabinMiller() to determine if num is a prime:
return rabinMiller(num)
def generateLargePrime(keysize: int=1024) -> int:
"""Generate large prime number
Generates random numbers of given bit size until one is prime.
Args:
        keysize: Number of bits the generated prime should have.
Returns:
Random prime number that is keysize bits in size.
Note:
* keysize defaults to 1024 bits.
"""
while True:
num = random.randrange(2**(keysize-1), 2**keysize)
if isPrime(num):
return num
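
# Hedged usage sketch (not part of the original module): quick checks of the
# helpers above with small, easily verified values.
if __name__ == "__main__":
    assert isPrime(97)                    # 97 is one of the LOW_PRIMES
    assert not isPrime(91)                # 91 == 7 * 13
    assert primeSieve(100) == LOW_PRIMES  # identical construction
    print(generateLargePrime(32))         # a random 32-bit prime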
|
Python
| 0 |
@@ -1901,20 +1901,21 @@
test to
-test
+check
if a gi
|
1a8a77accdc3e6e968dbb365abee717f8ca0ffb4
|
update videoserver for new mjpg-streamer
|
src/machinetalk/videoserver/videoserver.py
|
src/machinetalk/videoserver/videoserver.py
|
#!/usr/bin/python2
import os
import sys
import time
from stat import *
import subprocess
import threading
import socket
import argparse
import ConfigParser
from machinekit import service
from machinekit import config
class VideoDevice:
process = None
service = None
txtRecord = None
sdname = ''
framerate = 30
resolution = '640x480'
quality = 80
device = '/dev/video0'
bufferSize = 1
port = 0
dsname = ''
zmqUri = ''
class VideoServer(threading.Thread):
def __init__(self, inifile, host='', loopback=False,
svc_uuid=None, debug=False):
threading.Thread.__init__(self)
self.inifile = inifile
self.host = host
self.loopback = loopback
self.svc_uuid = svc_uuid
self.debug = debug
self.videoDevices = {}
self.cfg = ConfigParser.ConfigParser(defaults={'arguments': ''})
self.cfg.read(self.inifile)
if self.debug:
print (("video devices:", self.cfg.sections()))
for n in self.cfg.sections():
videoDevice = VideoDevice()
videoDevice.framerate = self.cfg.getint(n, 'framerate')
videoDevice.resolution = self.cfg.get(n, 'resolution')
videoDevice.quality = self.cfg.get(n, 'quality')
videoDevice.device = self.cfg.get(n, 'device')
videoDevice.bufferSize = self.cfg.getint(n, 'bufferSize')
videoDevice.arguments = self.cfg.get(n, 'arguments')
self.videoDevices[n] = videoDevice
if self.debug:
print (("framerate:", videoDevice.framerate))
print (("resolution:", videoDevice.resolution))
print (("quality:", videoDevice.quality))
print (("device:", videoDevice.device))
print (("bufferSize:", videoDevice.bufferSize))
print (("arguments:", videoDevice.arguments))
def startVideo(self, deviceId):
videoDevice = self.videoDevices[deviceId]
if videoDevice.process is not None:
print ("video device already running")
return
sock = socket.socket()
sock.bind(('', 0))
port = sock.getsockname()[1]
sock.close()
baseUri = 'tcp://'
if self.loopback:
baseUri += '127.0.0.1'
else:
baseUri += '*'
videoDevice.port = port
videoDevice.zmqUri = '%s:%i' % (baseUri, videoDevice.port)
videoDevice.dsname = videoDevice.zmqUri.replace('*', self.host)
if self.debug:
print ((
"dsname = ", videoDevice.dsname,
"port =", videoDevice.port))
libpath = '/usr/local/lib/'
os.environ['LD_LIBRARY_PATH'] = libpath
arguments = ""
        if videoDevice.arguments != '':
arguments = ' ' + videoDevice.arguments
command = ['mjpg_streamer -i \"' + libpath + 'input_uvc.so -n' +
' -f ' + str(videoDevice.framerate) +
' -r ' + videoDevice.resolution +
' -q ' + videoDevice.quality +
' -d ' + videoDevice.device +
'" -o \"' + libpath + 'output_zmqserver.so --address ' +
videoDevice.zmqUri +
' --buffer_size ' + str(videoDevice.bufferSize) + '\"' +
arguments]
if self.debug:
print (("command:", command))
videoDevice.process = subprocess.Popen(command, shell=True)
try:
videoDevice.service = service.Service(type='video',
svcUuid=self.svc_uuid,
dsn=videoDevice.dsname,
port=videoDevice.port,
host=self.host,
loopback=self.loopback,
debug=self.debug)
videoDevice.service.publish()
except Exception as e:
print (('cannot register DNS service', e))
def stopVideo(self, deviceId):
videoDevice = self.videoDevices[deviceId]
if videoDevice.process is None:
print ("video device not running")
return
videoDevice.service.unpublish()
videoDevice.process.terminate()
videoDevice.process = None
videoDevice.service = None
def run(self):
if self.debug:
print ("run called")
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
for n in self.videoDevices:
videoDevice = self.videoDevices[n]
if videoDevice.process is None:
continue
self.stopVideo(n)
def main():
parser = argparse.ArgumentParser(description='Videoserver provides a webcam interface for Machinetalk')
parser.add_argument('-i', '--ini', help='INI file', default='video.ini')
parser.add_argument('-d', '--debug', help='Enable debug mode', action='store_true')
parser.add_argument('webcams', help='List of webcams to stream', nargs='+')
args = parser.parse_args()
debug = args.debug
mkconfig = config.Config()
mkini = os.getenv("MACHINEKIT_INI")
if mkini is None:
mkini = mkconfig.MACHINEKIT_INI
if not os.path.isfile(mkini):
sys.stderr.write("MACHINEKIT_INI " + mkini + " does not exist\n")
sys.exit(1)
mki = ConfigParser.ConfigParser()
mki.read(mkini)
mkUuid = mki.get("MACHINEKIT", "MKUUID")
remote = mki.getint("MACHINEKIT", "REMOTE")
if remote == 0:
print("Remote communication is deactivated, videoserver will use the loopback interfaces")
print(("set REMOTE in " + mkini + " to 1 to enable remote communication"))
if debug:
print ("announcing videoserver")
hostname = '%(fqdn)s' # replaced by service announcement
video = VideoServer(args.ini,
svc_uuid=mkUuid,
host=hostname,
loopback=(not remote),
debug=debug)
video.setDaemon(True)
video.start()
for webcam in args.webcams:
video.startVideo(webcam)
while True:
time.sleep(1)
if __name__ == "__main__":
main()
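
# Sketch of a matching video.ini (illustration only; all values are
# placeholders, the keys mirror the ConfigParser reads above):
#
#   [Webcam1]
#   framerate = 30
#   resolution = 640x480
#   quality = 80
#   device = /dev/video0
#   bufferSize = 1
#   arguments =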
|
Python
| 0 |
@@ -214,16 +214,78 @@
onfig%0A%0A%0A
+MJPG_STREAMER_PLUGIN_PATH = '/usr/local/lib/mjpg-streamer/'%0A%0A%0A
class Vi
@@ -2772,25 +2772,33 @@
h =
-'/usr/local/lib/'
+MJPG_STREAMER_PLUGIN_PATH
%0A
|
c7a4bfdeb8e20d4469dac85370f640cb944db0d9
|
Remove python2 support
|
src/chattymarkov/database/__init__.py
|
src/chattymarkov/database/__init__.py
|
"""Chattymarkov database submodule.
This submodule gathers all the supported database formats.
"""
import six
from .databases import JSONFileDatabase, MemoryDatabase, RedisDatabase
class ChattymarkovDatabaseError(Exception):
"""Base exception class for chattymarkov.database related errors."""
class UnknownDatabasePrefixError(ChattymarkovDatabaseError):
"""Exception class for unknown database prefixes errors."""
class InvalidConnectionStringError(ChattymarkovDatabaseError):
"""Exception class for invalid connection string error."""
_DATABASE_PREFIXES = {}
def database(prefix):
"""Wrap a function responsible for building a database."""
def wrapper(func):
"""Register `func` in the global `_DATABASE_PREFIXES` hash."""
_DATABASE_PREFIXES[prefix] = func
return func
return wrapper
def get_database_builder(prefix):
"""Get the function associated to `prefix` to instanciate a database.
This function is a simple interface around the `_DATABASE_PREFIXES` hash.
Args:
prefix (str): the prefix's database function.
Raises:
UnknownDatabasePrefixError: the prefix is not recognized.
Returns:
function: the function assiociated to the `prefix`.
"""
if prefix not in _DATABASE_PREFIXES:
raise UnknownDatabasePrefixError(
"Database prefix '{}' is unknown.".format(prefix))
return _DATABASE_PREFIXES[prefix]
def _get_connection_params(resource):
"""Extract connection and params from `resource`."""
args = resource.split(';')
if len(args) > 1:
return args[0], args[1:]
else:
return args[0], []
@database('redis')
def build_redis_database(resource):
"""Build a `RedisDatabase` instance to communicate with a redis server.
Args:
resource (str): a string that represents connection information.
Returns:
RedisDatabase: instance to communicate with the redis server.
"""
whitelist = {'password', 'db'}
extra_params = {}
connection, params = _get_connection_params(resource)
# Parse additional parameters, if any
if len(params) > 0:
for param in params:
key, equal, value = param.partition('=')
if key in whitelist:
extra_params[key] = value
if connection.startswith('/'):
# UNIX socket connection
return RedisDatabase(unix_socket_path=connection,
**extra_params)
else:
# TCP socket connection
host, colon, port = connection.partition(':')
if six.PY2:
port = unicode(port) # noqa
if host != '' and colon == ':' and port.isnumeric():
return RedisDatabase(host=host, port=int(port),
**extra_params)
@database('memory')
def build_memory_database(resource):
"""Build a `MemoryDatabase` instance.
Args:
        resource (str): path to the memory location. It currently carries no
            meaning. Should be "memory://" anyway.
Returns:
MemoryDatabase: an instance of MemoryDatabase that handles a
connection to the desired database.
"""
return MemoryDatabase()
@database('json')
def build_json_database(resource):
"""Build a `JSONFileDatabase` instance.
Args:
resource (str): path to the JSON file representing the database. If
the file is not empty, it will be loaded. In every cases, upon
instance destruction, the database will be stored in the specified
file.
Returns:
JSONFileDatabase: an instance of JSONFileDatabase that handles a
connection to the desired database.
"""
return JSONFileDatabase(resource)
def build_database_connection(connect_string):
"""Build a database connection based on `connect_string`.
Args:
connect_string (str): connection string for the database connection.
Raises:
InvalidConnectionStringError: raised when the `connect_string` is
invalid.
UnknownDatabasePrefixError: raised when the database prefix is
unknown.
Returns:
AbstractDatabase: an instance of AbstractDatabase that handle a
connection to the desired database.
"""
prefix, colon_slash_slash, resource = connect_string.partition('://')
if colon_slash_slash != '':
builder = get_database_builder(prefix)
return builder(resource)
else:
raise InvalidConnectionStringError(
"Invalid connection string '{}'. Must be of the form "
"prefix://[resource[;param1=value1;param2=value2...]]".format(
prefix))
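
# Usage sketch (hosts and paths below are placeholders): each registered
# prefix routes to one of the builders above.
#
#   build_database_connection('memory://')                    # MemoryDatabase
#   build_database_connection('json:///tmp/brain.json')       # JSONFileDatabase
#   build_database_connection('redis://127.0.0.1:6379;db=0')  # RedisDatabase (TCP)
#   build_database_connection('redis:///var/run/redis.sock')  # RedisDatabase (UNIX socket)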
|
Python
| 0.000011 |
@@ -97,19 +97,8 @@
%22%22%22%0A
-import six%0A
from
@@ -2568,70 +2568,8 @@
')%0A%0A
- if six.PY2:%0A port = unicode(port) # noqa%0A%0A
|
a565a09ee0ed64b2879004db2f21e360b41d92cb
|
Update stock_quant_package_dimension/models/stock_quant_package.py
|
stock_quant_package_dimension/models/stock_quant_package.py
|
stock_quant_package_dimension/models/stock_quant_package.py
|
# Copyright 2019 Camptocamp SA
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl)
from odoo import api, fields, models
class StockQuantPackage(models.Model):
_inherit = "stock.quant.package"
pack_weight = fields.Float("Pack Weight")
pack_length = fields.Integer("Pack Length", help="length")
width = fields.Integer("Pack Width", help="width")
height = fields.Integer("Pack Height", help="height")
volume = fields.Float(
"Pack Volume",
digits=(8, 4),
compute="_compute_volume",
readonly=True,
store=False,
help="volume",
)
estimated_pack_weight_kg = fields.Float(
"Estimated weight (in kg)",
digits="Product Unit of Measure",
compute="_compute_estimated_pack_weight_kg",
help="Based on the weight of the product.",
)
length_uom_id = fields.Many2one(
        # Same as product.packaging
"uom.uom",
"Dimensions Units of Measure",
domain=lambda self: [
("category_id", "=", self.env.ref("uom.uom_categ_length").id)
],
help="UoM for pack length, height, width (based on lenght UoM)",
default=lambda self: self.env[
"product.template"
]._get_length_uom_id_from_ir_config_parameter(),
)
length_uom_name = fields.Char(
        # Same as product.packaging
string="Length unit of measure label",
related="length_uom_id.name",
readonly=True,
)
weight_uom_id = fields.Many2one(
        # Same as product.packaging
"uom.uom",
string="Weight Units of Measure",
domain=lambda self: [
("category_id", "=", self.env.ref("uom.product_uom_categ_kgm").id)
],
help="Weight Unit of Measure",
compute=False,
default=lambda self: self.env[
"product.template"
]._get_weight_uom_id_from_ir_config_parameter(),
)
weight_uom_name = fields.Char(
        # Same as product.packaging
string="Weight unit of measure label",
related="weight_uom_id.name",
readonly=True,
)
volume_uom_id = fields.Many2one(
        # Same as product.packaging
"uom.uom",
string="Volume Units of Measure",
domain=lambda self: [
("category_id", "=", self.env.ref("uom.product_uom_categ_vol").id)
],
help="Packaging volume unit of measure",
default=lambda self: self.env[
"product.template"
]._get_volume_uom_id_from_ir_config_parameter(),
)
volume_uom_name = fields.Char(
        # Same as product.packaging
string="Volume Unit of Measure label",
related="volume_uom_id.name",
readonly=True,
)
@api.depends("pack_length", "width", "height")
def _compute_volume(self):
Packaging = self.env["product.packaging"]
for pack in self:
pack.volume = Packaging._calculate_volume(
pack.pack_length,
pack.height,
pack.width,
pack.length_uom_id,
pack.volume_uom_id,
)
def auto_assign_packaging(self):
self = self.with_context(_auto_assign_packaging=True)
res = super().auto_assign_packaging()
return res
def write(self, vals):
res = super().write(vals)
if self.env.context.get("_auto_assign_packaging") and vals.get(
"product_packaging_id"
):
self._update_dimensions_from_packaging(override=False)
return res
def _update_dimensions_fields(self):
# source: destination
return {
"packaging_length": "pack_length",
"width": "width",
"height": "height",
"max_weight": "pack_weight",
"length_uom_id": "length_uom_id",
"weight_uom_id": "weight_uom_id",
"volume_uom_id": "volume_uom_id",
}
def _update_dimensions_from_packaging(self, override=False):
for package in self:
if not package.product_packaging_id:
continue
dimension_fields = self._update_dimensions_fields()
for source, dest in dimension_fields.items():
if not override and package[dest]:
continue
package[dest] = package.product_packaging_id[source]
@api.onchange("product_packaging_id")
def onchange_product_packaging_id(self):
self._update_dimensions_from_packaging(override=True)
def _get_picking_move_line_ids_per_package(self, picking_id):
if not picking_id:
return {}
move_lines = self.env["stock.move.line"].search(
[("result_package_id", "in", self.ids), ("picking_id", "=", picking_id)]
)
res = dict.fromkeys(self.ids, self.env["stock.move.line"])
for ml in move_lines:
res.setdefault(ml.result_package_id, set(ml.ids))
res[ml.result_package_id].add(ml.id)
return res
def _get_weight_kg_from_move_lines(self, move_lines):
uom_kg = self.env.ref("uom.product_uom_kgm")
return sum(
ml.product_uom_id._compute_quantity(
qty=ml.qty_done, to_unit=ml.product_id.uom_id
)
* ml.product_id.weight_uom_id._compute_quantity(
qty=ml.product_id.weight, to_unit=uom_kg
)
for ml in move_lines
)
def _get_weight_kg_from_quants(self, quants):
uom_kg = self.env.ref("uom.product_uom_kgm")
return sum(
quant.quantity
* quant.product_id.weight_uom_id._compute_quantity(
qty=quant.product_id.weight, to_unit=uom_kg
)
for quant in quants
)
@api.depends("quant_ids")
@api.depends_context("picking_id")
def _compute_estimated_pack_weight_kg(self):
# NOTE: copy-pasted and adapted from `delivery` module
# because we do not want to add the dependency against 'delivery' here.
picking_id = self.env.context.get("picking_id")
move_line_ids_per_package = self._get_picking_move_line_ids_per_package(
picking_id
)
for package in self:
weight = 0.0
if picking_id: # coming from a transfer
move_line_ids = move_line_ids_per_package.get(package, [])
move_lines = self.env["stock.move.line"].browse(move_line_ids)
weight = package._get_weight_kg_from_move_lines(move_lines)
else:
weight = package._get_weight_kg_from_quants(package.quant_ids)
package.estimated_pack_weight_kg = weight
|
Python
| 0 |
@@ -239,21 +239,8 @@
oat(
-%22Pack Weight%22
)%0A
|
b2b123b15f178e81737127a4dda399a31ebb5240
|
Update Dice_Probability.py
|
Week2-Python-Libraries-and-Concepts-Used-in-Research/Dice_Probability.py
|
Week2-Python-Libraries-and-Concepts-Used-in-Research/Dice_Probability.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 17 16:26:48 2017
@author: lamahamadeh
source:
-------
Video 2.4.2: Examples Involving Randomness
Week 2 Overview/Python Libraries and Concepts Used in Research
Using python for research
Harvard
online course provided by edx.org
url: https://courses.edx.org/courses/course-v1:HarvardX+PH526x+3T2016/courseware/317ce880d7644d35840b1f734be76b06/391063d8f58242e892efafc9903b36e8/
"""
#roll a dice 100 times and plot a histogram of the outcomes
#meaning: a histogram that shows how frequent the numbers from 1 to 6 appeared in the 100 samples
import numpy as np
import random
import matplotlib.pyplot as plt
random.choice([1,2,3,4,5,6]) #this line throws the dice one time
rolls = []
for k in range(100):#we can try 1000, 10000000 times. We can notice that the histogram gets flatter as the number of rolls increases.
    rolls.append(random.choice([1,2,3,4,5,6]))#in this case, after using the for loop, we are rolling the dice 100 times
print(len(rolls))
#draw a histogram
plt.figure()
plt.hist(rolls, bins = np.linspace(0.5,6.5,7));
plt.show()
#This time we will roll 10 dice, not just one
ys = []
for rep in range(100):#By increasing the number of dice rolled per repetition, the distribution follows the central limit theorem
#The central limit theorem (CLT) states that the sum of a large number of random variables regardless of their distribution will
#approximately follow a normal distribution (or Gaussian distribution).
y = 0
for k in range (10):
x = random.choice([1,2,3,4,5,6])
y = y + x
ys.append(y)
print(len(ys)) #100
print(min(ys))
print(max(ys))
plt.figure()
plt.hist(ys); #the semicolon suppresses the output
plt.show()
|
Python
| 0.000001 |
@@ -102,16 +102,99 @@
hamadeh%0A
+%22%22%22%0A%0A#First: Python-based implementation %0A#------------------------------------%0A'''
%0Asource:
@@ -524,21 +524,19 @@
3b36e8/%0A
-%22%22%22%0A%0A
+'''
%0A#roll a
@@ -691,27 +691,8 @@
es%0A%0A
-import numpy as np%0A
impo
@@ -1857,12 +1857,507 @@
show()%0A%0A
+#------------------------------------------------------------------%0A%0A#Second: NumPy implementation%0A#---------------------------%0A'''%0Asource:%0A-------%0AVideo 2.4.3: using the NumPy Random Module%0AWeek 2 Overview/Python Libraries and Concepts Used in Research%0AUsing python for research%0AHarvard%0Aonline course provided by edx.org%0Aurl: https://courses.edx.org/courses/course-v1:HarvardX+PH526x+3T2016/courseware/317ce880d7644d35840b1f734be76b06/391063d8f58242e892efafc9903b36e8/%0A'''%0Aimport numpy as np%0A%0A%0A
%0A%0A%0A%0A
|
79ddd4d0038b16f8d19f31faeb338ed4f48bbadb
|
fix assembler_name return type
|
src/py/gopythongo/assemblers/virtualenv.py
|
src/py/gopythongo/assemblers/virtualenv.py
|
# -* encoding: utf-8 *-
import argparse
import os
from gopythongo.assemblers import BaseAssembler
from gopythongo.utils import ErrorMessage, highlight, run_process, print_info, create_script_path
class VirtualEnvAssembler(BaseAssembler):
@property
def assembler_name(self):
return "virtualenv"
def add_args(self, parser: argparse.ArgumentParser) -> None:
gr_pip = parser.add_argument_group("PIP Assembler options")
gr_pip.add_argument("--pip-opts", dest="pip_opts", action="append", default=[],
help="Any string specified here will be directly appended to all pip command-lines when it "
"is invoked, allowing you to specify arbitrary extra command-line parameters, like "
"--extra-index. Make sure that you use an equals sign, i.e. --pip-opts='' to avoid "
"'Unknown parameter' errors! http://bugs.python.org/issue9334")
gr_pip.add_argument("--upgrade-pip", dest="upgrade_pip", action="store_true", default=False,
help="If specified, GoPythonGo will update pip and virtualenv inside the build environment "
"to the newest available version before installing packages")
gr_setuppy = parser.add_argument_group("Setup.py Assembler options")
gr_setuppy.add_argument("--setuppy-install", dest="setuppy_install", action="append", default=[],
help="After all pip commands have run, this can run 'python setup.py install' on " +
"additional packages available in any filesystem path. This option can be " +
"used multiple times")
gr_python = parser.add_argument_group("Python ecosystem options")
gr_python.add_argument("--use-virtualenv", dest="virtualenv_binary", default="/usr/bin/virtualenv",
help="Set an alternative virtualenv binary to use inside the builder container")
gr_python.add_argument("--python-binary", dest="python_binary", default="python3",
help="Force virtualenv to use a certain Python version (Default: 'python3'). This will "
"be passed to virtualenv's -p parameter. You must change this if you want to build "
"and ship Python 2.x virtual environments.")
def validate_args(self, args: argparse.Namespace) -> None:
for path in args.setuppy_install:
if not (os.path.exists(path) and os.path.exists(os.path.join(path, "setup.py"))):
raise ErrorMessage("Cannot run setup.py in %s, because it does not exist" % highlight(path))
if not os.path.exists(args.virtualenv_binary) or not os.access(args.virtualenv_binary, os.X_OK):
raise ErrorMessage("virtualenv not found in path or not executable (%s).\n"
"You can specify an alternative path with %s" %
(args.virtualenv_binary, highlight("--use-virtualenv")))
def assemble(self, args: argparse.Namespace) -> None:
pip_binary = create_script_path(args.build_path, "pip")
run_pip = [pip_binary, "install"]
if args.pip_opts:
run_pip += args.pip_opts
if args.upgrade_pip:
print_info("Making sure that pip and virtualenv are up to date")
run_process(*run_pip + ["--upgrade", "pip", "virtualenv"])
print_info("Initializing virtualenv in %s" % args.build_path)
venv = [args.virtualenv_binary]
if args.python_binary:
venv += ["-p", args.python_binary]
venv += [args.build_path]
run_process(*venv)
print_info("Installing pip packages")
if args.packages:
run_process(*run_pip + args.packages)
envpy = create_script_path(args.build_path, "python")
if args.setuppy_install:
print_info("Installing setup.py packages")
for path in args.setuppy_install:
print()
print("******** %s ********" % highlight(os.path.join(path, "setup.py")))
os.chdir(path)
run_spy = [envpy, "setup.py", "install"]
run_process(*run_spy)
assembler_class = VirtualEnvAssembler
|
Python
| 0.00001 |
@@ -276,16 +276,23 @@
me(self)
+ -%3E str
:%0A
|
67c4d077ee4693290bf9883e90e4ed381b3cd227
|
Fix a mistake.
|
python/matplotlib/hist_logscale_xy.py
|
python/matplotlib/hist_logscale_xy.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# See:
# -
import numpy as np
import matplotlib.pyplot as plt
# SETUP #######################################################################
# histtype : [‘bar’ | ‘barstacked’ | ‘step’ | ‘stepfilled’]
HIST_TYPE='bar'
ALPHA=0.5
# MAKE DATA ###################################################################
data = np.random.exponential(size=1000000)
#data = np.abs(np.random.normal(size=1000000) * 10000.)
#data = np.random.chisquare(10, size=1000000)
# INIT FIGURE #################################################################
fig = plt.figure(figsize=(8.0, 6.0))
# AX1 #########################################################################
ax1 = fig.add_subplot(211)
res_tuple = ax1.hist(data,
bins=50,
histtype=HIST_TYPE,
alpha=ALPHA)
ax1.set_title("Normal scale")
ax1.set_xlabel("Value")
ax1.set_ylabel("Count")
# AX2 #########################################################################
ax2 = fig.add_subplot(212)
min = np.log10(data.min())
max = np.log10(data.max())
bins = np.logspace(min, max, 50) # <- create a range from 10**min to 10**max
print(bins)
res_tuple = ax2.hist(data,
log=True, # <- Activate log scale on Y axis
bins=bins,
histtype=HIST_TYPE,
alpha=ALPHA)
ax2.set_xscale("log") # <- Activate log scale on Y axis
ax2.set_title("Log scale")
ax2.set_xlabel("Value")
ax2.set_ylabel("Count")
# SHOW AND SAVE FILE ##########################################################
plt.tight_layout()
plt.savefig("hist_logscale_xy.png")
plt.show()
|
Python
| 0.003448 |
@@ -1050,16 +1050,17 @@
t(212)%0A%0A
+v
min = np
@@ -1078,16 +1078,17 @@
.min())%0A
+v
max = np
@@ -1129,21 +1129,23 @@
ace(
+v
min,
+v
max, 50)
@@ -1144,22 +1144,17 @@
50)
-
# %3C-
-creat
+mak
e a
@@ -1168,16 +1168,17 @@
rom 10**
+v
min to 1
@@ -1180,16 +1180,17 @@
to 10**
+v
max%0A%0Apri
|
7c2b503745d9898d5edbb03ef25b0936253ba44c
|
Make it DRY
|
nsone/__init__.py
|
nsone/__init__.py
|
#
# Copyright (c) 2014 NSONE, Inc.
#
# License under The MIT License (MIT). See LICENSE in project root.
#
from .config import Config
version = "0.9.15"
class NSONE:
def __init__(self, apiKey=None, config=None, configFile=None, keyID=None):
"""
Create a new top level NSONE API object
:param str apiKey: if given, initialize config with this API key \
(obtainable via creation in NSONE portal)
:param nsone.config.Config config: if given, uses a separately \
constructed and configured Config object
:param str configFile: if given, load configuration from the given \
json configuration file
:param str keyID: if given, use the specified key config in the \
multi-key configuration file
"""
self.config = config
if self.config is None:
self._loadConfig(apiKey, configFile)
if keyID:
self.config.useKeyID(keyID)
def _loadConfig(self, apiKey, configFile):
self.config = Config()
if apiKey:
self.config.createFromAPIKey(apiKey)
else:
configFile = Config.DEFAULT_CONFIG_FILE \
if not configFile else configFile
self.config.loadFromFile(configFile)
# REST INTERFACE
def zones(self):
"""
Return a new raw REST interface to zone resources
:rtype: :py:class:`nsone.rest.zones.Zones`
"""
import nsone.rest.zones
return nsone.rest.zones.Zones(self.config)
def records(self):
"""
Return a new raw REST interface to record resources
:rtype: :py:class:`nsone.rest.records.Records`
"""
import nsone.rest.records
return nsone.rest.records.Records(self.config)
def stats(self):
"""
Return a new raw REST interface to stats resources
:rtype: :py:class:`nsone.rest.stats.Stats`
"""
import nsone.rest.stats
return nsone.rest.stats.Stats(self.config)
def datasource(self):
"""
Return a new raw REST interface to datasource resources
:rtype: :py:class:`nsone.rest.data.Source`
"""
import nsone.rest.data
return nsone.rest.data.Source(self.config)
def datafeed(self):
"""
Return a new raw REST interface to feed resources
:rtype: :py:class:`nsone.rest.data.Feed`
"""
import nsone.rest.data
return nsone.rest.data.Feed(self.config)
def monitors(self):
"""
Return a new raw REST interface to monitors resources
:rtype: :py:class:`nsone.rest.monitoring.Monitors`
"""
import nsone.rest.monitoring
return nsone.rest.monitoring.Monitors(self.config)
def plan(self):
"""
Return a new raw REST interface to account plan
:rtype: :py:class:`nsone.rest.account.Plan`
"""
import nsone.rest.account
return nsone.rest.account.Plan(self.config)
# HIGH LEVEL INTERFACE
def loadZone(self, zone, callback=None, errback=None):
"""
Load an existing zone into a high level Zone object.
:param str zone: zone name, like 'example.com'
:rtype: :py:class:`nsone.zones.Zone`
"""
import nsone.zones
zone = nsone.zones.Zone(self.config, zone)
return zone.load(callback=callback, errback=errback)
def createZone(self, zone, zoneFile=None, callback=None, errback=None,
**kwargs):
"""
Create a new zone, and return an associated high level Zone object.
Several optional keyword arguments are available to configure the SOA
record.
If zoneFile is specified, upload the specific zone definition file
to populate the zone with.
:param str zone: zone name, like 'example.com'
:param str zoneFile: absolute path of a zone file
:keyword int retry: retry time
:keyword int refresh: refresh ttl
:keyword int expiry: expiry ttl
:keyword int nx_ttl: nxdomain TTL
:rtype: :py:class:`nsone.zones.Zone`
"""
import nsone.zones
zone = nsone.zones.Zone(self.config, zone)
return zone.create(zoneFile=zoneFile, callback=callback,
errback=errback, **kwargs)
def loadRecord(self, domain, type, zone=None, callback=None,
errback=None, **kwargs):
"""
Load an existing record into a high level Record object.
:param str domain: domain name of the record in the zone, for example \
'myrecord'. You may leave off the zone, since it must be \
specified in the zone parameter
:param str type: record type, such as 'A', 'MX', 'AAAA', etc.
:param str zone: zone name, like 'example.com'
:rtype: :py:class:`nsone.records`
"""
import nsone.zones
if zone is None:
# extract from record string
parts = domain.split('.')
if len(parts) <= 2:
zone = '.'.join(parts)
else:
zone = '.'.join(domain.split('.')[1:])
z = nsone.zones.Zone(self.config, zone)
return z.loadRecord(domain, type, callback=callback, errback=errback,
**kwargs)
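
# Hedged usage sketch (key and names below are placeholders; the calls hit
# the NSONE REST API, so nothing here runs offline):
#
#   api = NSONE(apiKey='my-api-key')
#   zone = api.createZone('example.com', nx_ttl=3600)
#   rec = api.loadRecord('myrecord', 'A', zone='example.com')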
|
Python
| 0.000996 |
@@ -5185,33 +5185,21 @@
.'.join(
-domain.split('.')
+parts
%5B1:%5D)%0A
|
69e1a8ddfc86c3ce63100e6fe91b65afc07fdce3
|
Change settings.py to use os.environ
|
oktan/settings.py
|
oktan/settings.py
|
"""
Django settings for oktan project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# SECRET_KEY = ')1iio1^wieu994*3v5ilc)f1x(u7c5&jxb4@69*bu=m0li1vx2'
#
# # SECURITY WARNING: don't run with debug turned on in production!
# DEBUG = True
SECRET_KEY = config('SECRET_KEY')
DEBUG = config('DEBUG', default=False, cast=bool)
DATABASES = {
'default': dj_database_url.config(
default=config('DATABASE_URL')
)
}
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'oktansite.apps.OktansiteConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'oktan.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
'builtins': [
'oktansite.templatetag.AssignNode'
]
},
},
]
WSGI_APPLICATION = 'oktan.wsgi.application'
ROOT_URLCONF = 'oktan.urls'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# User model
AUTH_USER_MODEL = "oktansite.Account"
# Backend settings
AUTHENTICATION_BACKENDS = (
'oktansite.backend.OktanBackend',
'django.contrib.auth.backends.ModelBackend',
)
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Jakarta'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
STATIC_URL = '/static/'
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT, 'static'),
)
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
MEDIA_URL = '/media/'
MEDIA_ROOT = 'media/'
LOGIN_URL = '/login/'
STATIC_ROOT = '/static/'
|
Python
| 0 |
@@ -593,16 +593,18 @@
klist/%0A%0A
+#
# SECURI
@@ -829,22 +829,30 @@
T_KEY =
-config
+os.environ.get
('SECRET
@@ -868,20 +868,29 @@
BUG = co
-nfig
+s.environ.get
('DEBUG'
@@ -986,22 +986,30 @@
default=
-config
+os.environ.get
('DATABA
@@ -2488,17 +2488,35 @@
tabases%0A
-%0A
+# local database%0A#
DATABASE
@@ -2513,32 +2513,34 @@
# DATABASES = %7B%0A
+#
'default': %7B
@@ -2540,24 +2540,25 @@
ult': %7B%0A
+#
'ENGINE'
@@ -2549,16 +2549,17 @@
+
'ENGINE'
@@ -2590,16 +2590,18 @@
lite3',%0A
+#
@@ -2650,14 +2650,18 @@
'),%0A
+#
%7D%0A
+#
%7D%0A%0A%0A
|
7245554cc85cd9dc3d4151c754f7d0916af10a12
|
Version up 1.2.911
|
dedupsqlfs/__init__.py
|
dedupsqlfs/__init__.py
|
# -*- coding: utf8 -*-
# Documentation. {{{1
"""
This Python library implements a file system in user space using FUSE. It's
called DedupFS because the file system's primary feature is deduplication,
which enables it to store virtually unlimited copies of files because data
is only stored once.
In addition to deduplication the file system also supports transparent
compression using any of the compression methods zlib, bz2, lzma
and optionally lzo, lz4, snappy, zstd.
These two properties make the file system ideal for backups: I'm currently
storing 250 GB worth of backups using only 8 GB of disk space.
The latest version is available at https://github.com/sergey-dryabzhinsky/dedupsqlfs
DedupFS is licensed under the MIT license.
Copyright 2010 Peter Odding <[email protected]>.
Copyright 2013-2015 Sergey Dryabzhinsky <[email protected]>.
"""
__name__ = "DedupSQLfs"
# for fuse mount
__fsname__ = "dedupsqlfs"
__fsversion__ = "3.1"
# Future 1.3
__version__ = "1.2.910"
# Check the Python version, warn the user if untested.
import sys
if sys.version_info[0] < 3 or \
(sys.version_info[0] == 3 and sys.version_info[1] < 2):
msg = "Warning: %s(%s, $s) has only been tested on Python 3.2, while you're running Python %d.%d!\n"
sys.stderr.write(msg % (__name__, __fsversion__, __version__, sys.version_info[0], sys.version_info[1]))
# Do not abuse GC - we generate a lot of objects
import gc
gc.set_threshold(100000, 2000, 200)
|
Python
| 0 |
@@ -988,17 +988,17 @@
%221.2.91
-0
+1
%22%0A%0A# Che
|
5bbb238a2ca137cb555a590d0c9a5673d88c8e7f
|
Version 1.2.943
|
dedupsqlfs/__init__.py
|
dedupsqlfs/__init__.py
|
# -*- coding: utf8 -*-
# Documentation. {{{1
"""
This Python library implements a file system in user space using FUSE. It's
called DedupFS because the file system's primary feature is deduplication,
which enables it to store virtually unlimited copies of files because data
is only stored once.
In addition to deduplication the file system also supports transparent
compression using any of the compression methods zlib, bz2, lzma
and optionally lzo, lz4, snappy, zstd.
These two properties make the file system ideal for backups: I'm currently
storing 250 GB worth of backups using only 8 GB of disk space.
The latest version is available at https://github.com/sergey-dryabzhinsky/dedupsqlfs
DedupFS is licensed under the MIT license.
Copyright 2010 Peter Odding <[email protected]>.
Copyright 2013-2020 Sergey Dryabzhinsky <[email protected]>.
"""
__name__ = "DedupSQLfs"
# for fuse mount
__fsname__ = "dedupsqlfs"
__fsversion__ = "3.3"
# Future 1.3
__version__ = "1.2.942"
# Check the Python version, warn the user if untested.
import sys
if sys.version_info[0] < 3 or \
(sys.version_info[0] == 3 and sys.version_info[1] < 2):
msg = "Warning: %s(%s, $s) has only been tested on Python 3.2, while you're running Python %d.%d!\n"
sys.stderr.write(msg % (__name__, __fsversion__, __version__, sys.version_info[0], sys.version_info[1]))
# Do not abuse GC - we generate a lot of objects
import gc
if hasattr(gc, "set_threshold"):
gc.set_threshold(100000, 2000, 200)
|
Python
| 0 |
@@ -988,17 +988,17 @@
%221.2.94
-2
+3
%22%0A%0A# Che
|
51fd99b238667fa5e5d43647a9b29f76b536e18b
|
Check for "error" in the log file and if found fail the test.
|
tests/itests.py
|
tests/itests.py
|
# -*- coding: utf-8 -*-
# vim: set fileencodings=utf-8
#
# Docker Integration Tests
from __future__ import absolute_import
import re
import sys
import time
from psycopg2 import connect
from unittest import TestCase
class InvalidState(Exception):
pass
class DockerBaseTestCase(TestCase):
def __init__(self, layer):
super(DockerBaseTestCase, self).__init__('testRun')
self._layer = layer
self.cli = layer.client
self.container = None
self.name = 'crate'
self.is_running = False
def connect(self, port=55432):
crate_ip = '127.0.0.1'
if self.cli.info()['OperatingSystem'].startswith(u'Boot2Docker'):
import subprocess
crate_ip = subprocess.check_output(r'docker-machine ip',
stderr=None, shell=True).decode("utf-8").strip('\n')
return connect(host=crate_ip, port=port)
def start(self, cmd=['crate'], ports={}, env=[]):
if self.is_running:
raise InvalidState('Container is still running.')
ulimits = [dict(name='memlock', soft=-1, hard=-1)]
host_conf = self.cli.create_host_config(port_bindings=ports, ulimits=ulimits)
self.assertTrue(len(cmd) >= 1)
self.assertEquals(cmd[0], 'crate')
cmd[1:1] = [
'-Cbootstrap.memory_lock=true',
'-Cnetwork.host=_site_',
]
env[0:0] = [
'CRATE_HEAP_SIZE=128m',
]
self.container = self.cli.create_container(
image=self._layer.tag,
command=cmd,
ports=list(ports.keys()),
host_config=host_conf,
environment=env,
name=self.name
)
self.cli.start(self.container_id)
process = self.crate_process()
sys.stdout.write('Waiting for Docker container ...')
while not process:
sys.stdout.write('.')
time.sleep(0.1)
process = self.crate_process()
print('')
self.is_running = True
def setUp(self):
pass
def tearDown(self):
if self.container_id:
self.stop(self.container_id)
def stop(self, _id):
self.cli.stop(_id)
self.cli.remove_container(_id)
self.container = None
time.sleep(1)
self.is_running = False
@property
def container_id(self):
return self.container and self.container.get('Id') or None
def info(self, key=None):
top = self.cli and self.cli.top(self.name) or {}
return key and top.get(key) or top
def crate_process(self):
proc = self.info(u'Processes')
if not proc:
return ''
for p in proc[0]:
if p.startswith('java'):
return p
return ''
def logs(self):
return self.cli.logs(self.name)
def wait_for_cluster(self):
print('Waiting for Crate to start ...')
for line in self.cli.logs(self.name, stream=True):
l = line.decode("utf-8").strip('\n').strip()
print(l)
if l.endswith('started'):
break
def docker(cmd, ports={}, env=[]):
def wrap(fn):
def inner_fn(self, *args, **kwargs):
print(self.__class__.__doc__)
self.start(cmd=cmd, ports=ports, env=env)
fn(self)
return inner_fn
return wrap
class SimpleRunTest(DockerBaseTestCase):
"""
docker run crate crate
"""
@docker(['crate'], ports={}, env=[])
def testRun(self):
self.wait_for_cluster()
lg = self.logs().decode("utf-8").split('\n')
self.assertTrue('new_master' in lg[-3:][0])
self.assertTrue(lg[-2:][0].endswith('started'))
class JavaPropertiesTest(DockerBaseTestCase):
"""
    docker run crate crate -Ccluster.name=foo -Cnode.name=bar
"""
@docker(['crate', '-Ccluster.name=foo', '-Cnode.name=bar'],
ports={5432:55432}, env=[])
def testRun(self):
self.wait_for_cluster()
conn = self.connect(port=55432)
with conn.cursor() as cursor:
# cluster name
cursor.execute('''select name from sys.cluster''')
res = cursor.fetchall()
self.assertEqual(res[0][0], 'foo')
# node name
cursor.execute('''select name from sys.nodes''')
res = cursor.fetchall()
self.assertEqual(res[0][0], 'bar')
conn.close()
class EnvironmentVariablesTest(DockerBaseTestCase):
"""
docker run --env CRATE_HEAP_SIZE=256m crate
"""
@docker(['crate'], ports={}, env=['CRATE_HEAP_SIZE=256m'])
def testRun(self):
self.wait_for_cluster()
# check -Xmx and -Xms process arguments
process = self.crate_process()
res = re.findall(r'-Xm[\S]+', process)
self.assertEqual('256m', res[0][len('-Xmx'):])
        self.assertEqual('256m', res[1][len('-Xms'):])
class SigarStatsTest(DockerBaseTestCase):
"""
docker run crate
"""
@docker(['crate'], ports={5432:55432}, env=[])
def testRun(self):
self.wait_for_cluster()
conn = self.connect(port=55432)
with conn.cursor() as cursor:
cursor.execute("select load from sys.nodes limit 1")
self.assert_not_fallback_values(cursor.fetchall())
cursor.execute("select mem from sys.nodes limit 1")
self.assert_not_fallback_values(cursor.fetchall())
conn.close()
def assert_not_fallback_values(self, result):
for entry in result:
for _, value in entry[0].items():
self.assertNotEqual(value, -1)
class TarballRemovedTest(DockerBaseTestCase):
"""
docker run crate /bin/sh -c 'ls -la /crate-*'
"""
@docker(['crate'], ports={}, env=[])
def testRun(self):
self.wait_for_cluster()
id = self.cli.exec_create('crate', 'ls -la /crate-*')
res = self.cli.exec_start(id['Id'])
self.assertEqual(b'ls: /crate-*: No such file or directory\n', res)
|
Python
| 0 |
@@ -3065,16 +3065,96 @@
rint(l)%0A
+ if %22error%22 in l.lower():%0A self.fail(%22Error in logs%22)%0A
|
4f95023ef9214e669e540392e5b43ee5e4c4d28f
|
delete useless code
|
image_classification/caffe2paddle.py
|
image_classification/caffe2paddle.py
|
import os
import functools
import inspect
import struct
import numpy as np
import caffe
def __default_not_set_callback__(kwargs, name):
return name not in kwargs or kwargs[name] is None
def wrap_param_default(param_names=None,
default_factory=None,
not_set_callback=__default_not_set_callback__):
assert param_names is not None
assert isinstance(param_names, list) or isinstance(param_names, tuple)
for each_param_name in param_names:
assert isinstance(each_param_name, basestring)
def __impl__(func):
@functools.wraps(func)
def __wrapper__(*args, **kwargs):
if len(args) != 0:
argspec = inspect.getargspec(func)
num_positional = len(argspec.args)
if argspec.defaults:
num_positional -= len(argspec.defaults)
assert argspec.varargs or len(
args
) <= num_positional, "Must use keyword arguments for non-positional args"
for name in param_names:
if not_set_callback(kwargs, name): # Not set
kwargs[name] = default_factory(func)
return func(*args, **kwargs)
if hasattr(func, "argspec"):
__wrapper__.argspec = func.argspec
else:
__wrapper__.argspec = inspect.getargspec(func)
return __wrapper__
return __impl__
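# Illustrative usage (an assumption for this sketch; `layer` is not part of
# the original converter): wrap_param_default back-fills a keyword argument
# from a factory whenever the caller leaves it unset.
#
#     @wrap_param_default(param_names=["name"],
#                         default_factory=lambda func: func.__name__)
#     def layer(name=None):
#         return name
#
#     layer()            # -> "layer" (default supplied by the factory)
#     layer(name="fc1")  # -> "fc1"   (explicit value wins)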
class DefaultNameFactory(object):
def __init__(self, name_prefix):
self.__counter__ = 0
self.__name_prefix__ = name_prefix
def __call__(self, func):
if self.__name_prefix__ is None:
self.__name_prefix__ = func.__name__
tmp = "__%s_%d__" % (self.__name_prefix__, self.__counter__)
self.__check_name__(tmp)
self.__counter__ += 1
return tmp
def __check_name__(self, nm):
pass
def reset(self):
self.__counter__ = 0
def wrap_name_default(name_prefix=None, name_param="name"):
"""
Decorator to set "name" arguments default to "{name_prefix}_{invoke_count}".
.. code:: python
@wrap_name_default("some_name")
def func(name=None):
print name # name will never be None. If name is not set,
# name will be "some_name_%d"
    :param name_prefix: name prefix; the wrapped function's __name__ if None.
:type name_prefix: basestring
:return: a decorator to set default name
:rtype: callable
"""
factory = DefaultNameFactory(name_prefix)
return wrap_param_default([name_param], factory)
class ModelConverter(object):
def __init__(self, caffe_model_file, caffe_pretrained_file,
paddle_output_path):
self.net = caffe.Net(caffe_model_file, caffe_pretrained_file,
caffe.TEST)
self.output_path = paddle_output_path
self.pre_layer_name = ""
self.pre_layer_type = ""
def convert(self):
layer_dict = self.net.layer_dict
for layer_name in layer_dict.keys():
layer = layer_dict[layer_name]
layer_params = layer.blobs
layer_type = layer.type
print layer_name, layer_type, len(layer_params)
if layer_type == "BatchNorm":
print layer_params[0].data.shape, layer_params[
1].data.shape, layer_params[2].data, type(
layer_params[0].data)
#print dir(layer)
#continue
if len(layer_params) > 0:
self.pre_layer_name = getattr(
self, "convert_" + layer_type + "_layer")(layer_params)
self.pre_layer_type = layer_type
return
@staticmethod
def write_parameter(outfile, feats):
version = 0
value_size = 4
fo = open(outfile, "wb")
header = ""
header += struct.pack("i", version)
header += struct.pack("I", value_size)
header += struct.pack("Q", feats.size)
fo.write(header + feats.tostring())
@wrap_name_default("conv")
def convert_Convolution_layer(self, params, name=None):
for i in range(len(params)):
data = np.array(params[i].data)
if len(params) == 2:
suffix = "0" if i == 0 else "bias"
file = os.path.join(self.output_path,
"_%s.w%s" % (name, suffix))
else:
file = os.path.join(self.output_path,
"_%s.w%s" % (name, str(i)))
ModelConverter.write_parameter(file, data.flatten())
return name
@wrap_name_default("fc_layer")
def convert_InnerProduct_layer(self, params, name=None):
for i in range(len(params)):
data = np.array(params[i].data)
if len(params) == 2:
suffix = "0" if i == 0 else "bias"
file = os.path.join(self.output_path,
"_%s.w%s" % (name, suffix))
else:
file = os.path.join(self.output_path,
"_%s.w%s" % (name, str(i)))
data = np.transpose(data)
ModelConverter.write_parameter(file, data.flatten())
return name
@wrap_name_default("batch_norm")
def convert_BatchNorm_layer(self, params, name=None):
scale = np.array(params[-1].data)
for i in range(2):
data = np.array(params[i].data) * scale
file = os.path.join(self.output_path,
"_%s.w%s" % (name, str(i + 1)))
ModelConverter.write_parameter(file, data.flatten())
return name
def convert_Scale_layer(self, params, name=None):
assert self.pre_layer_type == "BatchNorm"
name = self.pre_layer_name
for i in range(len(params)):
data = np.array(params[i].data)
suffix = "0" if i == 0 else "bias"
file = os.path.join(self.output_path, "_%s.w%s" % (name, suffix))
ModelConverter.write_parameter(file, data.flatten())
return name
if __name__ == "__main__":
converter = ModelConverter("./ResNet-101-deploy.prototxt",
"./ResNet-101-model.caffemodel",
"./caffe2paddle_resnet/")
converter.convert()
|
Python
| 0.000021 |
@@ -3206,335 +3206,8 @@
ype%0A
- print layer_name, layer_type, len(layer_params)%0A if layer_type == %22BatchNorm%22:%0A print layer_params%5B0%5D.data.shape, layer_params%5B%0A 1%5D.data.shape, layer_params%5B2%5D.data, type(%0A layer_params%5B0%5D.data)%0A #print dir(layer)%0A #continue%0A
|
e300d739bf0040b76a0deee75cc01b1410ba8953
|
change image field to name in CatalogoLandsat serializer
|
indicarprocess/tmsapi/serializers.py
|
indicarprocess/tmsapi/serializers.py
|
# -*- coding: utf-8 -*-
from rest_framework.serializers import ModelSerializer, SerializerMethodField
from catalogo.models import CatalogoLandsat
class LandsatSerializer(ModelSerializer):
southwest = SerializerMethodField()
northeast = SerializerMethodField()
class Meta:
model = CatalogoLandsat
fields = ['image', 'data', 'southwest', 'northeast']
def get_bounds(self, obj):
lats = []
lons = []
for lat, lon in obj.shape.coords[0]:
lats.append(lat)
lons.append(lon)
lats.sort()
lons.sort()
return [[lats[-1], lons[-1]], [lats[0], lons[0]]]
def get_southwest(self, obj):
return self.get_bounds(obj)[-1]
def get_northeast(self, obj):
return self.get_bounds(obj)[0]
|
Python
| 0 |
@@ -263,16 +263,51 @@
dField()
+%0A name = SerializerMethodField()
%0A%0A cl
@@ -371,12 +371,11 @@
= %5B'
-imag
+nam
e',
@@ -827,8 +827,83 @@
obj)%5B0%5D%0A
+%0A def get_name(self, obj):%0A return obj.image.replace('.tif', '')%0A
|
05a8f2a2e499b25472fbaf1b06e899f589a7101f
|
fix migration
|
editor/migrations/0003_auto_20150125_0430.py
|
editor/migrations/0003_auto_20150125_0430.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from editor.models import Source, Category
# add root data for Source and Category model
def add_root_data(apps, schema_editor):
cat = Category(name ="root", parent=None)
cat.save()
source = Source(
name = "root",
abbreviation = "root",
domain = "",
homepage = "",
about = "",
parent = None,
)
source.save()
source.categories.add(cat)
def revert(apps, schema_editor):
for source in Source.objects.all():
source.delete()
for category in Category.objects.all():
category.delete()
class Migration(migrations.Migration):
dependencies = [
('editor', '0002_auto_20150124_1912'),
]
operations = [
migrations.RunPython(add_root_data, reverse_code=revert),
]
|
Python
| 0.000001 |
@@ -141,17 +141,25 @@
Category
+, Format
%0A
-
%0A# add r
@@ -517,16 +517,72 @@
d(cat)%0A%0A
+ f = Format(name =%22root%22, parent=None)%0A f.save()%0A%0A
def reve
@@ -739,16 +739,70 @@
delete()
+%0A for f in Format.objects.all():%0A f.delete()
%0A%0Aclass
@@ -995,14 +995,15 @@
evert),%0A
-
%5D%0A
+%0A
|
1cfd11d1a6aa1d949067e5b24b5bfb2cca10ad09
|
add index
|
requestspool/paction/all.py
|
requestspool/paction/all.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2014 windpro
Author : windpro
E-mail : [email protected]
Date : 14/12/26
Desc :
"""
from httpappengine.decorator import url
from httpappengine.helper import not_found
from httplib import responses
from requestspool.util import get_route
def all_req(path_url, environ, start_response):
method = environ.get('REQUEST_METHOD').upper()
if not (path_url.startswith(u"http://") or path_url.startswith(u"https://")):
path_url = u"http://" + unicode(path_url)
req_query_string = environ.get("QUERY_STRING", "")
try:
        # read the request body data
req_data = environ['wsgi.input'].read(int(environ.get('CONTENT_LENGTH', '0')))
except:
req_data = None
requestpool_headers = {}
req_headers = {}
for key, val in environ.iteritems():
if key.startswith('HTTP_'):
            # build req_headers (no further processing needed for now)
header_name = key[5:].replace('_', '-')
if header_name == 'host'.upper():
continue
if 'REQUESTSPOOL.' in header_name:
requestpool_headers[header_name] = val
else:
req_headers[header_name] = val
route = get_route(path_url)
status_code, headers, output = route.http_result(requestpool_headers=requestpool_headers,
url=path_url, method=method, req_query_string=req_query_string,
req_data=req_data, req_headers=req_headers)
start_response(
"{0} {1}".format(status_code, responses.get(status_code, 'OK')),
headers.items())
return output
@url("/http://<path:path_url>", "GET,POST,PUT,PATCH,DELETE,HEAD,OPTIONS")
def http_req(path_url, environ, start_response):
return all_req(u'http://'+path_url, environ, start_response)
@url("/https://<path:path_url>", "GET,POST,PUT,PATCH,DELETE,HEAD,OPTIONS")
def https_req(path_url, environ, start_response):
return all_req(u'https://'+path_url, environ, start_response)
@url("/admin/route/add", "POST")
def route_add(environ, start_response):
    # not implemented yet
return not_found(start_response)
@url("/admin/route/all", "GET")
def route_show_all(environ, start_response):
    # not implemented yet
return not_found(start_response)
@url("/check", "GET")
def check(environ, start_response):
    # check that get_route works
get_route('http://test')
s = "Running!\n"
start_response("200 OK", [
("Content-Type", "text/plain"),
("Content-Length", str(len(s)))
])
return s
|
Python
| 0.000607 |
@@ -313,19 +313,16 @@
route%0A%0A%0A
-%0A%0A%0A
def all_
@@ -2645,8 +2645,104 @@
return s
+%0A%0A@url(%22/%22, %22GET%22)%0Adef index(environ, start_response):%0A return check(environ, start_response)
|
110a74f839b5e6158d380f20d28652fb4dec8e2c
|
Solve issue #163
|
lib/tower_cli/resources/project.py
|
lib/tower_cli/resources/project.py
|
# Copyright 2015, Ansible, Inc.
# Luke Sneeringer <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import click
from tower_cli import models, get_resource, resources
from tower_cli.api import client
from tower_cli.utils import debug, exceptions as exc, types
class Resource(models.Resource, models.MonitorableResource):
cli_help = 'Manage projects within Ansible Tower.'
endpoint = '/projects/'
name = models.Field(unique=True)
description = models.Field(required=False, display=False)
organization = models.Field(type=types.Related('organization'),
display=False, required=False)
scm_type = models.Field(
type=types.MappedChoice([
('', 'manual'),
('git', 'git'),
('hg', 'hg'),
('svn', 'svn'),
]),
)
scm_url = models.Field(required=False)
local_path = models.Field(
help_text='For manual projects, the server playbook directory name',
required=False)
scm_branch = models.Field(required=False, display=False)
scm_credential = models.Field(
'credential', display=False, required=False,
type=types.Related('credential'),
)
scm_clean = models.Field(type=bool, required=False, display=False)
scm_delete_on_update = models.Field(type=bool, required=False,
display=False)
scm_update_on_launch = models.Field(type=bool, required=False,
display=False)
@click.option('--monitor', is_flag=True, default=False,
help='If sent, immediately calls `project monitor` on the '
'project rather than exiting with a success.'
'It polls for status until the SCM is updated.')
@click.option('--timeout', required=False, type=int,
help='If provided with --monitor, the SCM update'
' will time out after the given number of seconds. '
'Does nothing if --monitor is not sent.')
def create(self, organization=None, monitor=False, timeout=None,
fail_on_found=False, force_on_exists=False,
**kwargs):
"""Create a new item of resource, with or w/o org.
This would be a shared class with user, but it needs the ability
to monitor if the flag is set.
"""
post_associate = False
if organization:
# Processing the organization flag depends on version
debug.log('Checking Organization Relationship.', header='details')
r = client.options('/projects/')
if 'organization' in r.json()['actions']['POST']:
kwargs['organization'] = organization
else:
post_associate = True
# First, run the create method, ignoring the organization given
answer = super(Resource, self).write(
create_on_missing=True,
fail_on_found=fail_on_found, force_on_exists=force_on_exists,
**kwargs
)
project_id = answer['id']
# If an organization is given, associate it here
if post_associate:
# Get the organization from Tower, will lookup name if needed
org_resource = get_resource('organization')
org_data = org_resource.get(organization)
org_pk = org_data['id']
debug.log("associating the project with its organization",
header='details', nl=1)
org_resource._assoc('projects', org_pk, project_id)
# if the monitor flag is set, wait for the SCM to update
if monitor:
return self.monitor(project_id, timeout=timeout)
return answer
@resources.command(use_fields_as_options=(
'name', 'description', 'scm_type', 'scm_url', 'local_path',
'scm_branch', 'scm_credential', 'scm_clean', 'scm_delete_on_update',
'scm_update_on_launch'
))
def modify(self, pk=None, create_on_missing=False, **kwargs):
"""Modify an already existing.
To edit the project's organizations, see help for organizations.
Fields in the resource's `identity` tuple can be used in lieu of a
primary key for a lookup; in such a case, only other fields are
written.
To modify unique fields, you must use the primary key for the lookup.
"""
# Associated with issue #52, the organization can't be modified
# with the 'modify' command. This would create confusion about
# whether its flag is an identifier versus a field to modify.
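        # Illustrative invocation (flag spellings assumed from the field list
        # given in use_fields_as_options above):
        #
        #     tower-cli project modify --name "my-project" --scm-branch master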
return super(Resource, self).write(
pk, create_on_missing=create_on_missing,
force_on_exists=True, **kwargs
)
@resources.command(use_fields_as_options=('name', 'organization'))
@click.option('--monitor', is_flag=True, default=False,
help='If sent, immediately calls `job monitor` on the newly '
'launched job rather than exiting with a success.')
@click.option('--timeout', required=False, type=int,
help='If provided with --monitor, this command (not the job)'
' will time out after the given number of seconds. '
'Does nothing if --monitor is not sent.')
def update(self, pk=None, create_on_missing=False, monitor=False,
timeout=None, name=None, organization=None):
"""Trigger a project update job within Ansible Tower.
Only meaningful on non-manual projects.
"""
# First, get the appropriate project.
# This should be uniquely identified at this point, and if not, then
# we just want the error that `get` will throw to bubble up.
project = self.get(pk, name=name, organization=organization)
pk = project['id']
# Determine whether this project is able to be updated.
debug.log('Asking whether the project can be updated.',
header='details')
result = client.get('/projects/%d/update/' % pk)
if not result.json()['can_update']:
raise exc.CannotStartJob('Cannot update project.')
# Okay, this project can be updated, according to Tower.
# Commence the update.
debug.log('Updating the project.', header='details')
result = client.post('/projects/%d/update/' % pk)
# If we were told to monitor the project update's status, do so.
if monitor:
return self.monitor(pk, timeout=timeout)
# Return the project update ID.
return {
'changed': True,
}
@resources.command
@click.option('--detail', is_flag=True, default=False,
help='Print more detail.')
def status(self, pk=None, detail=False, **kwargs):
"""Print the current job status."""
# Get the job from Ansible Tower.
debug.log('Asking for project update status.', header='details')
project = client.get('/projects/%d/' % pk).json()
# Determine the appropriate project update.
if 'current_update' in project['related']:
debug.log('A current update exists; retrieving it.',
header='details')
job = client.get(project['related']['current_update'][7:]).json()
elif project['related'].get('last_update', None):
debug.log('No current update exists; retrieving the most '
'recent update.', header='details')
job = client.get(project['related']['last_update'][7:]).json()
else:
raise exc.NotFound('No project updates exist.')
# In most cases, we probably only want to know the status of the job
# and the amount of time elapsed. However, if we were asked for
# verbose information, provide it.
if detail:
return job
# Print just the information we need.
return {
'elapsed': job['elapsed'],
'failed': job['failed'],
'status': job['status'],
}
|
Python
| 0.001415 |
@@ -4192,32 +4192,65 @@
if monitor
+ and answer.get('changed', False)
:%0A re
|
53df723a1574e62b4a74d56667c131793cf6c506
|
add queries to retrieve all users and a single user
|
users_handler.py
|
users_handler.py
|
from models.users import User
class UsersHandler:
def __init__(self, DB):
self.db = DB
def create_user(self, user_data):
collection = self.db.users
new_user = User(user_data)
collection.insert_one(new_user.__dict__)
|
Python
| 0.000001 |
@@ -22,16 +22,110 @@
ort User
+%0Aimport logging%0A%0Alogging.basicConfig(format='%25(levelname)s: %25(message)s', level=logging.DEBUG)
%0A%0Aclass
@@ -261,16 +261,113 @@
b.users%0A
+ user = collection.find_one(%7B%22username%22: user_data%5B%22username%22%5D%7D)%0A if not user:%0A
@@ -393,16 +393,20 @@
r_data)%0A
+
@@ -446,8 +446,636 @@
dict__)%0A
+ logging.info(%22User Created%22)%0A return True%0A logging.warning(%22User already exists%22)%0A return False%0A%0A def retrieve_users(self):%0A collection = self.db.users%0A users = collection.find()%0A logging.info(%22All users retrieved successfully%22)%0A return users%0A%0A def retrieve_user(self, username):%0A collection = self.db.users%0A user = collection.find_one(%7B'username': username%7D)%0A if user:%0A logging.info(%22User retrieved successfully%22)%0A return user%0A logging.error(%22User %3C%7B%7D%3E does not exist%22.format(username))%0A return None%0A
|
efdf57f4f688c66402c5b18152f2448a100a55a9
|
Generate departures region by region
|
busstops/management/commands/generate_departures.py
|
busstops/management/commands/generate_departures.py
|
from datetime import date, timedelta
from django.core.management.base import BaseCommand
from django.db import transaction
from txc import txc
from ...models import Service, Journey, StopUsageUsage, StopPoint
from ...utils import get_files_from_zipfile
ONE_DAY = timedelta(days=1)
def handle_timetable(service, timetable, day):
if day.weekday() not in timetable.operating_profile.regular_days:
return
if not timetable.operating_period.contains(day):
return
# if not hasattr(timetable, 'groupings'):
# return
for grouping in timetable.groupings:
stops = {row.part.stop.atco_code for row in grouping.rows}
existent_stops = StopPoint.objects.filter(atco_code__in=stops).values_list('atco_code', flat=True)
for vj in grouping.journeys:
if not vj.should_show(day):
continue
date = day
previous_time = None
stopusageusages = []
journey = Journey(service=service, datetime='{} {}'.format(date, vj.departure_time))
for i, (su, time) in enumerate(vj.get_times()):
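                # a time earlier than the previous stop's means the journey
                # has crossed midnight, so roll the date forward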
if previous_time and previous_time > time:
date += ONE_DAY
if su.stop.atco_code in existent_stops:
if not su.activity or su.activity.startswith('pickUp'):
stopusageusages.append(
StopUsageUsage(datetime='{} {}'.format(date, time),
order=i, stop_id=su.stop.atco_code)
)
journey.destination_id = su.stop.atco_code
previous_time = time
if journey.destination_id:
journey.save()
for suu in stopusageusages:
suu.journey = journey
StopUsageUsage.objects.bulk_create(stopusageusages)
class Command(BaseCommand):
@transaction.atomic
def handle(self, *args, **options):
Journey.objects.all().delete()
day = date.today()
for service in Service.objects.filter(current=True,
region__in=('EA',)).exclude(region__in=('L', 'Y', 'NI')):
print(service)
for i, xml_file in enumerate(get_files_from_zipfile(service)):
timetable = txc.Timetable(xml_file, None)
handle_timetable(service, timetable, day)
j = 1
while j < 7:
handle_timetable(service, timetable, day + ONE_DAY * j)
j += 1
|
Python
| 0.999999 |
@@ -157,16 +157,24 @@
s import
+ Region,
Service
@@ -340,16 +340,60 @@
%0A if
+hasattr(timetable, 'operating_profile') and
day.week
@@ -1942,16 +1942,560 @@
ages)%0A%0A%0A
[email protected]%0Adef handle_region(region):%0A Journey.objects.filter(service__region=region).delete()%0A day = date.today()%0A for service in Service.objects.filter(region=region, current=True):%0A print(service)%0A for i, xml_file in enumerate(get_files_from_zipfile(service)):%0A timetable = txc.Timetable(xml_file, None)%0A handle_timetable(service, timetable, day)%0A j = 1%0A while j %3C 7:%0A handle_timetable(service, timetable, day + ONE_DAY * j)%0A j += 1%0A%0A%0A
class Co
@@ -2518,32 +2518,8 @@
d):%0A
- @transaction.atomic%0A
@@ -2566,191 +2566,41 @@
-Journey.objects.all().delete()%0A%0A day = date.today()%0A for service in Service.objects.filter(current=True,%0A region__in=('EA',)
+for region in Region.objects.all(
).ex
@@ -2605,22 +2605,18 @@
exclude(
-region
+id
__in=('L
@@ -2653,148 +2653,14 @@
int(
-service)%0A for i, xml_file in enumerate(get_files_from_zipfile(service)):%0A timetable = txc.Timetable(xml_file, N
+regi
on
-e
)%0A
@@ -2673,204 +2673,26 @@
-
-
handle_
-timetable(service, timetable, day)%0A j = 1%0A while j %3C 7:%0A handle_timetable(service, timetable, day + ONE_DAY * j)%0A j += 1
+region(region)
%0A
|
0d5d1263af2fca0955c4db0f603af11c321fe624
|
Fix the class name for del_dynamic_range
|
lib/python2.6/aquilon/server/commands/del_dynamic_range.py
|
lib/python2.6/aquilon/server/commands/del_dynamic_range.py
|
# ex: set expandtab softtabstop=4 shiftwidth=4: -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
#
# Copyright (C) 2009,2010 Contributor
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the EU DataGrid Software License. You should
# have received a copy of the license with this program, and the
# license is published at
# http://eu-datagrid.web.cern.ch/eu-datagrid/license.html.
#
# THE FOLLOWING DISCLAIMER APPLIES TO ALL SOFTWARE CODE AND OTHER
# MATERIALS CONTRIBUTED IN CONNECTION WITH THIS PROGRAM.
#
# THIS SOFTWARE IS LICENSED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE AND ANY WARRANTY OF NON-INFRINGEMENT, ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. THIS
# SOFTWARE MAY BE REDISTRIBUTED TO OTHERS ONLY BY EFFECTIVELY USING
# THIS OR ANOTHER EQUIVALENT DISCLAIMER AS WELL AS ANY OTHER LICENSE
# TERMS THAT MAY APPLY.
from sqlalchemy.sql.expression import asc
from aquilon.server.broker import BrokerCommand
from aquilon.aqdb.model import System
from aquilon.aqdb.model.network import get_net_id_from_ip
from aquilon.exceptions_ import ArgumentError
from aquilon.server.locks import lock_queue, DeleteKey
class CommandAddDynamicRange(BrokerCommand):
required_parameters = ["startip", "endip"]
def render(self, session, logger, startip, endip, **arguments):
key = DeleteKey("system", logger=logger)
try:
lock_queue.acquire(key)
self.del_dynamic_range(session, logger, startip, endip)
session.commit()
finally:
lock_queue.release(key)
return
def del_dynamic_range(self, session, logger, startip, endip):
startnet = get_net_id_from_ip(session, startip)
endnet = get_net_id_from_ip(session, endip)
if startnet != endnet:
raise ArgumentError("IPs '%s' (%s) and '%s' (%s) must be on the "
"same subnet" %
(startip, startnet.ip, endip, endnet.ip))
q = session.query(System)
q = q.filter(System.ip >= startip)
q = q.filter(System.ip <= endip)
q = q.order_by(asc(System.ip))
existing = q.all()
if not existing:
raise ArgumentError("Nothing found in range.")
if existing[0].ip != startip:
raise ArgumentError("No system found with IP address '%s'" %
startip)
if existing[-1].ip != endip:
raise ArgumentError("No system found with IP address '%s'" %
endip)
invalid = [s for s in existing if s.system_type != 'dynamic_stub']
if invalid:
raise ArgumentError("The range contains non-dynamic systems:\n" +
"\n".join(["%s (%s)" % (i.fqdn, i.ip)
for i in invalid]))
for stub in existing:
session.delete(stub)
return
|
Python
| 0.999991 |
@@ -1846,11 +1846,11 @@
mand
-Add
+Del
Dyna
|
b0735d710bfaa4939a01f44e0b830cca155151c3
|
Make sure the versionFile is closed even in the face of an exception
|
documents/versionHelper.py
|
documents/versionHelper.py
|
import datetime
from django.template.loader import render_to_string
from lxml import etree
class XMLContent:
DTBOOK_NAMESPACE = "http://www.daisy.org/z3986/2005/dtbook/"
XML_NAMESPACE = "http://www.w3.org/XML/1998/namespace"
FIELD_ATTRIBUTE_MAP = {
'subject' : "dc:Subject",
'description' : "dc:Description",
'publisher' : "dc:Publisher",
'date' : "dc:Date",
'identifier' : "dc:Identifier",
'source' : "dc:Source",
'language' : "dc:Language",
'rights' : "dc:Rights",
'identifier' : "dtb:uid",
'source_date' : "dtb:sourceDate",
'source_publisher': "dtb:sourcePublisher",
'source_edition' : "dtb:sourceEdition",
'source_rights' : "dtb:sourceRights",
'production_series' : "prod:series",
'production_series_number' : "prod:seriesNumber",
'production_source' : "prod:source"
}
@staticmethod
def getInitialContent(document):
from django.forms.models import model_to_dict
dictionary = model_to_dict(document)
dictionary['date'] = document.date.isoformat() if document.date else ''
dictionary['source_date'] = document.source_date.isoformat() if document.source_date else ''
content = render_to_string('DTBookTemplate.xml', dictionary)
return content.encode('utf-8')
def __init__(self, version=None):
self.version = version
def getUpdatedContent(self, author, title, **kwargs):
# update the existing version with the modified meta data
self.version.content.open()
self.tree = etree.parse(self.version.content.file)
self.version.content.close()
# fix author
self._updateOrInsertMetaAttribute("dc:Creator", author)
# FIXME: Sometimes the docauthor contains xml markup, such as
# <em> and <abbr>, which is not in the meta tag. The following
# will just wipe this out.
self._updateMetaElement("docauthor", author)
# fix title
self._updateOrInsertMetaAttribute("dc:Title", title)
# FIXME: Sometimes the doctitle contains xml markup, such as
# <em> and <abbr>, which is not in the meta tag. The following
# will just wipe this out.
self._updateMetaElement("doctitle", title)
# fix xml:lang
self._updateLangAttribute(kwargs.get('language'))
for model_field, field_value in kwargs.items():
# fix attribute
if self.FIELD_ATTRIBUTE_MAP.has_key(model_field):
self._updateOrInsertMetaAttribute(self.FIELD_ATTRIBUTE_MAP[model_field], (field_value or ''))
return etree.tostring(self.tree, xml_declaration=True, encoding="UTF-8")
def validateContentMetaData(self, filePath, author, title, **kwargs):
versionFile = open(filePath)
self.tree = etree.parse(versionFile)
versionFile.close()
validationProblems = reduce(
# flatten the list
lambda x,y: x+y,
[self._validateMetaAttribute(self.FIELD_ATTRIBUTE_MAP[field], (kwargs.get(field, '') or ''))
for field in
('source_publisher', 'subject', 'description', 'publisher', 'date', 'source',
'language', 'rights', 'source_date', 'source_edition', 'source_rights',
'production_series', 'production_series_number', 'production_source')])
return filter(None, validationProblems) + filter(
None,
# validate author
self._validateMetaAttribute("dc:Creator", author) +
# FIXME: It would be nice to check the docauthor element,
# however it can contain (almost arbitrary) tags such as
# <em>, <abbr> or any contraction hint. If we want to
# check we need to strip the tags first.
# self._validateMetaElement("docauthor", author) +
# validate title
self._validateMetaAttribute("dc:Title", title) +
# FIXME: It would be nice to check the doctitle element,
# however it can contain (almost arbitrary) tags such as
# <em>, <abbr> or any contraction hint. If we want to
# check we need to strip the tags first.
# self._validateMetaElement("doctitle", title) +
# validate identifier
self._validateMetaAttribute("dc:Identifier", kwargs.get('identifier', '')) +
self._validateMetaAttribute("dtb:uid", kwargs.get('identifier', '')) +
# validate language
self._validateLangAttribute(kwargs.get('language', ''))
)
def _updateOrInsertMetaAttribute(self, key, value):
if isinstance(value, datetime.date):
value = value.isoformat()
elements = self.tree.findall("//{%s}meta[@name='%s']" % (self.DTBOOK_NAMESPACE, key))
if not elements and value:
# insert a new meta element if there wasn't one before and if the value is not empty
head = self.tree.find("//{%s}head" % self.DTBOOK_NAMESPACE)
etree.SubElement(head, "{%s}meta" % self.DTBOOK_NAMESPACE, name=key, content=value)
else:
for element in elements:
element.attrib['content'] = value
def _updateMetaElement(self, key, value):
for element in self.tree.findall("//{%s}%s" % (self.DTBOOK_NAMESPACE, key)):
element.text = value
def _updateLangAttribute(self, language):
self.tree.getroot().attrib['{%s}lang' % self.XML_NAMESPACE] = language
def _validateMetaAttribute(self, key, value):
"""Return a list of tuples for each meta data of name key
where the value of the attribute 'content' doesn't match the
        given value. The tuple contains the key, the value of the
        attribute 'content' and the given value."""
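        # For example (illustrative): with <meta name="dc:Title" content="Old"/>
        # in the document and value == "New", this returns
        # [("dc:Title", "Old", "New")].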
if isinstance(value, datetime.date):
value = value.isoformat()
r = self.tree.xpath("//dtb:meta[@name='%s']" % (key,), namespaces={'dtb': self.DTBOOK_NAMESPACE})
if not len(r) and value != '':
# hm the meta attribute is not there. It should be though. Report this as an error
return [(key, '', value)]
else:
return [tuple([key, element.attrib['content'], value])
for element in r if element.attrib['content'] != value]
def _validateMetaElement(self, key, value):
"""Return a list of tuples for each element of name key where
        the text doesn't match the given value. The tuple contains the
        key, the value of the text node and the given value."""
r = self.tree.xpath("//%s" % (key,), namespaces={'dtb': self.DTBOOK_NAMESPACE})
# in theory we should also check for an empty result set r. However the schema already takes care of that
return [tuple([key, element.text, value]) for element in r if element.text != value and not (element.text == None and value == '')]
def _validateLangAttribute(self, language):
lang_attribute = self.tree.getroot().attrib['{%s}lang' % self.XML_NAMESPACE]
return [('xml:lang', lang_attribute, language)] if lang_attribute != language else []
|
Python
| 0.000001 |
@@ -2834,21 +2834,12 @@
-versionFile =
+with
ope
@@ -2849,25 +2849,42 @@
ilePath)
-%0A
+ as versionFile:%0A
self.tre
@@ -2867,32 +2867,35 @@
nFile:%0A
+
self.tree = etre
@@ -2919,36 +2919,8 @@
le)%0A
- versionFile.close()%0A
|
d809c3cef9761edbf984ea9a8cf066a2f474c58d
|
fix integration tests
|
test/integration/013_context_var_tests/test_context_vars.py
|
test/integration/013_context_var_tests/test_context_vars.py
|
from nose.plugins.attrib import attr
from test.integration.base import DBTIntegrationTest
import dbt.flags
class TestContextVars(DBTIntegrationTest):
def setUp(self):
DBTIntegrationTest.setUp(self)
self.fields = [
'this',
'this.name',
'this.schema',
'this.table',
'target.dbname',
'target.host',
'target.name',
'target.port',
'target.schema',
'target.threads',
'target.type',
'target.user',
'target.pass',
'run_started_at',
'invocation_id'
]
@property
def schema(self):
return "context_vars_013"
@property
def models(self):
return "test/integration/013_context_var_tests/models"
@property
def profile_config(self):
return {
'test': {
'outputs': {
'dev': {
'type': 'postgres',
'threads': 1,
'host': 'database',
'port': 5432,
'user': 'root',
'pass': 'password',
'dbname': 'dbt',
'schema': self.schema
},
'prod': {
'type': 'postgres',
'threads': 1,
'host': 'database',
'port': 5432,
'user': 'root',
'pass': 'password',
'dbname': 'dbt',
'schema': self.schema
}
},
'target': 'dev'
}
}
def get_ctx_vars(self):
field_list = ", ".join(['"{}"'.format(f) for f in self.fields])
query = 'select {field_list} from {schema}.context'.format(
field_list=field_list,
schema=self.schema)
vals = self.run_sql(query, fetch='all')
ctx = dict([(k, v) for (k, v) in zip(self.fields, vals[0])])
return ctx
@attr(type='postgres')
def test_env_vars_dev(self):
self.run_dbt(['run'])
ctx = self.get_ctx_vars()
self.assertEqual(ctx['this'], '"context_vars_013"."context"')
self.assertEqual(ctx['this.name'], 'context')
self.assertEqual(ctx['this.schema'], 'context_vars_013')
self.assertEqual(ctx['this.table'], 'context')
self.assertEqual(ctx['target.dbname'], 'dbt')
self.assertEqual(ctx['target.host'], 'database')
self.assertEqual(ctx['target.name'], 'dev')
self.assertEqual(ctx['target.port'], 5432)
self.assertEqual(ctx['target.schema'], 'context_vars_013')
self.assertEqual(ctx['target.threads'], 1)
self.assertEqual(ctx['target.type'], 'postgres')
self.assertEqual(ctx['target.user'], 'root')
self.assertEqual(ctx['target.pass'], '')
@attr(type='postgres')
def test_env_vars_prod(self):
self.run_dbt(['run', '--target', 'prod'])
ctx = self.get_ctx_vars()
self.assertEqual(ctx['this'], '"context_vars_013"."context"')
self.assertEqual(ctx['this.name'], 'context')
self.assertEqual(ctx['this.schema'], 'context_vars_013')
self.assertEqual(ctx['this.table'], 'context')
self.assertEqual(ctx['target.dbname'], 'dbt')
self.assertEqual(ctx['target.host'], 'database')
self.assertEqual(ctx['target.name'], 'prod')
self.assertEqual(ctx['target.port'], 5432)
self.assertEqual(ctx['target.schema'], 'context_vars_013')
self.assertEqual(ctx['target.threads'], 1)
self.assertEqual(ctx['target.type'], 'postgres')
self.assertEqual(ctx['target.user'], 'root')
self.assertEqual(ctx['target.pass'], '')
|
Python
| 0 |
@@ -2338,32 +2338,41 @@
rs_013%22.%22context
+__dbt_tmp
%22')%0A self
@@ -2522,32 +2522,41 @@
able'%5D, 'context
+__dbt_tmp
')%0A%0A self
@@ -3247,16 +3247,25 @@
%22context
+__dbt_tmp
%22')%0A
@@ -3431,16 +3431,25 @@
'context
+__dbt_tmp
')%0A%0A
|
d0cbf5b21f12db17ac4970ffaaeed801b3bd0753
|
Add '--report-as-gtimelog' option
|
timeflow/cli.py
|
timeflow/cli.py
|
import datetime as dt
import os
import sys
import subprocess
from argparse import ArgumentParser
import timeflow
def log(args):
timeflow.write_to_log_file(args.message)
def _call_editor(editor, filename):
editor = editor.split()
subprocess.call(editor + [timeflow.LOG_FILE])
def edit(args):
if args.editor:
_call_editor(args.editor, timeflow.LOG_FILE)
else:
subprocess.call(['echo', 'Trying to open $EDITOR'])
if os.environ.get('EDITOR'):
_call_editor(os.environ.get('EDITOR'), timeflow.LOG_FILE)
else:
subprocess.call([
"echo",
"Set your default editor in EDITOR environment variable or \n"
"call edit command with -e option and pass your editor:\n"
"timeflow edit -e vim",
])
def stats(args):
today = False
date_from = date_to = None
if args.yesterday:
yesterday_obj = dt.datetime.now() - dt.timedelta(days=1)
date_from = date_to = yesterday_obj.strftime(timeflow.DATE_FORMAT)
elif args.day:
date_from = date_to = args.day
elif args.week:
date_from, date_to = timeflow.get_week_range(args.week)
elif args.this_week:
date_from, date_to = timeflow.get_this_week()
elif args.last_week:
date_from, date_to = timeflow.get_last_week()
elif args.month:
date_from, date_to = timeflow.get_month_range(args.month)
elif args.this_month:
date_from, date_to = timeflow.get_this_month()
elif args.last_month:
date_from, date_to = timeflow.get_last_month()
elif args._from and not args.to:
date_from = args._from
date_to = dt.datetime.now().strftime(timeflow.DATE_FORMAT)
elif args._from and args.to:
date_from = args._from
date_to = args.to
else:
# default action is to show today's stats
date_from = date_to = dt.datetime.now().strftime(timeflow.DATE_FORMAT)
today = True
if args.report:
work_report, slack_report = timeflow.calculate_report(
timeflow.read_log_file_lines(),
date_from,
date_to
)
timeflow.print_report(work_report, slack_report)
work_time, slack_time, today_work_time = timeflow.calculate_stats(
timeflow.read_log_file_lines(), date_from, date_to, today=today
)
timeflow.print_stats(work_time, slack_time, today_work_time)
def create_parser():
parser = ArgumentParser()
subparser = parser.add_subparsers()
# `log` command
log_parser = subparser.add_parser(
"log",
help="Log your time and explanation for it",
)
log_parser.add_argument(
"message",
help="The message which explains your spent time",
)
log_parser.set_defaults(func=log)
# `edit` command
edit_parser = subparser.add_parser(
"edit",
help="Open editor to fix/edit the time log",
)
edit_parser.add_argument("-e", "--editor", help="Use some editor")
edit_parser.set_defaults(func=edit)
# `stats` command
stats_parser = subparser.add_parser(
"stats",
help="Show how much time was spent working or slacking"
)
stats_parser.add_argument(
"--today",
action="store_true",
help="Show today's work times (default)"
)
stats_parser.add_argument(
"-y", "--yesterday",
action="store_true",
help="Show yesterday's work times"
)
stats_parser.add_argument(
"-d", "--day",
help="Show specific day's work times"
)
stats_parser.add_argument(
"--week",
help="Show specific week's work times"
)
stats_parser.add_argument(
"--this-week",
action="store_true",
help="Show current week's work times"
)
stats_parser.add_argument(
"--last-week",
action="store_true",
help="Show last week's work times"
)
stats_parser.add_argument(
"--month",
help="Show specific month's work times"
)
stats_parser.add_argument(
"--this-month",
action="store_true",
help="Show current month's work times"
)
stats_parser.add_argument(
"--last-month",
action="store_true",
help="Show last month's work times"
)
stats_parser.add_argument(
"-f", "--from",
help="Show work times from specific date",
dest="_from"
)
stats_parser.add_argument(
"-t", "--to",
help="Show work times from to specific date"
)
stats_parser.add_argument(
"-r", "--report",
action="store_true",
help="Show stats in report form"
)
stats_parser.set_defaults(func=stats)
# pass every argument to parser, except the program name
return parser
def cli():
parser = create_parser()
args = parser.parse_args(sys.argv[1:])
# if nothing is passed - print help
if hasattr(args, "func"):
args.func(args)
else:
parser.print_help()
|
Python
| 0.999836 |
@@ -2016,18 +2016,45 @@
s.report
+ or args.report_as_gtimelog
:%0A
-
@@ -2201,24 +2201,52 @@
o%0A )%0A
+ if args.report:%0A
time
@@ -2294,88 +2294,64 @@
rt)%0A
-%0A work_time, slack_time, today_work_time =
+ elif args.report_as_g
time
-f
lo
-w.calculate_stats(%0A
+g:%0A print(
time
@@ -2359,68 +2359,48 @@
low.
+c
rea
-d_log_file_lines(), date_from, date_to, today=today%0A )
+te_report_as_gtimelog(work_report))%0A
%0A
@@ -4730,32 +4730,180 @@
ort form%22%0A )%0A
+ stats_parser.add_argument(%0A %22--report-as-gtimelog%22,%0A action=%22store_true%22,%0A help=%22Show stats in gtimelog report form%22%0A )%0A
stats_parser
|
7674437d752be0791688533dd1409fa083672bb2
|
Switch from dictionary to namedtuple
|
genes/java/config.py
|
genes/java/config.py
|
#!/usr/bin/env python
def config():
return {
'is-oracle': True,
'version': 'oracle-java8',
}
|
Python
| 0.000003 |
@@ -15,16 +15,117 @@
python%0A
+from collections import namedtuple%0A%0A%0AJavaConfig = namedtuple('JavaConfig', %5B'is_oracle', 'version'%5D)%0A
%0A%0Adef co
@@ -147,9 +147,19 @@
urn
-%7B
+JavaConfig(
%0A
@@ -167,21 +167,18 @@
-'
is
--
+_
oracle
-':
+=
True
@@ -191,19 +191,16 @@
-'
version
-':
+=
'ora
@@ -219,7 +219,7 @@
-%7D
+)
%0A%0A
|
3d62410c84a83f963a5e12e459f04bb263978940
|
Clean up
|
src/boardFrame.py
|
src/boardFrame.py
|
from PIL import Image, ImageTk
from Tkinter import Frame, Button
class BoardFrame(Frame):
def __init__(self, master):
Frame.__init__(self, master)
self.configure(background="#336600")
# Divide screen in horizontal zones
self.topFrame = Frame(self)
self.bottomFrame = Frame(self)
self.topFrame.pack(side="top", fill="x", expand=False)
# self.bottomFrame.pack(side="bottom", fill="both", expand=True)
self.bottomFrame.pack(side="top", fill="x", expand=True)
# Divide the top frame in 2 vertically
self.topLeft = Frame(self.topFrame)
self.topRight = Frame(self.topFrame)
self.topLeft.pack(side="left", fill="x", expand=True)
self.topRight.pack(side="right", fill="x", expand=True)
# In top left put 2 frames for the stock and the waste
self.stockFrame = Frame(self.topLeft)
self.wasteFrame = Frame(self.topLeft)
self.stockFrame.pack(side="left", fill="x", expand=True)
self.wasteFrame.pack(side="right", fill="x", expand=True)
# In top right put 4 frames for the 4 foundations
self.HFrame = Frame(self.topRight, background="yellow")
self.CFrame = Frame(self.topRight, background="orange")
self.SFrame = Frame(self.topRight, background="green")
self.DFrame = Frame(self.topRight, background="grey")
self.HFrame.pack(side="right", fill="both", expand=True)
self.CFrame.pack(side="right", fill="both", expand=True)
self.SFrame.pack(side="right", fill="both", expand=True)
self.DFrame.pack(side="right", fill="both", expand=True)
# In bottom frame put 7 frames for the tableau piles
self.tableauFrames = []
for i in range(0, 7):
self.tableauFrames.append(Frame(self.bottomFrame))
self.tableauFrames[i].pack(side="left", fill="y", expand=True)
# Load common images
imageBack = Image.open("../img/back.bmp")
self.photoBack = ImageTk.PhotoImage(imageBack)
self.photoBackCropped = ImageTk.PhotoImage(imageBack.crop((0, 0, imageBack.size[0], imageBack.size[1]/4)))
self.photoEmpty = ImageTk.PhotoImage(Image.open("../img/empty.bmp"))
self.photoHEmpty = ImageTk.PhotoImage(Image.open("../img/Hempty.bmp"))
self.photoCEmpty = ImageTk.PhotoImage(Image.open("../img/Cempty.bmp"))
self.photoSEmpty = ImageTk.PhotoImage(Image.open("../img/Sempty.bmp"))
self.photoDEmpty = ImageTk.PhotoImage(Image.open("../img/Dempty.bmp"))
# Put initial waste button
self.wasteButton = Button(self.wasteFrame, image=self.photoEmpty)
self.wasteButton.photo = self.photoEmpty
self.wasteButton.pack(side="top", fill="both", expand=False)
# Put initial stock button
self.stockButton = Button(self.stockFrame, image=self.photoBack)
self.stockButton.photo = self.photoBack
self.stockButton.pack(side="top", fill="both", expand=False)
# Put initial foundations buttons
self.HButton = Button(self.HFrame, image=self.photoHEmpty)
self.CButton = Button(self.CFrame, image=self.photoCEmpty)
self.SButton = Button(self.SFrame, image=self.photoSEmpty)
self.DButton = Button(self.DFrame, image=self.photoDEmpty)
self.HButton.pack(side="top", fill="both", expand=False)
self.CButton.pack(side="top", fill="both", expand=False)
self.SButton.pack(side="top", fill="both", expand=False)
self.DButton.pack(side="top", fill="both", expand=False)
# To be called by the board class when graphics are updated
def updateGUI(self, board):
print(board.__str__())
# Update stock and waste buttons
self.stockButton.configure(command=board.pickCardFromStock)
resetStockButtonImage = True
if (len(board.stock) > 0):
self.wasteButton.configure(image=board.stock[-1].photoFaceUp)
if (resetStockButtonImage):
self.stockButton.configure(image=self.photoBack)
resetStockButtonImage = False
else:
self.stockButton.configure(image=self.photoEmpty)
resetStockButtonImage = True
# Update foundations buttons
if (len(board.H) > 0):
self.HButton.configure(image=board.H[-1].photoFaceUp)
if (len(board.C) > 0):
self.CButton.configure(image=board.C[-1].photoFaceUp)
if (len(board.S) > 0):
self.SButton.configure(image=board.S[-1].photoFaceUp)
if (len(board.D) > 0):
self.DButton.configure(image=board.D[-1].photoFaceUp)
# Update tableau piles
frame = -1
for pile in board.PlayingStacks:
frame += 1
r = -1
for card in pile:
r += 1
if (card != pile[-1]):
if (card.facedown):
image=self.photoBackCropped
else:
image=card.photoFaceUpCropped
else:
if (card.facedown):
image=self.photoBack
else:
image=card.photoFaceUp
Button(self.tableauFrames[frame], image=image).grid(row=r, column=0)
|
Python
| 0.000002 |
@@ -393,18 +393,16 @@
%0A
- #
self.bo
@@ -422,22 +422,19 @@
k(side=%22
-bot
to
-m
+p
%22, fill=
@@ -457,73 +457,8 @@
rue)
-%0A self.bottomFrame.pack(side=%22top%22, fill=%22x%22, expand=True)
%0A%0A
@@ -820,32 +820,54 @@
ame(self.topLeft
+, background=%22#336600%22
)%0A self.w
@@ -896,16 +896,38 @@
.topLeft
+, background=%22#336600%22
)%0A
@@ -1169,14 +1169,15 @@
nd=%22
-yellow
+#336600
%22)%0A
@@ -1234,14 +1234,15 @@
nd=%22
-orange
+#336600
%22)%0A
@@ -1299,13 +1299,15 @@
nd=%22
-green
+#336600
%22)%0A
@@ -1364,12 +1364,15 @@
nd=%22
-grey
+#336600
%22)%0A
@@ -1754,16 +1754,16 @@
(0, 7):%0A
-
@@ -1814,16 +1814,38 @@
tomFrame
+, background=%22#336600%22
))%0A
|
becf684fc06890679f4c0cdfed1761962e16a343
|
Make extra_context at browse_repository view not override provided variables
|
vcs/web/simplevcs/views/repository.py
|
vcs/web/simplevcs/views/repository.py
|
from django.contrib import messages
from django.template import RequestContext
from django.shortcuts import render_to_response
from vcs.exceptions import VCSError
def browse_repository(request, repository, template_name, revision=None,
node_path='', extra_context={}):
"""
Generic repository browser.
Provided context variables:
- ``repository``: same what was given
- ``changeset``: based on the given ``revision`` or tip if none given
- ``root``: repositorie's node on the given ``node_path``
"""
context = {}
try:
context.update(dict(
changeset = repository.get_changeset(),
root = repository.request(node_path, revision=revision),
))
except VCSError, err:
messages.error(request, str(err))
for key, value in extra_context.items():
context[key] = callable(value) and value() or value
return render_to_response(template_name, context, RequestContext(request))
|
Python
| 0.00001 |
@@ -549,16 +549,122 @@
xt = %7B%7D%0A
+ for key, value in extra_context.items():%0A context%5Bkey%5D = callable(value) and value() or value%0A%0A
try:
@@ -896,113 +896,8 @@
rr))
-%0A for key, value in extra_context.items():%0A context%5Bkey%5D = callable(value) and value() or value
%0A%0A
|
7ecaeba33a4fe559f6122953581e533720cb2404
|
Add select mkl libs (#22580)
|
var/spack/repos/builtin/packages/intel-oneapi-mkl/package.py
|
var/spack/repos/builtin/packages/intel-oneapi-mkl/package.py
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from sys import platform
from spack import *
class IntelOneapiMkl(IntelOneApiLibraryPackage):
"""Intel oneAPI MKL."""
maintainers = ['rscohn2']
homepage = 'https://software.intel.com/content/www/us/en/develop/tools/oneapi/components/onemkl.html'
if platform == 'linux':
version('2021.1.1',
sha256='818b6bd9a6c116f4578cda3151da0612ec9c3ce8b2c8a64730d625ce5b13cc0c',
url='https://registrationcenter-download.intel.com/akdlm/irc_nas/17402/l_onemkl_p_2021.1.1.52_offline.sh',
expand=False)
depends_on('intel-oneapi-tbb')
provides('fftw-api@3')
provides('scalapack')
provides('mkl')
provides('lapack')
provides('blas')
@property
def component_dir(self):
return 'mkl'
|
Python
| 0 |
@@ -975,8 +975,294 @@
n 'mkl'%0A
+%0A @property%0A def libs(self):%0A lib_path = '%7B0%7D/%7B1%7D/latest/lib/intel64'.format(self.prefix, self.component_dir)%0A mkl_libs = %5B'libmkl_intel_ilp64', 'libmkl_sequential', 'libmkl_core'%5D%0A return find_libraries(mkl_libs, root=lib_path, shared=True, recursive=False)%0A
|
67cea85323195440330580cc3731447956a4ad32
|
add default user settings packet
|
litecord/managers/user_settings.py
|
litecord/managers/user_settings.py
|
class SettingsManager:
"""User settings manager.
Provides functions for users to change their settings and retrieve them back.
Attributes
----------
server: :class:`LitecordServer`
Litecord server instance.
settings_coll: `mongo collection`
User settings MongoDB collection.
"""
def __init__(self, server):
self.server = server
self.guild_man = server.guild_man
self.settings_coll = self.server.settings_coll
async def get_settings(self, user):
"""Get a settings object from a User ID.
Parameters
----------
user_id: :class:`User`
User ID to be get settings from.
"""
if user.bot:
return {}
settings = await self.settings_coll.find_one({'user_id': user.id})
if settings is None:
settings = {}
return settings
async def get_guild_settings(self, user):
"""Get a User Guild Settings object to be used
in READY payloads.
Parameters
----------
user: :class:`User`
User to get the User Guild Settings payload for.
Returns
-------
list
The User Guild Settings payload.
"""
if user.bot:
return []
res = []
async for guild in self.guild_man.yield_guilds(user.id):
res.append(guild.default_settings)
return res
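# Hedged usage sketch (requires a running event loop and a server instance,
# both assumed here, so the calls stay commented):
#
#     manager = SettingsManager(server)
#     settings = await manager.get_settings(user)
#     guild_settings = await manager.get_guild_settings(user)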
|
Python
| 0.000009 |
@@ -879,16 +879,928 @@
ings = %7B
+%0A 'timezone_offset': 0,%0A 'theme': 'dark',%0A 'status': 'online',%0A 'show_current_game': False,%0A 'restricted_guilds': %5B%5D,%0A 'render_reactions': True,%0A 'render_embeds:': True,%0A 'message_display_compact': True,%0A 'locale': 'en-US',%0A 'inline_embed_media': False,%0A 'inline_attachment_media': False,%0A 'guild_positions': %5B%5D,%0A 'friend_source_flags': %7B%0A 'all': True,%0A %7D,%0A 'explicit_content_filter': 1,%0A 'enable_tts_command': False,%0A 'developer_mode': False,%0A 'detect_platform_accounts': False,%0A 'default_guilds_restricted': False,%0A 'convert_emoticons': True,%0A 'afk_timeout': 600,%0A
%7D%0A
|
f5838692d711e6c8d0b8f0dc7716ea28707df4f2
|
Add default for playlist_dir
|
beetsplug/smartplaylist.py
|
beetsplug/smartplaylist.py
|
# This file is part of beets.
# Copyright 2013, Dang Mai <[email protected]>.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Generates smart playlists based on beets queries.
"""
from __future__ import print_function
from beets.plugins import BeetsPlugin
from beets import config, ui
from beets.util import normpath, syspath
import os
# Global variables so that smartplaylist can detect database changes and run
# only once before beets exits.
database_changed = False
library = None
def update_playlists(lib):
from beets.util.functemplate import Template
print("Updating smart playlists...")
playlists = config['smartplaylist']['playlists'].get(list)
playlist_dir = config['smartplaylist']['playlist_dir'].get(unicode)
relative_to = config['smartplaylist']['relative_to'].get()
if relative_to:
relative_to = normpath(relative_to)
for playlist in playlists:
items = lib.items(playlist['query'])
m3us = {}
basename = playlist['name'].encode('utf8')
# As we allow tags in the m3u names, we'll need to iterate through
# the items and generate the correct m3u file names.
for item in items:
m3u_name = item.evaluate_template(Template(basename), lib=lib)
if not (m3u_name in m3us):
m3us[m3u_name] = []
if relative_to:
m3us[m3u_name].append(os.path.relpath(item.path, relative_to))
else:
m3us[m3u_name].append(item.path)
# Now iterate through the m3us that we need to generate
for m3u in m3us:
m3u_path = normpath(os.path.join(playlist_dir, m3u))
with open(syspath(m3u_path), 'w') as f:
for path in m3us[m3u]:
f.write(path + '\n')
print("... Done")
class SmartPlaylistPlugin(BeetsPlugin):
def __init__(self):
super(SmartPlaylistPlugin, self).__init__()
self.config.add({
'relative_to': None,
'playlists': []
})
def commands(self):
def update(lib, opts, args):
update_playlists(lib)
spl_update = ui.Subcommand('splupdate',
help='update the smart playlists')
spl_update.func = update
return [spl_update]
@SmartPlaylistPlugin.listen('database_change')
def handle_change(lib):
global library
global database_changed
library = lib
database_changed = True
@SmartPlaylistPlugin.listen('cli_exit')
def update():
if database_changed:
update_playlists(library)
|
Python
| 0.000001 |
@@ -2188,16 +2188,56 @@
, m3u))%0A
+ import pdb; pdb.set_trace()%0A
@@ -2559,16 +2559,50 @@
: None,%0A
+ 'playlist_dir': u'.',%0A
|
603a59785f24aa98662e72d954b3aa0521ad0629
|
Make tests for CLI-specified severities repeatable
|
test/unit/vint/linting/config/test_config_cmdargs_source.py
|
test/unit/vint/linting/config/test_config_cmdargs_source.py
|
import unittest
from test.asserting.config_source import ConfigSourceAssertion
from vint.linting.config.config_cmdargs_source import ConfigCmdargsSource
from vint.linting.level import Level
class TestConfigFileSource(ConfigSourceAssertion, unittest.TestCase):
def test_get_config_dict(self):
expected_config_dict = {
'cmdargs': {
'verbose': True,
'severity': Level.WARNING,
'max-violations': 10,
},
}
env = {
'cmdargs': {
'verbose': True,
'style': True,
'warning': True,
'max-violations': 10,
},
}
config_source = self.initialize_config_source_with_env(ConfigCmdargsSource, env)
self.assertConfigDict(config_source, expected_config_dict)
if __name__ == '__main__':
unittest.main()
|
Python
| 0.000001 |
@@ -305,196 +305,910 @@
e
-xpected_config_dict = %7B%0A 'cmdargs': %7B%0A 'verbose': True,%0A 'severity': Level.WARNING,%0A 'max-violations': 10,%0A %7D,%0A %7D%0A
+nv = %7B%0A 'cmdargs': %7B%0A 'verbose': True,%0A 'style': True,%0A 'warning': True,%0A 'max-violations': 10,%0A %7D,%0A %7D%0A%0A expected_config_dict = %7B%0A 'cmdargs': %7B%0A 'verbose': True,%0A 'severity': Level.WARNING,%0A 'max-violations': 10,%0A %7D,%0A %7D%0A%0A config_source = self.initialize_config_source_with_env(ConfigCmdargsSource, env)%0A self.assertConfigDict(config_source, expected_config_dict)%0A%0A%0A def test_get_config_dict_with_no_severity(self):%0A env = %7B'cmdargs': %7B%7D%7D%0A%0A expected_config_dict = %7B'cmdargs': %7B%7D%7D%0A%0A config_source = self.initialize_config_source_with_env(ConfigCmdargsSource, env)%0A self.assertConfigDict(config_source, expected_config_dict)%0A%0A%0A def test_get_config_dict_with_severity_style_problem(self):
%0A
@@ -1254,39 +1254,45 @@
'
-verbose
+style_problem
': True,%0A
@@ -1300,96 +1300,1007 @@
- 'style': True,%0A 'warning': True,%0A 'max-violations': 10
+%7D,%0A %7D%0A%0A expected_config_dict = %7B%0A 'cmdargs': %7B%0A 'severity': Level.STYLE_PROBLEM,%0A %7D,%0A %7D%0A%0A config_source = self.initialize_config_source_with_env(ConfigCmdargsSource, env)%0A self.assertConfigDict(config_source, expected_config_dict)%0A%0A%0A def test_get_config_dict_with_severity_warning(self):%0A env = %7B%0A 'cmdargs': %7B%0A 'warning': True,%0A %7D,%0A %7D%0A%0A expected_config_dict = %7B%0A 'cmdargs': %7B%0A 'severity': Level.WARNING,%0A %7D,%0A %7D%0A%0A config_source = self.initialize_config_source_with_env(ConfigCmdargsSource, env)%0A self.assertConfigDict(config_source, expected_config_dict)%0A%0A%0A def test_get_config_dict_with_severity_error(self):%0A env = %7B%0A 'cmdargs': %7B%0A 'error': True,%0A %7D,%0A %7D%0A%0A expected_config_dict = %7B%0A 'cmdargs': %7B%0A 'severity': Level.ERROR
,%0A
|
a45e41bdbf7356816a153aef8d06ee803324e1a6
|
Add a description to the add-enclosure example script help output
|
examples/scripts/add-enclosure.py
|
examples/scripts/add-enclosure.py
|
#!/usr/bin/env python3
###
# (C) Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
import sys
if sys.version_info < (3, 4):
raise Exception('Must use Python 3.4 or later')
import hpOneView as hpov
from pprint import pprint
def acceptEULA(con):
# See if we need to accept the EULA before we try to log in
con.get_eula_status()
try:
if con.get_eula_status() is True:
print('EULA display needed')
con.set_eula('no')
except Exception as e:
print('EXCEPTION:')
print(e)
def login(con, credential):
# Login with given credentials
try:
con.login(credential)
except:
print('Login failed')
def import_enclosure(srv, sts, eg, ip, usr, pas, lic, baseline, force, forcefw):
# Locate the enclosure group
egroups = srv.get_enclosure_groups()
for group in egroups:
if group['name'] == eg:
egroup = group
else:
print('ERROR: Importing Enclosure')
print('Enclosure Group: "%s" has not been defined' % eg)
print('')
sys.exit()
print('Adding Enclosure')
# Find the first Firmware Baseline
uri = ''
if baseline:
spps = sts.get_spps()
for spp in spps:
if spp['isoFileName'] == baseline:
uri = spp['uri']
if not uri:
print('ERROR: Locating Firmware Baseline SPP')
print('Baseline: "%s" cannot be located' % baseline)
print('')
sys.exit()
if not uri:
add_enclosure = hpov.common.make_enclosure_dict(ip, usr, pas,
egroup['uri'],
licenseIntent=lic,
force=force,
forcefw=forcefw)
else:
add_enclosure = hpov.common.make_enclosure_dict(ip, usr, pas,
egroup['uri'],
licenseIntent=lic,
firmwareBaseLineUri=uri,
force=force,
forcefw=forcefw)
enclosure = srv.add_enclosure(add_enclosure)
if 'enclosureType' in enclosure:
print('Type: ', enclosure['enclosureType'])
print('Name: ', enclosure['name'])
print('Rack: ', enclosure['rackName'])
print('Serial Number: ', enclosure['serialNumber'])
else:
pprint(enclosure)
def main():
parser = argparse.ArgumentParser(add_help=True, description='Usage',
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-a', dest='host', required=True,
help='''
HP OneView Appliance hostname or IP address''')
parser.add_argument('-u', dest='user', required=False,
default='Administrator',
help='''
HP OneView Username''')
parser.add_argument('-p', dest='passwd', required=True,
help='''
HP OneView Password''')
parser.add_argument('-c', dest='cert', required=False,
help='''
Trusted SSL Certificate Bundle in PEM (Base64 Encoded DER) Format''')
parser.add_argument('-y', dest='proxy', required=False,
help='''
Proxy (host:port format)''')
parser.add_argument('-eu', dest='encusr', required=True,
help='''
Administrative username for the c7000 enclosure OA''')
parser.add_argument('-ep', dest='encpass', required=True,
help='''
Administrative password for the c7000 enclosure OA''')
parser.add_argument('-oa', dest='enc', required=True,
help='''
IP address of the c7000 to import into HP OneView''')
parser.add_argument('-eg', dest='egroup', required=True,
help='''
Enclosure Group to add the enclosure to''')
parser.add_argument('-s', dest='spp', required=False,
help='''
SPP Baseline file name. e.g. SPP2013090_2013_0830_30.iso''')
parser.add_argument('-l', dest='license', required=False,
choices=['OneView', 'OneViewNoiLO'],
default='OneView',
help='''
Specifies whether the intent is to apply either OneView or
OneView w/o iLO licenses to the servers in the enclosure
being imported.
Accepted values are:
- OneView
- OneViewNoiLO ''')
parser.add_argument('-f', dest='force', action='store_true', required=False,
help='''
When attempting to add an Enclosure to the appliance, the appliance will
validate the target Enclosure is not already claimed. If it is, this
parameter is used when the Enclosure has been claimed by another appliance
to bypass the confirmation prompt and force the import of the
Enclosure ''')
parser.add_argument('-fw', dest='forcefw', action='store_true',
required=False,
help='''
Force the installation of the provided Firmware Baseline. ''')
args = parser.parse_args()
credential = {'userName': args.user, 'password': args.passwd}
con = hpov.connection(args.host)
srv = hpov.servers(con)
sts = hpov.settings(con)
if args.proxy:
con.set_proxy(args.proxy.split(':')[0], args.proxy.split(':')[1])
if args.cert:
con.set_trusted_ssl_bundle(args.cert)
login(con, credential)
acceptEULA(con)
import_enclosure(srv, sts, args.egroup, args.enc, args.encusr, args.encpass,
args.license, args.spp, args.force, args.forcefw)
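# Hedged invocation sketch (host names and credentials are placeholders; the
# flags correspond to the argparse options defined above):
#
#     ./add-enclosure.py -a oneview.example.com -u Administrator -p secret \
#         -oa 10.0.0.5 -eu oaadmin -ep oapass -eg ProdEnclosureGroup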
if __name__ == '__main__':
import sys
import argparse
sys.exit(main())
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
|
Python
| 0.000001 |
@@ -3832,29 +3832,8 @@
rue,
- description='Usage',
%0A
@@ -3911,16 +3911,315 @@
ormatter
+,%0A description='''%0A This example script will import an enclosure into HP OneView as a%0A managed device. The Onboard Administrator needs to have IP Address%0A configured for each module, and a valid Administrator account with a%0A password.%0A%0A Usage: '''
)%0A pa
@@ -6171,16 +6171,40 @@
e_true',
+%0A
require
@@ -7238,16 +7238,37 @@
.encusr,
+%0A
args.en
@@ -7273,27 +7273,16 @@
encpass,
-%0A
args.li
@@ -7309,16 +7309,37 @@
s.force,
+%0A
args.fo
|
54cea5e302820c35025e1afc64b2058a48c5b174
|
Implement pop in the data storage module
|
desertbot/datastore.py
|
desertbot/datastore.py
|
import json
import os
class DataStore(object):
def __init__(self, storagePath, defaultsPath):
self.storagePath = storagePath
self.defaultsPath = defaultsPath
self.data = {}
self.load()
def load(self):
# if a file data/defaults/<module>.json exists, it has priority on load
if os.path.exists(self.defaultsPath):
with open(self.defaultsPath) as storageFile:
self.data = json.load(storageFile)
# if not, use data/<network>/<module>.json instead
elif os.path.exists(self.storagePath):
with open(self.storagePath) as storageFile:
self.data = json.load(storageFile)
# if there's nothing, make sure the folder at least exists for the server-specific data files
else:
os.makedirs(os.path.dirname(self.storagePath), exist_ok=True)
def save(self):
# don't save empty files, to keep the data directories from filling up with pointless files
if len(self.data) != 0:
tmpFile = f"{self.storagePath}.tmp"
with open(tmpFile, "w") as storageFile:
storageFile.write(json.dumps(self.data, indent=4))
os.rename(tmpFile, self.storagePath)
def __len__(self):
return len(self.data)
def __iter__(self):
return iter(self.data)
def __getitem__(self, item):
return self.data[item]
def __setitem__(self, key, value):
self.data[key] = value
self.save()
def __contains__(self, key):
return key in self.data
def __delitem__(self, key):
del self.data[key]
def items(self):
return self.data.items()
def values(self):
return self.data.values()
def keys(self):
return self.data.keys()
def get(self, key, defaultValue=None):
return self.data.get(key, defaultValue)
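# Hedged usage sketch -- the paths are illustrative, and instantiation would
# touch the filesystem, so the example stays commented:
#
#     store = DataStore('data/example-net/module.json', 'data/defaults/module.json')
#     store['greeting'] = 'hi'          # __setitem__ persists via save()
#     store.get('missing', [])          # defaulted lookup, no KeyError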
|
Python
| 0.000001 |
@@ -1510,19 +1510,16 @@
save()%0A
-
%0A def
@@ -1889,12 +1889,111 @@
faultValue)%0A
+%0A def pop(self, key):%0A data = self.data.pop(key)%0A self.save()%0A return data%0A
|
c96ec5e3f2aca63fb705310773fcdc6960b2efa9
|
Update method will update if a matching record exists, else create a new record.
|
datums/models/base.py
|
datums/models/base.py
|
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, scoped_session
from sqlalchemy import create_engine
import os
# Initialize Base class
Base = declarative_base()
metadata = Base.metadata
session_maker = sessionmaker()
session = scoped_session(session_maker)
engine = create_engine(os.environ['DATABASE_URI'])
session.configure(bind=engine)
class GhostBase(Base):
'''The GhostBase class extends the declarative Base class.'''
__abstract__ = True
@classmethod
def get_or_create(cls, **kwargs):
'''
If a record matching the instance already exists in the database, then
return it, otherwise create a new record.
'''
q = session.query(cls).filter_by(**kwargs).first()
if q:
return q
q = cls(**kwargs)
session.add(q)
session.commit()
return q
@classmethod
def update(cls, snapshot, **kwargs):
'''
If a record matching the instance id already exists in the database,
update it.
'''
q = session.query(cls).filter_by(**kwargs).first()
if q:
for k in snapshot:
q.__dict__.update({k: snapshot[k]})
session.add(q)
session.commit()
@classmethod
def delete(cls, **kwargs):
'''
If a record matching the instance id exists in the database, delete it.
'''
q = session.query(cls).filter_by(**kwargs).first()
if q:
session.delete(q)
session.commit()
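# Hedged sketch ('Snapshot' is a hypothetical GhostBase subclass used only
# for illustration):
#
#     snap = Snapshot.get_or_create(id=1)   # returns the matching row, creating it if absent
#     Snapshot.update({'score': 3}, id=1)   # updates the row when a match exists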
class ResponseClassLegacyAccessor(object):
def __init__(self, response_class, column, accessor):
self.response_class = response_class
self.column = column
self.accessor = accessor
def get_or_create_from_legacy_response(self, response, **kwargs):
response_cls = self.response_class(**kwargs)
# Return the existing or newly created response record
response_cls = response_cls.get_or_create(**kwargs)
# If the record does not have a response, add it
if not getattr(response_cls, self.column):
setattr(response_cls, self.column, self.accessor(response))
session.add(response_cls)
session.commit()
def update(self, response, **kwargs):
response_cls = self.response_class(**kwargs)
# Return the existing response record
response_cls = session.query(
response_cls.__class__).filter_by(**kwargs).first()
if response_cls:
setattr(response_cls, self.column, self.accessor(response))
session.add(response_cls)
session.commit()
def delete(self, response, **kwargs):
response_cls = self.response_class(**kwargs)
# Return the existing response record
response_cls = session.query(
response_cls.__class__).filter_by(**kwargs).first()
if response_cls:
session.delete(response_cls)
session.commit()
class LocationResponseClassLegacyAccessor(ResponseClassLegacyAccessor):
def __init__(self, response_class, column, accessor, venue_column, venue_accessor):
super(LocationResponseClassLegacyAccessor, self).__init__(
response_class, column, accessor)
self.venue_column = venue_column
self.venue_accessor = venue_accessor
def get_or_create_from_legacy_response(self, response, **kwargs):
ResponseClassLegacyAccessor.get_or_create_from_legacy_response(
self, response, **kwargs)
response_cls = self.response_class(**kwargs)
# Return the existing or newly created response record
response_cls = response_cls.get_or_create(**kwargs)
# If the record does not have a response, add it
if not getattr(response_cls, self.column):
setattr(response_cls, self.column, self.accessor(response))
if not getattr(response_cls, self.venue_column):
setattr(
response_cls, self.venue_column, self.venue_accessor(response))
session.add(response_cls)
session.commit()
def update(self, response, **kwargs):
response_cls = self.response_class(**kwargs)
# Return the existing response record
response_cls = session.query(
response_cls.__class__).filter_by(**kwargs).first()
if response_cls:
setattr(response_cls, self.column, self.accessor(response))
setattr(
response_cls, self.venue_column, self.venue_accessor(response))
session.add(response_cls)
session.commit()
def database_setup(engine):
# Set up the database
metadata.create_all(engine)
def database_teardown(engine):
# BURN IT TO THE GROUND
metadata.drop_all(engine)
|
Python
| 0 |
@@ -1057,24 +1057,114 @@
update it.
+ If a record matching the instance id does not already exist,%0A create a new record.
%0A '''
@@ -1368,24 +1368,78 @@
ion.commit()
+%0A else:%0A cls.get_or_create(**kwargs)
%0A%0A @class
@@ -2816,32 +2816,126 @@
session.commit()
+%0A else:%0A response_cls.get_or_create_from_legacy_response(response, **kwargs)
%0A%0A def delete
|
59c4b6c7d96e0871469e816a847f823bad3b6748
|
Use protocol argument to link rather than manual protocol replacement.
|
velruse/utils.py
|
velruse/utils.py
|
"""Utilities for the auth functionality"""
import sys
import uuid
try:
import simplejson as json
except ImportError:
import json
import webob.exc as exc
from routes import URLGenerator
from openid.oidutil import autoSubmitHTML
from webob import Response
from velruse.baseconvert import base_encode
from velruse.errors import error_dict
def redirect_form(end_point, token):
"""Generate a redirect form for POSTing"""
return """
<form action="%s" method="post" accept-charset="UTF-8" enctype="application/x-www-form-urlencoded">
<input type="hidden" name="token" value="%s" />
<input type="submit" value="Continue"/></form>
""" % (end_point, token)
def generate_token():
"""Generate a random token"""
return base_encode(uuid.uuid4().int)
def load_package_obj(package_obj_string):
"""Extract a package name and object name, import the package and return
the object from that package by name.
The format is velruse.store.memstore:MemoryStore.
"""
package_name, obj_name = package_obj_string.split(':')
__import__(package_name)
return getattr(sys.modules[package_name], obj_name)
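# Hedged example, reusing the format documented above (the dotted path must be
# importable for the call to succeed):
#
#     MemoryStore = load_package_obj('velruse.store.memstore:MemoryStore')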
# Copied from Paste
def path_info_pop(environ):
"""
'Pops' off the next segment of PATH_INFO, pushing it onto
SCRIPT_NAME, and returning that segment.
For instance::
>>> def call_it(script_name, path_info):
... env = {'SCRIPT_NAME': script_name, 'PATH_INFO': path_info}
... result = path_info_pop(env)
... print 'SCRIPT_NAME=%r; PATH_INFO=%r; returns=%r' % (
... env['SCRIPT_NAME'], env['PATH_INFO'], result)
>>> call_it('/foo', '/bar')
SCRIPT_NAME='/foo/bar'; PATH_INFO=''; returns='bar'
>>> call_it('/foo/bar', '')
SCRIPT_NAME='/foo/bar'; PATH_INFO=''; returns=None
>>> call_it('/foo/bar', '/')
SCRIPT_NAME='/foo/bar/'; PATH_INFO=''; returns=''
>>> call_it('', '/1/2/3')
SCRIPT_NAME='/1'; PATH_INFO='/2/3'; returns='1'
>>> call_it('', '//1/2')
SCRIPT_NAME='//1'; PATH_INFO='/2'; returns='1'
"""
path = environ.get('PATH_INFO', '')
if not path:
return None
while path.startswith('/'):
environ['SCRIPT_NAME'] += '/'
path = path[1:]
if '/' not in path:
environ['SCRIPT_NAME'] += path
environ['PATH_INFO'] = ''
return path
else:
segment, path = path.split('/', 1)
environ['PATH_INFO'] = '/' + path
environ['SCRIPT_NAME'] += segment
return segment
class RouteResponder(object):
"""RouteResponder for Routes-based dispatching Responder"""
def __call__(self, req):
"""Handle being called with a request object"""
results = self.map.routematch(environ=req.environ)
if not results:
return exc.HTTPNotFound()
match = results[0]
kwargs = match.copy()
link = URLGenerator(self.map, req.environ)
req.environ['wsgiorg.routing_args'] = ((), match)
req.link = link
self.map.environ = req.environ
action = kwargs.pop('action')
return getattr(self, action)(req, **kwargs)
def _error_redirect(self, error_code, end_point):
"""Redirect the user to the endpoint, save the error
status to the storage under the token"""
token = generate_token()
self.storage.store(token, error_dict(error_code))
form_html = redirect_form(end_point, token)
return Response(body=autoSubmitHTML(form_html))
def _success_redirect(self, user_data, end_point):
"""Redirect the user to the endpoint, save the user_data to a new
random token in storage"""
# Generate the token, store the extracted user-data for 5 mins, and send back
token = generate_token()
self.storage.store(token, user_data, expires=300)
form_html = redirect_form(end_point, token)
return Response(body=autoSubmitHTML(form_html))
def _get_return_to(self, req):
return_to = req.link('process', qualified=True)
# post-process the return_to protocol.
if self.protocol:
if return_to.startswith('https://') and self.protocol == 'http':
return_to = return_to.replace('https://', "%s://"
%(self.protocol))
elif return_to.startswith('http://') and self.protocol == 'https':
return_to = return_to.replace('http://', "%s://"
%(self.protocol))
return return_to
class _Missing(object):
def __repr__(self):
return 'no value'
def __reduce__(self):
return '_missing'
_missing = _Missing()
class cached_property(object):
"""A decorator that converts a function into a lazy property. The
function wrapped is called the first time to retrieve the result
and then that calculated result is used the next time you access
the value::
class Foo(object):
@cached_property
def foo(self):
# calculate something important here
return 42
The class has to have a `__dict__` in order for this property to
work.
"""
# implementation detail: this property is implemented as non-data
# descriptor. non-data descriptors are only invoked if there is
# no entry with the same name in the instance's __dict__.
# this allows us to completely get rid of the access function call
# overhead. If one chooses to invoke __get__ by hand the property
# will still work as expected because the lookup logic is replicated
# in __get__ for manual invocation.
def __init__(self, func, name=None, doc=None):
self.__name__ = name or func.__name__
self.__module__ = func.__module__
self.__doc__ = doc or func.__doc__
self.func = func
def __get__(self, obj, type=None):
if obj is None:
return self
value = obj.__dict__.get(self.__name__, _missing)
if value is _missing:
value = self.func(obj)
obj.__dict__[self.__name__] = value
return value
|
Python
| 0 |
@@ -4015,32 +4015,62 @@
_to(self, req):%0A
+ if self.protocol:%0A
return_t
@@ -4111,68 +4111,19 @@
True
-)%0A # post-process the return_to protocol.%0A if
+, protocol=
self
@@ -4131,29 +4131,26 @@
protocol
-:%0A
+)%0A
if retu
@@ -4145,392 +4145,73 @@
- if return_to.startswith('https://') and self.protocol == 'http':%0A return_to = return_to.replace('https://', %22%25s://%22%0A %25(self.protocol))%0A elif return_to.startswith('http://') and self.protocol == 'https':%0A return_to = return_to.replace('http://', %22%25s://%22%0A %25(self.protocol))%0A return return_to
+else:%0A return_to = req.link('process', qualified=True)
%0A%0A%0Ac
|
9f4fbb1db8e96a798bca1ae72d8cae8b90ba7d60
|
Add Message.fields() shortcut for iterating over message fields
|
venom/message.py
|
venom/message.py
|
from abc import ABCMeta
from collections import MutableMapping
from collections import OrderedDict
from typing import Any, Dict, Type
from venom.fields import Field, FieldDescriptor
from venom.util import meta
class OneOf(object):
def __init__(self, *choices):
self.choices = choices
# TODO helper functions.
def which(self):
raise NotImplementedError
def get(self) -> Any:
raise NotImplementedError
class MessageMeta(ABCMeta):
@classmethod
def __prepare__(metacls, name, bases):
return OrderedDict()
def __new__(metacls, name, bases, members):
cls = super(MessageMeta, metacls).__new__(metacls, name, bases, members)
cls.__fields__ = OrderedDict(getattr(cls, '__fields__') or ())
cls.__meta__, meta_changes = meta(bases, members)
cls.__meta__.wire_formats = {}
if not meta_changes.get('name', None):
cls.__meta__.name = name
for name, member in members.items():
if isinstance(member, FieldDescriptor):
cls.__fields__[name] = member
if member.name is None:
member.name = name
elif isinstance(member, OneOf):
cls.__meta__.one_of_groups += (name, member.choices)
return cls
class Message(MutableMapping, metaclass=MessageMeta):
__slots__ = ('_values',)
__fields__ = None # type: Dict[str, Field]
__meta__ = None # type: Dict[str, Any]
class Meta:
name = None
one_of_groups = ()
wire_formats = None
def __init__(self, *args, **kwargs):
if args:
self._values = {}
for value, key in zip(args, self.__fields__.keys()):
self._values[key] = value
for key, value in kwargs.items():
self._values[key] = value
else:
self._values = {key: value for key, value in kwargs.items()}
@classmethod
def from_object(cls, obj):
kwargs = {}
for key, field in cls.__fields__.items():
if hasattr(obj, '__getitem__'):
try:
kwargs[key] = obj[key]
continue
except (IndexError, TypeError, KeyError):
pass
try:
kwargs[key] = getattr(obj, key)
except AttributeError:
pass
return cls(**kwargs)
def __getitem__(self, key):
return self._values[key]
def __setitem__(self, key, value):
self._values[key] = value
def __delitem__(self, key):
del self._values[key]
def __contains__(self, key):
return key in self._values
def __iter__(self):
return iter(self._values)
def __len__(self):
return len(self._values)
def __repr__(self):
parts = []
for key in self.__fields__.keys():
if key in self._values:
parts.append('{}={}'.format(key, repr(self._values[key])))
return '{}({})'.format(self.__meta__.name, ', '.join(parts))
def one_of(*choices):
"""
Usage:::
class SearchRequest(Message):
query = one_of('name', 'id')
s = SearchRequest(id=123)
s.query.which() # 'id'
"""
return OneOf(choices)
class Empty(Message):
pass
def message_factory(name: str, fields: Dict[str, FieldDescriptor]) -> Type[Message]:
return type(name, (Message,), fields)
def get_or_default(message: Message, key: str, default: Any = None):
try:
return message[key]
except KeyError as e:
if key in message.__fields__:
if default is None:
return message.__fields__[key].default()
return default
raise e
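# Hedged sketch of the helpers above. Field declaration is omitted because the
# descriptor's constructor signature is not shown in this module:
#
#     Ping = message_factory('Ping', {})    # dynamic Message subclass, no fields
#     get_or_default(Ping(), 'seq', 0)      # raises KeyError: 'seq' is not declared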
|
Python
| 0.000003 |
@@ -126,16 +126,26 @@
ct, Type
+, Iterable
%0A%0Afrom v
@@ -166,15 +166,8 @@
port
- Field,
Fie
@@ -1431,16 +1431,26 @@
r, Field
+Descriptor
%5D%0A __
@@ -1951,16 +1951,123 @@
ems()%7D%0A%0A
+ @classmethod%0A def fields(cls) -%3E Iterable%5BFieldDescriptor%5D:%0A return cls.__fields__.values()%0A%0A
@cla
|
bacf7dda4baa7aa930a613394d90f29bf83f6209
|
Fix windowsservice.py
|
executor_worker/windowsservice.py
|
executor_worker/windowsservice.py
|
'''
windowsservice.py: runs node_worker.js as a Windows service
HOWTO:
Install http://sourceforge.net/projects/pywin32/
.\windowsservice.py addservicelogon kevin
.\windowsservice.py --username .\kevin --password secret install
.\windowsservice.py start
.\windowsservice.py stop
.\windowsservice.py remove
Workers are listed at http://localhost:8855/rest/external/executor/worker
These log files are written in this directory: npm.log service_stderr.log service_stdout.log
Service startup errors are written to Event Viewer > Application log
'''
import os
import sys
import win32serviceutil
import win32service
import servicemanager
import win32event
import os.path
import subprocess
import win32api
import win32job
import win32file
import win32process
import win32security
class WebGMEWorkerService(win32serviceutil.ServiceFramework):
_svc_name_ = "WebGMEWorker"
_svc_display_name_ = "WebGMEWorkerService"
#_svc_description_ = ''
def __init__(self, args):
win32serviceutil.ServiceFramework.__init__(self, args)
self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)
self.hJob = None
def SvcStop(self):
self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
win32event.SetEvent(self.hWaitStop)
def SvcDoRun(self):
if hasattr(sys, "frozen"):
this_dir = os.path.dirname(win32api.GetModuleFileName(None))
else:
this_dir = os.path.dirname(os.path.abspath(__file__))
# TODO: maybe it is better to run this in a job object too
with open(os.path.join(this_dir, 'npm.log'), 'w') as npm_log:
subprocess.check_call('npm install', cwd=this_dir, shell=True, stdin=None, stdout=npm_log, stderr=subprocess.STDOUT)
security_attributes = win32security.SECURITY_ATTRIBUTES()
security_attributes.bInheritHandle = True
startup = win32process.STARTUPINFO()
startup.dwFlags |= win32process.STARTF_USESTDHANDLES
startup.hStdInput = None
startup.hStdOutput = win32file.CreateFile(os.path.join(this_dir, "service_stderr.log"), win32file.GENERIC_WRITE, win32file.FILE_SHARE_READ, security_attributes, win32file.CREATE_ALWAYS, 0, None)
startup.hStdError = win32file.CreateFile(os.path.join(this_dir, "service_stdout.log"), win32file.GENERIC_WRITE, win32file.FILE_SHARE_READ, security_attributes, win32file.CREATE_ALWAYS, 0, None)
(hProcess, hThread, processId, threadId) = win32process.CreateProcess(None, r'"C:\Program Files\nodejs\node.exe" node_worker.js', None, None, True,
win32process.CREATE_SUSPENDED | win32process.CREATE_BREAKAWAY_FROM_JOB, None, this_dir, startup)
assert not win32job.IsProcessInJob(hProcess, None)
self.hJob = win32job.CreateJobObject(None, "")
extended_info = win32job.QueryInformationJobObject(self.hJob, win32job.JobObjectExtendedLimitInformation)
extended_info['BasicLimitInformation']['LimitFlags'] = win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE | win32job.JOB_OBJECT_LIMIT_BREAKAWAY_OK
win32job.SetInformationJobObject(self.hJob, win32job.JobObjectExtendedLimitInformation, extended_info)
win32job.AssignProcessToJobObject(self.hJob, hProcess)
win32process.ResumeThread(hThread)
win32api.CloseHandle(hThread)
signalled = win32event.WaitForMultipleObjects([self.hWaitStop, hProcess], False, win32event.INFINITE)
if signalled == win32event.WAIT_OBJECT_0 + 1 and win32process.GetExitCodeProcess(hProcess) != 0:
servicemanager.LogErrorMsg(self._svc_name_ + " process exited with non-zero status " + str(win32process.GetExitCodeProcess(hProcess)))
win32api.CloseHandle(hProcess)
win32api.CloseHandle(self.hJob)
win32api.CloseHandle(self.hWaitStop)
win32api.CloseHandle(startup.hStdOutput)
win32api.CloseHandle(startup.hStdError)
if __name__ == '__main__':
if len(sys.argv) == 3 and sys.argv[1] == 'addservicelogon':
user = sys.argv[2]
import subprocess
import io
import codecs
os.unlink('secpol.inf')
subprocess.check_call('secedit /export /cfg secpol.inf')
with io.open('secpol.inf', 'r', encoding='utf-16-le') as secpol_inf:
line = [l for l in secpol_inf.readlines() if l.startswith('SeServiceLogonRight = ')][0]
with open('secpol.inf', 'wb') as secpol_inf:
secpol_inf.write(codecs.BOM_UTF16_LE)
secpol_inf.write((u'''[Unicode]
Unicode=yes
[Privilege Rights]
%s
[Version]
signature="$CHICAGO$"
Revision=1
''' % (line.replace('\n', '').replace('\r', '') + ',' + user)).encode('utf-16-le'))
subprocess.check_call('secedit /configure /db database.sdb /cfg secpol.inf')
else:
win32serviceutil.HandleCommandLine(WebGMEWorkerService)
|
Python
| 0.000021 |
@@ -4185,16 +4185,34 @@
codecs%0D%0A
+ try:%0D%0A
@@ -4228,32 +4228,115 @@
('secpol.inf')%0D%0A
+ except OSError as e:%0D%0A if e.errno != 2:%0D%0A raise%0D%0A
subproce
|
e3753ac4b2c24c43014aab8121a34b9ad76d6b7a
|
update tests to v2.1.1 (#1597)
|
exercises/hamming/hamming_test.py
|
exercises/hamming/hamming_test.py
|
import unittest
import hamming
# Tests adapted from `problem-specifications//canonical-data.json` @ v2.1.0
class HammingTest(unittest.TestCase):
def test_empty_strands(self):
self.assertEqual(hamming.distance("", ""), 0)
def test_identical_strands(self):
self.assertEqual(hamming.distance("A", "A"), 0)
def test_long_identical_strands(self):
self.assertEqual(hamming.distance("GGACTGA", "GGACTGA"), 0)
def test_complete_distance_in_single_nucleotide_strands(self):
self.assertEqual(hamming.distance("A", "G"), 1)
def test_complete_distance_in_small_strands(self):
self.assertEqual(hamming.distance("AG", "CT"), 2)
def test_small_distance_in_small_strands(self):
self.assertEqual(hamming.distance("AT", "CT"), 1)
def test_small_distance(self):
self.assertEqual(hamming.distance("GGACG", "GGTCG"), 1)
def test_small_distance_in_long_strands(self):
self.assertEqual(hamming.distance("ACCAGGG", "ACTATGG"), 2)
def test_non_unique_character_in_first_strand(self):
self.assertEqual(hamming.distance("AAG", "AAA"), 1)
def test_non_unique_character_in_second_strand(self):
self.assertEqual(hamming.distance("AAA", "AAG"), 1)
def test_same_nucleotides_in_different_positions(self):
self.assertEqual(hamming.distance("TAG", "GAT"), 2)
def test_large_distance(self):
self.assertEqual(hamming.distance("GATACA", "GCATAA"), 4)
def test_large_distance_in_off_by_one_strand(self):
self.assertEqual(hamming.distance("GGACGGATTCTG", "AGGACGGATTCT"), 9)
def test_disallow_first_strand_longer(self):
with self.assertRaisesWithMessage(ValueError):
hamming.distance("AATG", "AAA")
def test_disallow_second_strand_longer(self):
with self.assertRaisesWithMessage(ValueError):
hamming.distance("ATA", "AGTG")
# Utility functions
def setUp(self):
try:
self.assertRaisesRegex
except AttributeError:
self.assertRaisesRegex = self.assertRaisesRegexp
def assertRaisesWithMessage(self, exception):
return self.assertRaisesRegex(exception, r".+")
if __name__ == '__main__':
unittest.main()
|
Python
| 0 |
@@ -101,17 +101,17 @@
@ v2.1.
-0
+1
%0A%0Aclass
|
d47cfd7c1a4dd22ab175539dcb0e3702a21f8bb7
|
Move scaling factors to constant and explain
|
ynr/apps/moderation_queue/management/commands/moderation_queue_detect_faces_in_queued_images.py
|
ynr/apps/moderation_queue/management/commands/moderation_queue_detect_faces_in_queued_images.py
|
import json
import boto3
from django.core.management.base import BaseCommand, CommandError
from moderation_queue.models import QueuedImage
class Command(BaseCommand):
def handle(self, **options):
rekognition = boto3.client("rekognition", "eu-west-1")
attributes = ["ALL"]
any_failed = False
qs = QueuedImage.objects.filter(decision="undecided").exclude(
face_detection_tried=True
)
for qi in qs:
try:
detected = rekognition.detect_faces(
Image={"Bytes": qi.image.file.read()}, Attributes=attributes
)
self.set_x_y_from_response(qi, detected, options["verbosity"])
except Exception as e:
msg = "Skipping QueuedImage{id}: {error}"
self.stdout.write(msg.format(id=qi.id, error=e))
any_failed = True
qi.face_detection_tried = True
qi.save()
if any_failed:
raise CommandError("Broken images found (see above)")
def set_x_y_from_response(self, qi, detected, verbosity=0):
if detected and detected["FaceDetails"]:
im_width = qi.image.width
im_height = qi.image.height
bounding_box = detected["FaceDetails"][0]["BoundingBox"]
qi.crop_min_x = bounding_box["Left"] * im_width * 0.3
qi.crop_min_y = bounding_box["Top"] * im_height * 0.3
qi.crop_max_x = bounding_box["Width"] * im_width * 2
qi.crop_max_y = bounding_box["Height"] * im_height * 2
qi.detection_metadata = json.dumps(detected, indent=4)
if int(verbosity) > 1:
self.stdout.write("Set bounds of {}".format(qi))
else:
self.stdout.write("Couldn't find a face in {}".format(qi))
|
Python
| 0 |
@@ -137,16 +137,269 @@
Image%0A%0A%0A
+# These magic values are because the AWS API crops faces quite tightly by%0A# default, meaning we literally just get the face. These values are about%0A# right or, they are more right than the default crop.%0AMIN_SCALING_FACTOR = 0.3%0AMAX_SCALING_FACTOR = 2%0A%0A%0A
class Co
@@ -1633,19 +1633,34 @@
width *
-0.3
+MIN_SCALING_FACTOR
%0A
@@ -1714,19 +1714,34 @@
eight *
-0.3
+MIN_SCALING_FACTOR
%0A
@@ -1756,24 +1756,42 @@
crop_max_x =
+ (%0A
bounding_bo
@@ -1814,17 +1814,48 @@
width *
-2
+MAX_SCALING_FACTOR%0A )
%0A
@@ -1870,24 +1870,42 @@
crop_max_y =
+ (%0A
bounding_bo
@@ -1930,17 +1930,48 @@
eight *
-2
+MAX_SCALING_FACTOR%0A )
%0A
|
a0e0f7867e8e9805fb035a8db75e9d187fc06f3b
|
fix merge
|
rest_framework_social_oauth2/views.py
|
rest_framework_social_oauth2/views.py
|
# -*- coding: utf-8 -*-
import json
from braces.views import CsrfExemptMixin
from oauth2_provider.ext.rest_framework import OAuth2Authentication
from oauth2_provider.models import Application, AccessToken
from oauth2_provider.settings import oauth2_settings
from oauth2_provider.views.mixins import OAuthLibMixin
from rest_framework import permissions
from rest_framework import status
from rest_framework.decorators import api_view, authentication_classes, permission_classes
from rest_framework.response import Response
from rest_framework.views import APIView
from .oauth2_backends import KeepRequestCore
from .oauth2_endpoints import SocialTokenServer
class ConvertTokenView(CsrfExemptMixin, OAuthLibMixin, APIView):
"""
Implements an endpoint to provide access tokens
The endpoint is used in the following flows:
* Authorization code
* Password
* Client credentials
"""
server_class = SocialTokenServer
validator_class = oauth2_settings.OAUTH2_VALIDATOR_CLASS
oauthlib_backend_class = KeepRequestCore
def post(self, request, *args, **kwargs):
# Use the rest framework `.data` to fake the post body of the django request.
request._request.POST = request._request.POST.copy()
for key, value in request.data.iteritems():
request._request.POST[key] = value
url, headers, body, status = self.create_token_response(request._request)
response = Response(data=json.loads(body), status=status)
for k, v in headers.items():
response[k] = v
return response
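# Hedged request sketch (the mount path and credential values are
# placeholders, not defined in this module):
#
#     POST /auth/convert-token
#         grant_type=convert_token&backend=facebook
#         &client_id=<app id>&client_secret=<app secret>&token=<social token>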
@api_view(['POST'])
@authentication_classes([OAuth2Authentication])
@permission_classes([permissions.IsAuthenticated])
def invalidate_sessions(request):
client_id = request.POST.get("client_id", None)
if client_id is None:
return Response({
"client_id": ["This field is required."]
}, status=status.HTTP_400_BAD_REQUEST)
try:
app = Application.objects.get(client_id=client_id)
except Application.DoesNotExist:
return Response({
"detail": "The application linked to the provided client_id could not be found."
}, status=status.HTTP_400_BAD_REQUEST)
tokens = AccessToken.objects.filter(user=request.user, application=app)
tokens.delete()
return Response({}, status=status.HTTP_204_NO_CONTENT)
|
Python
| 0.000001 |
@@ -1044,16 +1044,65 @@
uestCore
+%0A permission_classes = (permissions.AllowAny,)
%0A%0A de
|
d5bce66b6f9647a8bf8102acf6bd100c5bb70864
|
decode the string
|
proxy/src/gosa/proxy/mqtt_relay.py
|
proxy/src/gosa/proxy/mqtt_relay.py
|
import logging
from lxml import objectify, etree
from zope.interface import implementer
from gosa.common import Environment
from gosa.common.components import PluginRegistry
from gosa.common.components.mqtt_handler import MQTTHandler
from gosa.common.event import EventMaker
from gosa.common.handler import IInterfaceHandler
@implementer(IInterfaceHandler)
class MQTTRelayService(object):
"""
This service acts as a proxy between the backend and proxy MQTT brokers
to forward messages from one to the other.
In detail this service listens to (event-)messages from the backend to the clients on the backends MQTT broker
and forwards them to the clients (via the proxies MQTT broker) and the other way around.
In addition, this service handles events sent from the backend to the proxy (those are not forwarded
to the clients).
The message routing is done by the following rules:
**Received from backend MQTT broker**
* Subscribed to `<domain>/proxy` and `<domain>/client/#` topics
* all messages with topic not starting with `<domain>/proxy` are forwarded to the proxy MQTT broker
* `ClientPoll` and `Trigger` events are processed locally
**Received from proxy MQTT broker**
* Subscribed to `<domain>/client/#` topics
* all messages are forwarded to the backend MQTT broker
* (please note that the ClientService plugin
is also subscribed to the proxy MQTT broker and handles the client messages locally)
"""
_priority_ = 10
backend_mqtt = None
proxy_mqtt = None
def __init__(self):
self.env = Environment.getInstance()
self.log = logging.getLogger(__name__)
e = EventMaker()
self.goodbye = e.Event(e.BusClientState(e.Id(self.env.core_uuid), e.Type("proxy"), e.State("leave")))
self.hello = e.Event(e.BusClientState(e.Id(self.env.core_uuid), e.Type("proxy"), e.State("enter")))
def serve(self):
self.backend_mqtt = MQTTHandler(
host=self.env.config.get("backend.mqtt-host"),
port=self.env.config.getint("backend.mqtt-port", default=1883),
use_ssl=self.env.config.getboolean("backend.mqtt-ssl", default=True),
ca_file=self.env.config.get("backend.mqtt-ca_file"),
insecure=self.env.config.getboolean("backend.mqtt-insecure", default=None),
)
# subscribe to all client relevant topics
self.backend_mqtt.get_client().add_subscription("%s/client/#" % self.env.domain, qos=1)
# subscribe to proxy topic
self.backend_mqtt.get_client().add_subscription("%s/proxy" % self.env.domain, qos=1)
self.backend_mqtt.get_client().add_subscription("%s/bus" % self.env.domain, qos=1)
self.backend_mqtt.set_subscription_callback(self._handle_backend_message)
# set our last will and testament (on the backend broker)
self.backend_mqtt.will_set("%s/bus" % self.env.domain, self.goodbye, qos=1)
# connect to the proxy MQTT broker (where the clients are listening)
self.proxy_mqtt = MQTTHandler(
host=self.env.config.get("mqtt.host"),
port=self.env.config.getint("mqtt.port", default=1883))
self.proxy_mqtt.get_client().add_subscription("%s/client/#" % self.env.domain, qos=1)
self.proxy_mqtt.get_client().add_subscription("%s/bus" % self.env.domain, qos=1)
self.proxy_mqtt.set_subscription_callback(self._handle_proxy_message)
PluginRegistry.getInstance("CommandRegistry").init_backend_proxy(self.backend_mqtt)
self.backend_mqtt.send_event(self.hello, "%s/bus" % self.env.domain, qos=1)
self.proxy_mqtt.send_event(self.hello, "%s/bus" % self.env.domain, qos=1)
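# Hedged config sketch (ini-style keys inferred from the config reads in
# serve() above; host names are placeholders):
#
#     [backend]
#     mqtt-host = backend-broker.example.net
#     mqtt-port = 1883
#     mqtt-ssl = True
#     [mqtt]
#     host = localhost
#     port = 1883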
def _handle_backend_message(self, topic, message):
""" forwards backend messages to proxy MQTT and handles received events"""
forward = not topic.startswith("%s/proxy" % self.env.domain)
if message[0:1] != "{":
# event received
try:
xml = objectify.fromstring(message)
if hasattr(xml, "ClientPoll"):
self.__handleClientPoll()
elif hasattr(xml, "Trigger"):
if xml.Trigger.Type == "ACLChanged":
self.log.debug("ACLChanged trigger received, reloading ACLs")
resolver = PluginRegistry.getInstance("ACLResolver")
resolver.load_acls()
else:
self.log.warning("unhandled Trigger event of type: %s received" % xml.Trigger.Type)
except etree.XMLSyntaxError as e:
self.log.error("Message parsing error: %s" % e)
if forward is True:
self.log.debug("forwarding message in topic '%s' to proxy MQTT broker: %s" % (topic, message[0:80]))
self.proxy_mqtt.send_message(message, topic, qos=1)
def _handle_proxy_message(self, topic, message):
""" forwards proxy messages to backend MQTT """
if message[0:1] != "{":
# event received
try:
xml = objectify.fromstring(message)
if hasattr(xml, "UserSession"):
# these events need to be handled differently when they are relayed by a proxy, so we flag them
self.log.debug("Flagging UserSession-Event in topic '%s' as Proxied" % topic)
elem = objectify.SubElement(xml.UserSession, "Proxied")
elem._setText("true")
message = etree.tostring(xml)
except etree.XMLSyntaxError as e:
self.log.error("Message parsing error: %s" % e)
self.log.debug("forwarding message in topic '%s' to backend MQTT broker: %s" % (topic, message[0:80]))
self.backend_mqtt.send_message(message, topic, qos=1)
def __handleClientPoll(self):
""" register proxy-backend again """
index = PluginRegistry.getInstance("ObjectIndex")
index.registerProxy()
def close(self):
self.backend_mqtt.close()
self.proxy_mqtt.close()
def stop(self):
self.backend_mqtt.send_event(self.goodbye, "%s/bus" % self.env.domain, qos=1)
self.close()
self.backend_mqtt = None
self.proxy_mqtt = None
|
Python
| 1 |
@@ -5600,16 +5600,51 @@
ring(xml
+, pretty_print=True).decode('utf-8'
)%0A%0A
|
e13a74ae4e1884017593143e01e8882d7e802d7b
|
clean up imports
|
src/compas_rhino/geometry/__init__.py
|
src/compas_rhino/geometry/__init__.py
|
"""
********************************************************************************
geometry
********************************************************************************
.. currentmodule:: compas_rhino.geometry
Classes
=======
.. autosummary::
:toctree: generated/
:nosignatures:
RhinoGeometry
RhinoBox
RhinoCircle
RhinoCone
RhinoCurve
RhinoCylinder
RhinoEllipse
RhinoLine
RhinoMesh
RhinoPlane
RhinoPoint
RhinoPolyline
RhinoSphere
RhinoSurface
RhinoVector
"""
from __future__ import absolute_import
from ._geometry import RhinoGeometry
from .box import RhinoBox
from .circle import RhinoCircle
from .cone import RhinoCone
from .curve import RhinoCurve
from .cylinder import RhinoCylinder
from .ellipse import RhinoEllipse
from .line import RhinoLine
from .mesh import RhinoMesh
from .plane import RhinoPlane
from .point import RhinoPoint
from .polyline import RhinoPolyline
from .sphere import RhinoSphere
from .surface import RhinoSurface
from .vector import RhinoVector
BaseRhinoGeometry = RhinoGeometry
__all__ = [
'RhinoGeometry',
'RhinoBox',
'RhinoCircle',
'RhinoCone',
'RhinoCurve',
'RhinoCylinder',
'RhinoEllipse',
'RhinoLine',
'RhinoMesh',
'RhinoPlane',
'RhinoPoint',
'RhinoPolyline',
'RhinoSphere',
'RhinoSurface',
'RhinoVector',
]
|
Python
| 0.000001 |
@@ -215,1169 +215,62 @@
ry%0A%0A
-Classes%0A=======%0A%0A.. autosummary::%0A :toctree: generated/%0A :nosignatures:%0A%0A RhinoGeometry%0A RhinoBox%0A RhinoCircle%0A RhinoCone%0A RhinoCurve%0A RhinoCylinder%0A RhinoEllipse%0A RhinoLine%0A RhinoMesh%0A RhinoPlane%0A RhinoPoint%0A RhinoPolyline%0A RhinoSphere%0A RhinoSurface%0A RhinoVector%0A%0A%22%22%22%0Afrom __future__ import absolute_import%0A%0Afrom ._geometry import RhinoGeometry%0A%0Afrom .box import RhinoBox%0Afrom .circle import RhinoCircle%0Afrom .cone import RhinoCone%0Afrom .curve import RhinoCurve%0Afrom .cylinder import RhinoCylinder%0Afrom .ellipse import RhinoEllipse%0Afrom .line import RhinoLine%0Afrom .mesh import RhinoMesh%0Afrom .plane import RhinoPlane%0Afrom .point import RhinoPoint%0Afrom .polyline import RhinoPolyline%0Afrom .sphere import RhinoSphere%0Afrom .surface import RhinoSurface%0Afrom .vector import RhinoVector%0A%0ABaseRhinoGeometry = RhinoGeometry%0A%0A__all__ = %5B%0A 'RhinoGeometry',%0A 'RhinoBox',%0A 'RhinoCircle',%0A 'RhinoCone',%0A 'RhinoCurve',%0A 'RhinoCylinder',%0A 'RhinoEllipse',%0A 'RhinoLine',%0A 'RhinoMesh',%0A 'RhinoPlane',%0A 'RhinoPoint',%0A 'RhinoPolyline',%0A 'RhinoSphere',%0A 'RhinoSurface',%0A 'RhinoVector',%0A
+%22%22%22%0Afrom __future__ import absolute_import%0A%0A%0A__all__ = %5B
%5D%0A
|
e81e57140fd1010feaa2f2e05df77b370eb92087
|
Update affected / failing test.
|
st2actions/tests/unit/test_pythonrunner.py
|
st2actions/tests/unit/test_pythonrunner.py
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from unittest2 import TestCase
import mock
from st2actions.runners import pythonrunner
from st2actions.container import service
from st2common.constants.action import ACTION_OUTPUT_RESULT_DELIMITER
from st2common.constants.action import LIVEACTION_STATUS_SUCCEEDED, LIVEACTION_STATUS_FAILED
from st2common.constants.pack import SYSTEM_PACK_NAME
import st2tests.base as tests_base
import st2tests.config as tests_config
PACAL_ROW_ACTION_PATH = os.path.join(tests_base.get_resources_path(), 'packs',
'pythonactions/actions/pascal_row.py')
class PythonRunnerTestCase(TestCase):
@classmethod
def setUpClass(cls):
tests_config.parse_args()
def test_runner_creation(self):
runner = pythonrunner.get_runner()
self.assertTrue(runner is not None, 'Creation failed. No instance.')
self.assertEqual(type(runner), pythonrunner.PythonRunner, 'Creation failed. No instance.')
def test_simple_action(self):
runner = pythonrunner.get_runner()
runner.action = self._get_mock_action_obj()
runner.runner_parameters = {}
runner.entry_point = PACAL_ROW_ACTION_PATH
runner.container_service = service.RunnerContainerService()
runner.pre_run()
(status, result, _) = runner.run({'row_index': 4})
self.assertEqual(status, LIVEACTION_STATUS_SUCCEEDED)
self.assertTrue(result is not None)
self.assertEqual(result['result'], [1, 4, 6, 4, 1])
def test_simple_action_fail(self):
runner = pythonrunner.get_runner()
runner.action = self._get_mock_action_obj()
runner.runner_parameters = {}
runner.entry_point = PACAL_ROW_ACTION_PATH
runner.container_service = service.RunnerContainerService()
runner.pre_run()
(status, result, _) = runner.run({'row_index': '4'})
self.assertTrue(result is not None)
self.assertEqual(status, LIVEACTION_STATUS_FAILED)
def test_simple_action_no_file(self):
runner = pythonrunner.get_runner()
runner.action = self._get_mock_action_obj()
runner.runner_parameters = {}
runner.entry_point = 'foo.py'
runner.container_service = service.RunnerContainerService()
runner.pre_run()
(status, result, _) = runner.run({})
self.assertTrue(result is not None)
self.assertEqual(status, LIVEACTION_STATUS_FAILED)
def test_simple_action_no_entry_point(self):
runner = pythonrunner.get_runner()
runner.action = self._get_mock_action_obj()
runner.runner_parameters = {}
runner.entry_point = ''
runner.container_service = service.RunnerContainerService()
expected_msg = 'Action .*? is missing entry_point attribute'
self.assertRaisesRegexp(Exception, expected_msg, runner.run, {})
@mock.patch('st2actions.runners.pythonrunner.subprocess.Popen')
def test_action_with_user_supplied_env_vars(self, mock_popen):
env_vars = {'key1': 'val1', 'key2': 'val2', 'PYTHONPATH': 'foobar'}
mock_process = mock.Mock()
mock_process.communicate.return_value = ('', '')
mock_popen.return_value = mock_process
runner = pythonrunner.get_runner()
runner.action = self._get_mock_action_obj()
runner.runner_parameters = {'env': env_vars}
runner.entry_point = PACAL_ROW_ACTION_PATH
runner.container_service = service.RunnerContainerService()
runner.pre_run()
(_, _, _) = runner.run({'row_index': 4})
_, call_kwargs = mock_popen.call_args
actual_env = call_kwargs['env']
for key, value in env_vars.items():
# Verify that a blacklisted PYTHONPATH has been filtered out
if key == 'PYTHONPATH':
self.assertTrue(actual_env[key] != value)
else:
self.assertEqual(actual_env[key], value)
@mock.patch('st2actions.runners.pythonrunner.subprocess.Popen')
def test_stdout_interception_and_parsing(self, mock_popen):
values = {'delimiter': ACTION_OUTPUT_RESULT_DELIMITER}
# No output to stdout and no result (implicit None)
mock_stdout = '%(delimiter)sNone%(delimiter)s' % values
mock_stderr = 'foo stderr'
mock_process = mock.Mock()
mock_process.communicate.return_value = (mock_stdout, mock_stderr)
mock_process.returncode = 0
mock_popen.return_value = mock_process
runner = pythonrunner.get_runner()
runner.action = self._get_mock_action_obj()
runner.runner_parameters = {}
runner.entry_point = PACAL_ROW_ACTION_PATH
runner.container_service = service.RunnerContainerService()
runner.pre_run()
(_, output, _) = runner.run({'row_index': 4})
self.assertEqual(output['stdout'], '')
self.assertEqual(output['stderr'], mock_stderr)
self.assertEqual(output['result'], 'None')
self.assertEqual(output['exit_code'], 0)
# Output to stdout and no result (implicit None)
mock_stdout = 'pre result%(delimiter)sNone%(delimiter)spost result' % values
mock_stderr = 'foo stderr'
mock_process = mock.Mock()
mock_process.communicate.return_value = (mock_stdout, mock_stderr)
mock_process.returncode = 0
mock_popen.return_value = mock_process
runner = pythonrunner.get_runner()
runner.action = self._get_mock_action_obj()
runner.runner_parameters = {}
runner.entry_point = PACAL_ROW_ACTION_PATH
runner.container_service = service.RunnerContainerService()
runner.pre_run()
(_, output, _) = runner.run({'row_index': 4})
self.assertEqual(output['stdout'], 'pre resultpost result')
self.assertEqual(output['stderr'], mock_stderr)
self.assertEqual(output['result'], 'None')
self.assertEqual(output['exit_code'], 0)
def _get_mock_action_obj(self):
"""
Return mock action object.
Pack gets set to the system pack so the action doesn't require a separate virtualenv.
"""
action = mock.Mock()
action.pack = SYSTEM_PACK_NAME
action.entry_point = 'foo.py'
return action
|
Python
| 0 |
@@ -1362,16 +1362,233 @@
w.py')%0A%0A
+# Note: runner inherits parent args which doesn't work with tests since test pass additional%0A# unrecognized args%0Amock_sys = mock.Mock()%0Amock_sys.argv = %5B%5D%0A%0A%[email protected]('st2actions.runners.pythonrunner.sys', mock_sys)
%0Aclass P
|
3fb93c4b839457430180f65f1feae4c7abdba0ac
|
tag celery syslog messages
|
dbaas/dbaas/celery.py
|
dbaas/dbaas/celery.py
|
from __future__ import absolute_import
import os
import logging
from datetime import timedelta
from celery import Celery
from django.conf import settings
from dbaas import celeryconfig
from logging.handlers import SysLogHandler
from celery.log import redirect_stdouts_to_logger
from celery.signals import after_setup_task_logger, after_setup_logger
def setup_log(**args):
# redirect stdout and stderr to logger
redirect_stdouts_to_logger(args['logger'])
# logs to local syslog
syslog = SysLogHandler(address=settings.SYSLOG_FILE, facility=logging.handlers.SysLogHandler.LOG_LOCAL3)
# setting log level
syslog.setLevel(args['loglevel'])
# setting log format
formatter = logging.Formatter('dbaas: %(name)s %(message)s')
syslog.setFormatter(formatter)
# add new handler to logger
args['logger'].addHandler(syslog)
after_setup_logger.connect(setup_log)
after_setup_task_logger.connect(setup_log)
LOG = logging.getLogger(__name__)
#set this variable to True to run celery tasks synchronously
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dbaas.settings')
app = Celery('dbaas')
app.config_from_object(celeryconfig)
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
@app.task(bind=True)
def debug_task(self):
LOG.debug('Request: {0!r}'.format(self.request))
|
Python
| 0 |
@@ -727,16 +727,24 @@
('dbaas:
+ #celery
%25(name)
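The gist of the change above, sketched with a StreamHandler stand-in for SysLogHandler (the tag and logger name are illustrative):

import logging

handler = logging.StreamHandler()  # stand-in for SysLogHandler
# The 'dbaas: #celery' prefix lets syslog filters pick out celery messages
handler.setFormatter(logging.Formatter('dbaas: #celery %(name)s %(message)s'))
logger = logging.getLogger('demo')
logger.addHandler(handler)
logger.warning('task finished')  # emits: dbaas: #celery demo task finished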
|
c0f917c6098b18479a69fe129a0fd19d11f67df7
|
Fix startup
|
src/btsoot.py
|
src/btsoot.py
|
Python
| 0.000004 |
@@ -0,0 +1,1337 @@
+#!/usr/bin/env python3.5%0A#MIT License%0A#%0A#Copyright (c) 2016 Paul Kramme%0A#%0A#Permission is hereby granted, free of charge, to any person obtaining a copy%0A#of this software and associated documentation files (the %22Software%22), to deal%0A#in the Software without restriction, including without limitation the rights%0A#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell%0A#copies of the Software, and to permit persons to whom the Software is%0A#furnished to do so, subject to the following conditions:%0A#%0A#The above copyright notice and this permission notice shall be included in all%0A#copies or substantial portions of the Software.%0A#%0A#THE SOFTWARE IS PROVIDED %22AS IS%22, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR%0A#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,%0A#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE%0A#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER%0A#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,%0A#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE%0A#SOFTWARE.%0A%0Adef main():%0A%09print(%22BTSOOT 0.1.0%22)%0A%0Aif __name__ == __name__:%0A%09try:%0A%09%09main()%0A%09except KeyboardInterrupt:%0A%09%09print(%22Stopping program.%22)%0A%09%09exit()%0A%09except Exception:%0A%09%09print(%22Unknown Critical Exception%22)%0A%09%09print(%22Quitting...%22)%0A%0A
|
|
5cee2f6bc69a203cc9d20d50f375c3744c4b8753
|
Remove self from args passed to super
|
decisiontree/forms.py
|
decisiontree/forms.py
|
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
from django import forms
from django.contrib.auth.models import User
from decisiontree.models import Answer, Entry, Question, Tag, TagNotification, Transition, Tree, TreeState
from decisiontree.utils import parse_tags, edit_string_for_tags
class AnswerForm(forms.ModelForm):
class Meta:
model = Answer
def clean_alias(self):
data = self.cleaned_data["trigger"]
return data.lower()
class TreesForm(forms.ModelForm):
class Meta:
model = Tree
def __init__(self, *args, **kwargs):
super(TreesForm, self).__init__(*args, **kwargs)
states = TreeState.objects.select_related('question')
states = states.order_by('question__text')
self.fields['root_state'].label = 'First State'
self.fields['root_state'].queryset = states
self.fields['trigger'].label = 'Keyword'
self.fields['completion_text'].label = 'Completion Text'
class QuestionForm(forms.ModelForm):
class Meta:
model = Question
def __init__(self, *args, **kwargs):
super(QuestionForm, self).__init__(*args, **kwargs)
self.fields['text'].label = 'Message Text'
self.fields['error_response'].label = 'Error Text'
class StateForm(forms.ModelForm):
class Meta:
model = TreeState
class ReportForm(forms.Form):
ANALYSIS_TYPES = (
('A', 'Mean'),
('R', 'Median'),
('C', 'Mode'),
)
#answer = forms.CharField(label=("answer"),required=False)
dataanalysis = forms.ChoiceField(choices=ANALYSIS_TYPES)
class AnswerSearchForm(forms.Form):
# ANALYSIS_TYPES = (
# ('A', 'Mean'),
# ('R', 'Median'),
# ('C', 'Mode'),
# )
# answer = forms.ModelChoiceField(queryset=Answer.objects.none())
# analysis = forms.ChoiceField(choices=ANALYSIS_TYPES)
tag = forms.ModelChoiceField(queryset=Tag.objects.none(), required=False)
def __init__(self, *args, **kwargs):
tree = kwargs.pop('tree')
super(AnswerSearchForm, self).__init__(*args, **kwargs)
# answers = \
# Answer.objects.filter(transitions__entries__session__tree=tree)
tags = Tag.objects.filter(entries__session__tree=tree).distinct()
# self.fields['answer'].queryset = answers.distinct()
self.fields['tag'].queryset = tags
# self.fields['analysis'].label = 'Calculator'
# self.fields['tag'].label = 'Calculator'
class TagWidget(forms.TextInput):
def render(self, name, value, attrs=None):
if value is not None and not isinstance(value, basestring):
value = edit_string_for_tags(Tag.objects.filter(id__in=value))
return super(TagWidget, self).render(name, value, attrs)
class TagField(forms.CharField):
widget = TagWidget
def __init__(self, *args, **kwargs):
if 'help_text' not in kwargs:
kwargs['help_text'] = """Tags with spaces must be quoted, for example: apple "ball cat" dog, will result in "apple", "ball cat", and "dog" tags"""
super(TagField, self).__init__(self, *args, **kwargs)
def clean(self, value):
try:
tag_names = parse_tags(value)
except ValueError:
raise forms.ValidationError(_("Please provide a comma-separated list of tags."))
tags = []
for tag_name in tag_names:
tag, _ = Tag.objects.get_or_create(name=tag_name)
tags.append(tag)
return tags
class EntryTagForm(forms.ModelForm):
tags = TagField()
class Meta:
model = Entry
fields = ('tags',)
def save(self):
entry = super(EntryTagForm, self).save()
# create tag notifications
TagNotification.create_from_entry(entry)
return entry
class PathForm(forms.ModelForm):
tags = TagField(required=False)
class Meta:
model = Transition
def __init__(self, *args, **kwargs):
super(PathForm, self).__init__(*args, **kwargs)
states = TreeState.objects.select_related('question')
states = states.order_by('question__text')
self.fields['current_state'].queryset = states
self.fields['current_state'].label = 'Current State'
self.fields['answer'].label = 'Answer'
self.fields['answer'].queryset = Answer.objects.order_by('answer')
self.fields['next_state'].label = 'Next State'
self.fields['next_state'].queryset = states
self.fields['tags'].label = 'Auto tags'
class TagForm(forms.ModelForm):
class Meta:
model = Tag
def __init__(self, *args, **kwargs):
super(TagForm, self).__init__(*args, **kwargs)
self.fields['recipients'] = forms.ModelMultipleChoiceField(
queryset=User.objects.exclude(email=''),
widget=forms.CheckboxSelectMultiple,
required=False,
)
class TreeSummaryForm(forms.ModelForm):
class Meta:
model = Tree
fields = ('summary',)
def __init__(self, *args, **kwargs):
super(TreeSummaryForm, self).__init__(*args, **kwargs)
self.fields['summary'].widget = forms.Textarea()
|
Python
| 0 |
@@ -3119,38 +3119,32 @@
self).__init__(
-self,
*args, **kwargs)
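The one-line fix above removes a doubled self; a minimal sketch of why the original call breaks (class names here are illustrative):

class Base:
    def __init__(self, label):
        self.label = label

class Child(Base):
    def __init__(self, *args, **kwargs):
        # super(...) already binds self; passing it again would shift every
        # argument by one, so `label` would end up being the instance itself.
        super(Child, self).__init__(*args, **kwargs)

assert Child('ok').label == 'ok'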
|
a468ad4a8f28fb7c88f56869ae68de7b4b55ff39
|
Fix 'ResultSet has no len' bug in delete_insert_test
|
delete_insert_test.py
|
delete_insert_test.py
|
import random
import threading
import uuid
from cassandra import ConsistencyLevel
from cassandra.query import SimpleStatement
from dtest import Tester
class DeleteInsertTest(Tester):
"""
Examines scenarios around deleting data and adding data back with the same key
"""
def __init__(self, *args, **kwargs):
Tester.__init__(self, *args, **kwargs)
# Generate 1000 rows in memory so we can re-use the same ones over again:
self.groups = ['group1', 'group2', 'group3', 'group4']
self.rows = [(str(uuid.uuid1()), x, random.choice(self.groups)) for x in range(1000)]
def create_ddl(self, session, rf={'dc1': 2, 'dc2': 2}):
self.create_ks(session, 'delete_insert_search_test', rf)
session.execute('CREATE TABLE test (id uuid PRIMARY KEY, val1 text, group text)')
session.execute('CREATE INDEX group_idx ON test (group)')
def delete_group_rows(self, session, group):
"""Delete rows from a given group and return them"""
rows = [r for r in self.rows if r[2] == group]
ids = [r[0] for r in rows]
session.execute('DELETE FROM test WHERE id in (%s)' % ', '.join(ids))
return rows
def insert_all_rows(self, session):
self.insert_some_rows(session, self.rows)
def insert_some_rows(self, session, rows):
for row in rows:
session.execute("INSERT INTO test (id, val1, group) VALUES (%s, '%s', '%s')" % row)
def delete_insert_search_test(self):
cluster = self.cluster
cluster.populate([2, 2]).start()
node1 = cluster.nodelist()[0]
session = self.cql_connection(node1)
session.consistency_level = 'LOCAL_QUORUM'
self.create_ddl(session)
# Create 1000 rows:
self.insert_all_rows(session)
# Delete all of group2:
deleted = self.delete_group_rows(session, 'group2')
# Put that group back:
self.insert_some_rows(session, rows=deleted)
# Verify that all of group2 is back, 20 times, in parallel
# querying across all nodes:
class ThreadedQuery(threading.Thread):
def __init__(self, connection):
threading.Thread.__init__(self)
self.connection = connection
def run(self):
session = self.connection
query = SimpleStatement("SELECT * FROM delete_insert_search_test.test WHERE group = 'group2'", consistency_level=ConsistencyLevel.LOCAL_QUORUM)
rows = session.execute(query)
assert len(rows) == len(deleted)
threads = []
for x in range(20):
conn = self.cql_connection(random.choice(cluster.nodelist()))
threads.append(ThreadedQuery(conn))
for t in threads:
t.start()
for t in threads:
t.join()
|
Python
| 0.000002 |
@@ -1184,20 +1184,26 @@
return
+list(
rows
+)
%0A%0A de
@@ -2574,21 +2574,27 @@
ert len(
+list(
rows)
+)
== len(
|
21620653125f33fd0d19c1bb2f16b51ec3c853f9
|
fix tmin/tmax
|
ASC/SkyPie.py
|
ASC/SkyPie.py
|
#! /usr/bin/env python
#
# Takes about 15" fpr 1400 images on laptop with a local fast disk
#
import matplotlib.pyplot as plt
import numpy as np
import sys
date = ''
table = sys.argv[1]
png = table + '.png'
twopi = 2*np.pi
# table of time index (1...N) and median sky brightness (50,000 is very bright)
(t,s) = np.loadtxt(table).T
print("Sky: ",s.min(),s.max())
print("Time:",t.min(),t.max())
t0 = t[0]
t1 = t[-1]
print(t0,t1)
# degrees for polar plot
tmin = (t0-12.0)*180/12.0
tmax = 360 - (12-t1)*180/12.0
x = (12+24-t) * twopi / 24.0
y = s.max()-s
print(x.min(),x.max())
print(y.min(),y.max())
fig, ax = plt.subplots(1, 1, subplot_kw=dict(projection='polar'))
ax.plot(x, y)
ax.set_theta_zero_location('S')
ax.xaxis.set_major_formatter(plt.NullFormatter())
ax.xaxis.set_major_formatter(plt.NullFormatter())
ax.yaxis.set_major_formatter(plt.NullFormatter())
ax.yaxis.set_major_formatter(plt.NullFormatter())
ax.set_thetamin(tmin)
ax.set_thetamax(tmax)
ya = 0.2 * y
yb = 0.4 * y
yc = 0.8 * y
yd = 0.8 * y
ye = 0.9 * y
ax.fill_between(x,0, ya,facecolor='green',alpha=0.1)
ax.fill_between(x,ya,yb,facecolor='green',alpha=0.3)
ax.fill_between(x,yb,yc,facecolor='green',alpha=0.5)
ax.fill_between(x,yc,yd,facecolor='green',alpha=0.7)
ax.fill_between(x,yd,ye,facecolor='green',alpha=0.85)
ax.fill_between(x,ye,y ,facecolor='green',alpha=1)
# needs tweaking
plt.text(3.14,50000,'midnight',horizontalalignment='center')
plt.text(1.1,42000,'sunrise')
plt.text(5.1,48000,'sunset')
plt.text(5.5,20000,'imagine a moon')
plt.title("%s sky: %g %g %g-%g h" % (table,s.min(),s.max(),t0,t1))
plt.savefig(png)
plt.show()
print("Written ",png)
|
Python
| 0.00001 |
@@ -37,17 +37,17 @@
ut 15%22 f
-p
+o
r 1400 i
@@ -84,16 +84,66 @@
ast disk
+ (100%25 cpu)%0A# But 60%22 on the Xeon, but at 300%25 cpu
%0A#%0Aimpor
@@ -204,18 +204,8 @@
ys%0A%0A
-date = ''%0A
tabl
@@ -277,26 +277,25 @@
of
-time index (1...N)
+decimal hour time
and
@@ -470,65 +470,147 @@
t1)%0A
+%0A
#
-degrees for polar plot%0Atmin = (t0-12.0)*180/12.
+tmin is the sunrise, from t1 (6), should be near 90%0A# tmax is the sunset, from t0 (18) 270%0Atmin = (6-t1)*15 + 9
0%0Atmax =
360
@@ -609,40 +609,51 @@
ax =
- 360 -
(1
-2-t1)*180/12.0%0A
+8-t0)*15 + 270%0A%0Aprint(tmin,tmax)
%0Ax = (12
+24-
@@ -652,11 +652,8 @@
(12
-+24
-t)
@@ -1037,24 +1037,126 @@
Formatter())
+%0A%0Aif False:%0A # always same pie, an extra hour either side%0A tmin=75%0A tmax=285%0Aprint(tmin,tmax)
%0Aax.set_thet
@@ -1595,16 +1595,18 @@
pha=1)%0A%0A
+%0A%0A
# needs
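A quick check of the hour-to-degree arithmetic the fix introduces: 24 h span 360 degrees, so each hour is 15 degrees (the sunrise/sunset hours below are made up):

t0, t1 = 18.5, 5.5           # hypothetical sunset / sunrise decimal hours
tmin = (6 - t1) * 15 + 90    # sunrise maps near 90 degrees
tmax = (18 - t0) * 15 + 270  # sunset maps near 270 degrees
assert (tmin, tmax) == (97.5, 262.5)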
|
d84034db71abac46ef765f1640f3efa6712f5c42
|
Update RegisterHandler.py
|
Handlers/RegisterHandler.py
|
Handlers/RegisterHandler.py
|
# -*- coding: utf-8 -*-
import logging
from Handlers.BaseHandler import BaseHandler
from Tools import PostgreSQL, VerifyFields
logger = logging.getLogger(__name__)
class RegisterHandler(BaseHandler):
"""handle / endpoint"""
def initialize(self):
self.conn = PostgreSQL.get_session()
def get(self):
"""Serve Get and return main page"""
self.render('register.html')
def post(self):
"""Get user completed form and verify it before save it"""
prenom = self.get_body_argument('prenom')
nom = self.get_body_argument('nom')
courriel = self.get_body_argument('courriel')
genre = self.get_body_argument('genre')
promotion = int(self.get_body_argument('promotion'))
if VerifyFields.verify_all(prenom, nom, courriel, genre, promotion):
PostgreSQL.insert_inscrit(prenom, nom, genre, courriel, promotion)
self.render('registered.html')
else:
self.send_error(status_code=400)
|
Python
| 0.000001 |
@@ -230,80 +230,8 @@
%22%22%0A%0A
- def initialize(self):%0A self.conn = PostgreSQL.get_session()%0A%0A
|
a7160ce9345b14e656ce702b187048347b843811
|
update test_with_count_combination.py
|
tests/unit/selection/modules/test_with_count_combination.py
|
tests/unit/selection/modules/test_with_count_combination.py
|
# Tai Sakuma <[email protected]>
import itertools
import pytest
try:
import unittest.mock as mock
except ImportError:
import mock
from alphatwirl.selection.modules import AllwCount, AnywCount, NotwCount
##__________________________________________________________________||
class MockEventSelection(object):
def begin(self, event): pass
def __call__(self, event): pass
def end(self): pass
##__________________________________________________________________||
def test_combination():
# all0 - all1 --- all2 --- sel1
# | +- sel2
# +- not1 --- any1 --- all3 --- sel3
# | +- sel4
# +- sel5
sel1 = mock.Mock(spec=MockEventSelection)
sel1.name ='sel1'
sel2 = mock.Mock(spec=MockEventSelection)
sel2.name ='sel2'
sel3 = mock.Mock(spec=MockEventSelection)
sel3.name ='sel3'
sel4 = mock.Mock(spec=MockEventSelection)
sel4.name ='sel4'
sel5 = mock.Mock(spec=MockEventSelection)
sel5.name ='sel5'
all0 = AllwCount(name='all0')
all1 = AllwCount(name='all1')
all2 = AllwCount(name='all2')
all3 = AllwCount(name='all3')
any1 = AnywCount(name='any1')
all3.add(sel3)
all3.add(sel4)
any1.add(all3)
any1.add(sel5)
not1 = NotwCount(any1, name='not1')
all2.add(sel1)
all2.add(sel2)
all1.add(all2)
all1.add(not1)
all0.add(all1)
event = mock.Mock()
all0.begin(event)
for l in itertools.product(*[[True, False]]*5):
sel1.return_value = l[0]
sel2.return_value = l[1]
sel3.return_value = l[2]
sel4.return_value = l[3]
sel5.return_value = l[4]
all0(event)
all0.end()
count = all0.results()
assert [
[1, 'AllwCount', 'all1', 3, 32],
[2, 'AllwCount', 'all2', 8, 32],
[3, 'MockEventSelection', 'sel1', 16, 32],
[3, 'MockEventSelection', 'sel2', 8, 16],
[2 , 'NotwCount', 'not1', 3, 8],
[3, 'AnywCount', 'any1', 5, 8],
[4, 'AllwCount', 'all3', 2, 8],
[5, 'MockEventSelection', 'sel3', 4, 8],
[5, 'MockEventSelection', 'sel4', 2, 4],
[4, 'MockEventSelection', 'sel5', 3, 6]
] == count._results
##__________________________________________________________________||
|
Python
| 0.000002 |
@@ -484,28 +484,34 @@
_%7C%7C%0A
-def test_combination
[email protected]()%0Adef tree
():%0A
@@ -1460,24 +1460,329 @@
.add(all1)%0A%0A
+ return dict(%0A alls=(all0, all1, all2, all3),%0A anys=(any1, ),%0A nots=(not1, ),%0A sels=(sel1, sel2, sel3, sel4, sel5)%0A )%0A%0A##__________________________________________________________________%7C%7C%0Adef test_combination(tree):%0A%0A all0 = tree%5B'alls'%5D%5B0%5D%0A sels = tree%5B'sels'%5D%0A%0A
event =
@@ -1824,16 +1824,30 @@
-for l in
+all_possible_results =
ite
@@ -1882,110 +1882,279 @@
e%5D%5D*
-5):%0A sel1.return_value = l%5B0%5D
+len(sels))%0A # e.g.,%0A # %5B%0A # (True, True, True, True, True),%0A # (True, True, True, True, False),%0A # ...
%0A
+ #
-sel2.return_value = l%5B1%5D%0A sel3.return_value = l%5B2%5D
+(False, False, False, False, False)%0A # %5D%0A%0A for l in all_possible_results:%0A # e.g. l = (True, True, False, True, False)
%0A
@@ -2162,41 +2162,49 @@
+for
sel
-4.return_value = l%5B3%5D%0A
+, ret in zip(sels, l):%0A
sel5
@@ -2199,20 +2199,20 @@
+
sel
-5
.return_
@@ -2223,12 +2223,11 @@
e =
-l%5B4%5D
+ret
%0A
|
19b251a41ad26d7dabaf571b9bb90b82b9108d4b
|
fix hostname config issue
|
vint/cerf_api.py
|
vint/cerf_api.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
import json
from urlparse import urljoin
import requests
__author__ = 'tchen'
logger = logging.getLogger(__name__)
DEFAULT_HOSTNAME = 'http://exam.tchen.me'
#DEFAULT_HOSTNAME = 'http://localhost:8000'
class Request(object):
hostname = ''
api_path = '/'
def __init__(self, authcode):
self.authcode = authcode
self.api_base = self.hostname + self.api_path
def retrieve(self, id):
url = urljoin(self.api_base, str(id)) + '/'
try:
r = requests.get(url, data={'authcode': self.authcode})
return json.loads(r.text)
except:
return {}
def delete(self, id):
url = urljoin(self.api_base, str(id)) + '/'
try:
r = requests.delete(url, data={'authcode': self.authcode})
if r.status_code == requests.codes.no_content:
return True
return False
except:
return False
class Cerf(object):
def __init__(self, id, authcode, hostname=DEFAULT_HOSTNAME):
from misc import config
self.id = id
self.authcode = authcode
self.hostname = hostname
if config:
try:
self.hostname = config.get('global', 'host')
except:
pass
self.interview = Interview(authcode, id)
self.exam = Exam(authcode)
self.answer = Answer(authcode)
class Interview(Request):
hostname = DEFAULT_HOSTNAME
api_path = '/api/interviews/'
def __init__(self, authcode, id):
super(Interview, self).__init__(authcode)
self.id = id
def update(self, action, id=None, authcode=None):
id = id or self.id
authcode = authcode or self.authcode
url = urljoin(self.api_base, str(id)) + '/'
try:
r = requests.put(url, data={'authcode': authcode, 'action': action})
return json.loads(r.text)
except:
return {}
def start(self, id=None, authcode=None):
return self.update('start', id, authcode)
def finish(self, id=None, authcode=None):
return self.update('finish', id, authcode)
def reset(self, id=None, authcode=None):
return self.update('reset', id, authcode)
class Exam(Request):
hostname = DEFAULT_HOSTNAME
api_path = '/api/exams/'
class Answer(Request):
hostname = DEFAULT_HOSTNAME
api_path = '/api/answers/'
def create(self, data):
headers = {'Content-type': 'application/json', 'Accept': '*/*'}
try:
r = requests.post(self.api_base + '?authcode=%s' % self.authcode,
data=json.dumps(data), headers=headers)
if r.status_code != requests.codes.created:
return {}
return json.loads(r.text)
except Exception:
return {}
|
Python
| 0.000004 |
@@ -368,24 +368,56 @@
authcode):%0A
+ from misc import config%0A
self
@@ -492,16 +492,155 @@
i_path%0A%0A
+ if config:%0A try:%0A self.hostname = config.get('global', 'host')%0A except:%0A pass%0A%0A
def
@@ -1481,32 +1481,89 @@
lobal', 'host')%0A
+ print 'Host name is: %25s' %25 self.hostname%0A
exce
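The fallback-to-default pattern the fix applies, sketched with the standard-library parser as a stand-in (file name and section are hypothetical):

import configparser

config = configparser.ConfigParser()
config.read('settings.ini')  # silently yields nothing if the file is absent

hostname = 'http://exam.tchen.me'  # default
try:
    hostname = config.get('global', 'host')
except (configparser.NoSectionError, configparser.NoOptionError):
    pass  # keep the default when the option is missing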
|
63153322b9b4ef8d852c8e8a1621a724b8a56ac7
|
Update mongo_future_return.py
|
salt/returners/mongo_future_return.py
|
salt/returners/mongo_future_return.py
|
# -*- coding: utf-8 -*-
'''
Return data to a mongodb server
Required python modules: pymongo
This returner will send data from the minions to a MongoDB server. To
configure the settings for your MongoDB server, add the following lines
to the minion config files:
.. code-block:: yaml
mongo.db: <database name>
mongo.host: <server ip address>
mongo.user: <MongoDB username>
mongo.password: <MongoDB user password>
mongo.port: 27017
You can also ask for indexes creation on the most common used fields, which
should greatly improve performance. Indexes are not created by default.
.. code-block:: yaml
mongo.indexes: true
Alternative configuration values can be used by prefacing the configuration.
Any values not found in the alternative configuration will be pulled from
the default location:
.. code-block:: yaml
alternative.mongo.db: <database name>
alternative.mongo.host: <server ip address>
alternative.mongo.user: <MongoDB username>
alternative.mongo.password: <MongoDB user password>
alternative.mongo.port: 27017
This mongo returner is being developed to replace the default mongodb returner
in the future and should not be considered API stable yet.
To use the mongo returner, append '--return mongo' to the salt command.
.. code-block:: bash
salt '*' test.ping --return mongo
To use the alternative configuration, append '--return_config alternative' to the salt command.
.. versionadded:: 2015.5.0
.. code-block:: bash
salt '*' test.ping --return mongo --return_config alternative
'''
from __future__ import absolute_import
# Import python libs
import logging
# Import Salt libs
import salt.utils.jid
import salt.returners
import salt.ext.six as six
# Import third party libs
try:
import pymongo
version = pymongo.version
HAS_PYMONGO = True
except ImportError:
HAS_PYMONGO = False
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'mongo'
def __virtual__():
if not HAS_PYMONGO:
return False
return __virtualname__
def _remove_dots(src):
'''
Remove the dots from the given data structure
'''
output = {}
for key, val in six.iteritems(src):
if isinstance(val, dict):
val = _remove_dots(val)
output[key.replace('.', '-')] = val
return output
def _get_options(ret=None):
'''
Get the mongo options from salt.
'''
attrs = {'host': 'host',
'port': 'port',
'db': 'db',
'username': 'username',
'password': 'password',
'indexes': 'indexes'}
_options = salt.returners.get_returner_options(__virtualname__,
ret,
attrs,
__salt__=__salt__,
__opts__=__opts__)
return _options
def _get_conn(ret):
'''
Return a mongodb connection object
'''
_options = _get_options(ret)
host = _options.get('host')
port = _options.get('port')
db_ = _options.get('db')
user = _options.get('user')
password = _options.get('password')
indexes = _options.get('indexes', False)
if float(version) > 2.3:
conn = pymongo.MongoClient(host, port)
else:
conn = pymongo.Connection(host, port)
mdb = conn[db_]
if user and password:
mdb.authenticate(user, password)
if indexes:
mdb.saltReturns.ensure_index('minion')
mdb.saltReturns.ensure_index('jid')
mdb.jobs.ensure_index('jid')
return conn, mdb
def returner(ret):
'''
Return data to a mongodb server
'''
conn, mdb = _get_conn(ret)
if isinstance(ret['return'], dict):
back = _remove_dots(ret['return'])
else:
back = ret['return']
if isinstance(ret, dict):
full_ret = _remove_dots(ret)
else:
full_ret = ret
log.debug(back)
sdata = {'minion': ret['id'], 'jid': ret['jid'], 'return': back, 'fun': ret['fun'], 'full_ret': full_ret}
if 'out' in ret:
sdata['out'] = ret['out']
# save returns in the saltReturns collection in the json format:
# { 'minion': <minion_name>, 'jid': <job_id>, 'return': <return info with dots removed>,
# 'fun': <function>, 'full_ret': <unformatted return with dots removed>}
mdb.saltReturns.insert(sdata)
def save_load(jid, load):
'''
Save the load for a given job id
'''
conn, mdb = _get_conn(ret=None)
mdb.jobs.insert(load)
def get_load(jid):
'''
Return the load associated with a given job id
'''
conn, mdb = _get_conn(ret=None)
ret = mdb.jobs.find_one({'jid': jid})
return ret['load']
def get_jid(jid):
'''
Return the return information associated with a jid
'''
conn, mdb = _get_conn(ret=None)
ret = {}
rdata = mdb.saltReturns.find({'jid': jid})
if rdata:
for data in rdata:
minion = data['minion']
# return data in the format {<minion>: { <unformatted full return data>}}
ret[minion] = data['full_ret']
return ret
def get_fun(fun):
'''
Return the most recent jobs that have executed the named function
'''
conn, mdb = _get_conn(ret=None)
ret = {}
rdata = mdb.saltReturns.find_one({'fun': fun})
if rdata:
ret = rdata
return ret
def get_minions():
'''
Return a list of minions
'''
conn, mdb = _get_conn(ret=None)
ret = []
name = mdb.saltReturns.distinct('minion')
ret.append(name)
return ret
def get_jids():
'''
Return a list of job ids
'''
conn, mdb = _get_conn(ret=None)
ret = []
name = mdb.jobs.distinct('jid')
ret.append(name)
return ret
def prep_jid(nocache=False, passed_jid=None): # pylint: disable=unused-argument
'''
Do any work necessary to prepare a JID, including sending a custom id
'''
return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid()
|
Python
| 0 |
@@ -1806,24 +1806,71 @@
ngo.version%0A
+ version = '.'.join(version.split('.')%5B:2%5D)%0A
HAS_PYMO
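Why the added line matters: float() chokes on a three-part version string, so only major.minor is kept (the version value below is an example):

version = '3.6.1'
# float('3.6.1') raises ValueError; trim to major.minor before comparing
version = '.'.join(version.split('.')[:2])
assert float(version) > 2.3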
|
98d0e2d5aaff184afc598bef67491632c0eab066
|
Add save_load and get_load to mongo returner
|
salt/returners/mongo_future_return.py
|
salt/returners/mongo_future_return.py
|
'''
Return data to a mongodb server
Required python modules: pymongo
This returner will send data from the minions to a MongoDB server. To
configure the settings for your MongoDB server, add the following lines
to the minion config files::
mongo.db: <database name>
mongo.host: <server ip address>
mongo.user: <MongoDB username>
mongo.password: <MongoDB user password>
mongo.port: 27017
This mongo returner is being developed to replace the default mongodb returner
in the future and should not be considered api stable yet.
'''
# Import python libs
import logging
# Import third party libs
try:
import pymongo
has_pymongo = True
except ImportError:
has_pymongo = False
log = logging.getLogger(__name__)
def __virtual__():
if not has_pymongo:
return False
return 'mongo'
def _remove_dots(d):
output = {}
for k, v in d.iteritems():
if isinstance(v, dict):
v = _remove_dots(v)
output[k.replace('.', '-')] = v
return output
def _get_conn():
'''
Return a mongodb connection object
'''
conn = pymongo.Connection(
__salt__['config.option']('mongo.host'),
__salt__['config.option']('mongo.port'))
db = conn[__salt__['config.option']('mongo.db')]
user = __salt__['config.option']('mongo.user')
password = __salt__['config.option']('mongo.password')
if user and password:
db.authenticate(user, password)
return conn, db
def returner(ret):
'''
Return data to a mongodb server
'''
conn, db = _get_conn()
col = db[ret['id']]
back = {}
if isinstance(ret['return'], dict):
back = _remove_dots(ret['return'])
else:
back = ret['return']
log.debug(back)
sdata = {ret['jid']: back, 'fun': ret['fun']}
if 'out' in ret:
sdata['out'] = ret['out']
col.insert(sdata)
def get_jid(jid):
'''
Return the return information associated with a jid
'''
conn, db = _get_conn()
ret = {}
for collection in db.collection_names():
rdata = db[collection].find_one({jid: {'$exists': 'true'}})
if rdata:
ret[collection] = rdata
return ret
def get_fun(fun):
'''
Return the most recent jobs that have executed the named function
'''
conn, db = _get_conn()
ret = {}
for collection in db.collection_names():
rdata = db[collection].find_one({'fun': fun})
if rdata:
ret[collection] = rdata
return ret
|
Python
| 0 |
@@ -1886,24 +1886,315 @@
rt(sdata)%0A%0A%0A
+def save_load(jid, load):%0A '''%0A Save the load for a given job id%0A '''%0A conn, db = _get_conn()%0A col = db%5Bjid%5D%0A col.insert(load)%0A%0A%0Adef get_load(jid):%0A '''%0A Returnt he load asociated with a given job id%0A '''%0A conn, db = _get_conn()%0A return db%5Bjid%5D.find_one()%0A%0A%0A
def get_jid(
|
0aaa9000f8cf545bd5bfa41b6538d56c91dbde97
|
Update base box in sample config too
|
sampleconfigs/makebs.config.sample.py
|
sampleconfigs/makebs.config.sample.py
|
#!/usr/bin/env python2
# You will need to alter these before running ./makebuildserver
# Name of the base box to use...
basebox = "raring32"
# Location where raring32.box can be found, if you don't already have
# it. Could be set to https://f-droid.org/raring32.box if you like...
baseboxurl = "/shares/software/OS and Boot/raring32.box"
memory = 3584
# Debian package proxy server - set this to None unless you have one...
aptproxy = "http://192.168.0.19:8000"
# Set to True if your base box is 64 bit...
arch64 = False
|
Python
| 0 |
@@ -126,19 +126,20 @@
ebox = %22
-rar
+test
ing32%22%0A%0A
@@ -218,118 +218,217 @@
it.
-Could be set to https://f-droid.org/raring32.box if you like...%0Abaseboxurl = %22/shares/software/OS and Boot/rar
+For security reasons, it's recommended that you make your own%0A# in a secure environment using trusted media (see the manual) but%0A# you can use this default if you like...%0Abaseboxurl = %22https://f-droid.org/test
ing3
|
ce062240f9ff137a7f585d7f13f1144cca22e2e9
|
Tweak to notebook -> .py export, at Fernando's suggestion.
|
IPython/nbformat/v2/nbpy.py
|
IPython/nbformat/v2/nbpy.py
|
"""Read and write notebooks as regular .py files.
Authors:
* Brian Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import re
from .rwbase import NotebookReader, NotebookWriter
from .nbbase import new_code_cell, new_text_cell, new_worksheet, new_notebook
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
_encoding_declaration_re = re.compile(r"^#\s*coding[:=]\s*([-\w.]+)")
class PyReaderError(Exception):
pass
class PyReader(NotebookReader):
def reads(self, s, **kwargs):
return self.to_notebook(s,**kwargs)
def to_notebook(self, s, **kwargs):
lines = s.splitlines()
cells = []
cell_lines = []
state = u'codecell'
for line in lines:
if line.startswith(u'# <nbformat>') or _encoding_declaration_re.match(line):
pass
elif line.startswith(u'# <codecell>'):
cell = self.new_cell(state, cell_lines)
if cell is not None:
cells.append(cell)
state = u'codecell'
cell_lines = []
elif line.startswith(u'# <htmlcell>'):
cell = self.new_cell(state, cell_lines)
if cell is not None:
cells.append(cell)
state = u'htmlcell'
cell_lines = []
elif line.startswith(u'# <markdowncell>'):
cell = self.new_cell(state, cell_lines)
if cell is not None:
cells.append(cell)
state = u'markdowncell'
cell_lines = []
else:
cell_lines.append(line)
if cell_lines and state == u'codecell':
cell = self.new_cell(state, cell_lines)
if cell is not None:
cells.append(cell)
ws = new_worksheet(cells=cells)
nb = new_notebook(worksheets=[ws])
return nb
def new_cell(self, state, lines):
if state == u'codecell':
input = u'\n'.join(lines)
input = input.strip(u'\n')
if input:
return new_code_cell(input=input)
elif state == u'htmlcell':
text = self._remove_comments(lines)
if text:
return new_text_cell(u'html',source=text)
elif state == u'markdowncell':
text = self._remove_comments(lines)
if text:
return new_text_cell(u'markdown',source=text)
def _remove_comments(self, lines):
new_lines = []
for line in lines:
if line.startswith(u'#'):
new_lines.append(line[2:])
else:
new_lines.append(line)
text = u'\n'.join(new_lines)
text = text.strip(u'\n')
return text
def split_lines_into_blocks(self, lines):
if len(lines) == 1:
yield lines[0]
raise StopIteration()
import ast
source = '\n'.join(lines)
code = ast.parse(source)
starts = [x.lineno-1 for x in code.body]
for i in range(len(starts)-1):
yield '\n'.join(lines[starts[i]:starts[i+1]]).strip('\n')
yield '\n'.join(lines[starts[-1]:]).strip('\n')
class PyWriter(NotebookWriter):
def writes(self, nb, **kwargs):
lines = []
lines.extend([u'# coding: utf-8', u'# <nbformat>2</nbformat>',''])
for ws in nb.worksheets:
for cell in ws.cells:
if cell.cell_type == u'code':
input = cell.get(u'input')
if input is not None:
lines.extend([u'# <codecell>',u''])
lines.extend(input.splitlines())
lines.append(u'')
elif cell.cell_type == u'html':
input = cell.get(u'source')
if input is not None:
lines.extend([u'# <htmlcell>',u''])
lines.extend([u'# ' + line for line in input.splitlines()])
lines.append(u'')
elif cell.cell_type == u'markdown':
input = cell.get(u'source')
if input is not None:
lines.extend([u'# <markdowncell>',u''])
lines.extend([u'# ' + line for line in input.splitlines()])
lines.append(u'')
lines.append('')
return unicode('\n'.join(lines))
_reader = PyReader()
_writer = PyWriter()
reads = _reader.reads
read = _reader.read
to_notebook = _reader.to_notebook
write = _writer.write
writes = _writer.writes
|
Python
| 0 |
@@ -3882,32 +3882,50 @@
lines = %5B
+u'# coding: utf-8'
%5D%0A lines.
@@ -3939,28 +3939,8 @@
%5Bu'#
- coding: utf-8', u'#
%3Cnb
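The encoding-declaration regex from the reader above, exercised on the exact header line the writer now emits first:

import re

_encoding_declaration_re = re.compile(r"^#\s*coding[:=]\s*([-\w.]+)")
match = _encoding_declaration_re.match('# coding: utf-8')
assert match and match.group(1) == 'utf-8'  # reader will skip this line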
|
f5ea4c44480b0beafd7d22b50228d50d3130e7a2
|
support for deleting files when using utils-process in auto-mode
|
utils/process.py
|
utils/process.py
|
#!/usr/bin/env python
# Copyright (C) 2010-2014 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import os
import sys
import time
import logging
import argparse
import multiprocessing
logging.basicConfig(level=logging.INFO)
log = logging.getLogger()
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), ".."))
from lib.cuckoo.common.config import Config
from lib.cuckoo.core.database import Database, TASK_REPORTED, TASK_COMPLETED
from lib.cuckoo.core.database import TASK_FAILED_PROCESSING
from lib.cuckoo.core.plugins import RunProcessing, RunSignatures, RunReporting
from lib.cuckoo.core.startup import init_modules
def process(aid, report=False):
results = RunProcessing(task_id=aid).run()
RunSignatures(results=results).run()
if report:
RunReporting(task_id=aid, results=results).run()
Database().set_status(aid, TASK_REPORTED)
def autoprocess(parallel=1):
cfg = Config()
maxcount = cfg.cuckoo.max_analysis_count
count = 0
db = Database()
pool = multiprocessing.Pool(parallel)
pending_results = []
# CAUTION - big ugly loop ahead
while count < maxcount or not maxcount:
# pending_results maintenance
for ar, tid in list(pending_results):
if ar.ready():
if ar.successful():
log.info("Task #%d: reports generation completed", tid)
else:
try:
ar.get()
except:
log.exception("Exception when processing task ID %u.", tid)
db.set_status(tid, TASK_FAILED_PROCESSING)
pending_results.remove((ar, tid))
# if still full, don't add more (necessary despite pool)
if len(pending_results) >= parallel:
time.sleep(1)
continue
# if we're here, getting #parallel tasks should at least have one we don't know
tasks = db.list_tasks(status=TASK_COMPLETED, limit=parallel)
# for loop to add only one, nice
for task in tasks:
# not-so-efficient lock
if task.id in [tid for ar, tid in pending_results]:
continue
log.info("Processing analysis data for Task #%d", task.id)
result = pool.apply_async(process, (task.id,), {"report": True})
pending_results.append((result, task.id))
count += 1
break
# if there wasn't anything to add, sleep tight
if not tasks:
time.sleep(5)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("id", type=str, help="ID of the analysis to process (auto for continuous processing of unprocessed tasks).")
parser.add_argument("-d", "--debug", help="Display debug messages", action="store_true", required=False)
parser.add_argument("-r", "--report", help="Re-generate report", action="store_true", required=False)
parser.add_argument("-p", "--parallel", help="Number of parallel threads to use (auto mode only).", type=int, required=False, default=1)
args = parser.parse_args()
if args.debug:
log.setLevel(logging.DEBUG)
init_modules()
if args.id == "auto":
autoprocess(parallel=args.parallel)
else:
process(args.id, report=args.report)
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
|
Python
| 0 |
@@ -464,16 +464,68 @@
Config%0A
+from lib.cuckoo.common.constants import CUCKOO_ROOT%0A
from lib
@@ -799,22 +799,63 @@
ss(aid,
-report
+target=None, copy_path=None, report=False, auto
=False):
@@ -1067,16 +1067,249 @@
ORTED)%0A%0A
+ if auto:%0A if cfg.cuckoo.delete_original and os.path.exists(target):%0A os.unlink(target)%0A%0A if cfg.cuckoo.delete_bin_copy and os.path.exists(copy_path):%0A os.unlink(copy_path)%0A%0A
def auto
@@ -1333,27 +1333,8 @@
1):%0A
- cfg = Config()%0A
@@ -1610,24 +1610,43 @@
for ar, tid
+, target, copy_path
in list(pen
@@ -2106,16 +2106,35 @@
(ar, tid
+, target, copy_path
))%0A%0A
@@ -2591,16 +2591,62 @@
ar, tid
+, target, copy_path%0A
in pend
@@ -2757,16 +2757,248 @@
sk.id)%0A%0A
+ copy_path = os.path.join(CUCKOO_ROOT, %22storage%22, %22binaries%22,%0A task.sample.sha256)%0A%0A args = task.id, task.target, copy_path%0A kwargs = dict(report=True, auto=True)%0A
@@ -3040,37 +3040,22 @@
ss,
-(task.id,), %7B%22report%22: True%7D)
+args, kwargs)%0A
%0A
@@ -3102,16 +3102,40 @@
task.id
+, task.target, copy_path
))%0A%0A
@@ -4078,16 +4078,36 @@
ain__%22:%0A
+ cfg = Config()%0A%0A
try:
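The guarded-delete pattern the diff adds, pulled out as a sketch (the path below is illustrative):

import os

def remove_if_exists(path):
    # Mirror the diff: only unlink when the file is actually there
    if os.path.exists(path):
        os.unlink(path)

remove_if_exists('/tmp/nonexistent-sample')  # no-op when already gone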
|
bcf6e41b489b5447186c063193d32714856bdfc7
|
Add some example docs
|
InvenTree/plugin/samples/integration/custom_panel_sample.py
|
InvenTree/plugin/samples/integration/custom_panel_sample.py
|
"""
Sample plugin which renders custom panels on certain pages
"""
from plugin import IntegrationPluginBase
from plugin.mixins import PanelMixin
from part.views import PartDetail
from stock.views import StockLocationDetail
class CustomPanelSample(PanelMixin, IntegrationPluginBase):
"""
A sample plugin which renders some custom panels.
"""
PLUGIN_NAME = "CustomPanelExample"
PLUGIN_SLUG = "panel"
PLUGIN_TITLE = "Custom Panel Example"
def render_location_info(self, loc):
"""
Demonstrate that we can render information particular to a page
"""
return f"""
<h5>Location Information</h5>
<em>This location has no sublocations!</em>
<ul>
<li><b>Name</b>: {loc.name}</li>
<li><b>Path</b>: {loc.pathstring}</li>
</ul>
"""
def get_custom_panels(self, view, request):
panels = [
{
# This 'hello world' panel will be displayed on any view which implements custom panels
'title': 'Hello World',
'icon': 'fas fa-boxes',
'content': '<b>Hello world!</b>',
'description': 'A simple panel which renders hello world',
'javascript': 'alert("Hello world");',
},
{
# This panel will not be displayed, as it is missing the 'content' key
'title': 'No Content',
}
]
# This panel will *only* display on the PartDetail view
if isinstance(view, PartDetail):
panels.append({
'title': 'Custom Part Panel',
'icon': 'fas fa-shapes',
'content': '<em>This content only appears on the PartDetail page, you know!</em>',
})
# This panel will *only* display on the StockLocation view,
# and *only* if the StockLocation has *no* child locations
if isinstance(view, StockLocationDetail):
try:
loc = view.get_object()
if not loc.get_descendants(include_self=False).exists():
panels.append({
'title': 'Childless Location',
'icon': 'fa-user',
'content': self.render_location_info(loc),
})
except:
pass
return panels
|
Python
| 0.000001 |
@@ -879,24 +879,303 @@
request):%0A%0A
+ %22%22%22%0A You can decide at run-time which custom panels you want to display!%0A%0A - Display on every page%0A - Only on a single page or set of pages%0A - Only for a specific instance (e.g. part)%0A - Based on the user viewing the page!%0A %22%22%22%0A%0A
pane
|